def __init__(self, bids_directory):
    print('- Validate: init started.')
    file_paths = []
    result = []
    validator = BIDSValidator()
    for path, dirs, files in os.walk(bids_directory):
        for filename in files:
            # Skip .bidsignore and annotation files, which are not part of
            # the core BIDS specification
            if filename == '.bidsignore':
                continue
            if filename.endswith('_annotations.tsv'):
                continue
            if filename.endswith('_annotations.json'):
                continue
            temp = os.path.join(path, filename)
            # Validate each path relative to the BIDS root
            file_paths.append(temp[len(bids_directory):len(temp)])
            result.append(
                validator.is_bids(temp[len(bids_directory):len(temp)]))
            # print(validator.is_bids(temp[len(bids_directory):len(temp)]))
    self.set_file_paths(file_paths)
    self.set_result(result)
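# Illustrative sketch (not part of the original module): the constructor above
# passes BIDSValidator.is_bids() paths with the dataset root stripped off,
# i.e. paths relative to the BIDS root but starting with a path separator.
# The helper below shows that convention in isolation; the function name and
# the paths in the usage comment are hypothetical.
def _example_is_bids(bids_directory, filename):
    """Return True if `filename` (an absolute path inside `bids_directory`)
    is a valid BIDS path according to bids-validator."""
    import os
    from bids_validator import BIDSValidator  # assumes the bids-validator package
    relative = os.path.relpath(filename, bids_directory)
    # The validator expects a leading separator, e.g. '/sub-01/anat/...'
    return BIDSValidator().is_bids(os.path.join(os.path.sep, relative))

# _example_is_bids('/data/ds001', '/data/ds001/sub-01/anat/sub-01_T1w.nii.gz')
# -> True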
class BIDSLayout(object):
    """ Layout class representing an entire BIDS dataset.

    Args:
        root (str): The root directory of the BIDS dataset.
        validate (bool): If True, all files are checked for BIDS compliance
            when first indexed, and non-compliant files are ignored. This
            provides a convenient way to restrict file indexing to only those
            files defined in the "core" BIDS spec, as setting validate=True
            will cause files in supplementary folders like derivatives/,
            code/, etc. to be ignored.
        index_associated (bool): Argument passed onto the BIDSValidator;
            ignored if validate = False.
        absolute_paths (bool): If True, queries always return absolute paths.
            If False, queries return relative paths (for files and
            directories).
        derivatives (bool, str, list): Specifies whether and/or which
            derivatives to index. If True, all pipelines found in the
            derivatives/ subdirectory will be indexed. If a str or list, gives
            the paths to one or more derivatives directories to index. If
            False or None, the derivatives/ directory is ignored during
            indexing, and derivatives will have to be added manually via
            add_derivatives(). Note: derivatives datasets MUST contain a
            dataset_description.json file in order to be indexed.
        config (str, list): Optional name(s) of configuration file(s) to use.
            By default (None), uses 'bids'.
        sources (BIDSLayout, list): Optional BIDSLayout(s) from which the
            current BIDSLayout is derived.
        ignore (str, SRE_Pattern, list): Path(s) to exclude from indexing.
            Each path is either a string or an SRE_Pattern object (i.e., a
            compiled regular expression). If a string is passed, it must be
            either an absolute path or a path relative to the BIDS project
            root. If an SRE_Pattern is passed, the contained regular
            expression will be matched against the full (absolute) path of
            all files and directories.
        force_index (str, SRE_Pattern, list): Path(s) to forcibly index in the
            BIDSLayout, even if they would otherwise fail validation. See the
            documentation for the ignore argument for input format details.
            Note that paths in force_index take precedence over those in
            ignore (i.e., if a file matches both ignore and force_index, it
            *will* be indexed).
        config_filename (str): Optional name of the file (within individual
            directories) that contains configuration information.
        regex_search (bool): Whether to require exact matching (False,
            default) or regex search (True) when comparing the query string
            to each entity in .get() calls. This sets a default for the
            instance, but can be overridden in individual .get() requests.
""" _default_ignore = {"code", "stimuli", "sourcedata", "models", "derivatives", re.compile(r'^\.')} def __init__(self, root, validate=True, index_associated=True, absolute_paths=True, derivatives=False, config=None, sources=None, ignore=None, force_index=None, config_filename='layout_config.json', regex_search=False): self.root = root self._validator = BIDSValidator(index_associated=index_associated) self.validate = validate self.absolute_paths = absolute_paths self.derivatives = {} self.sources = sources self.regex_search = regex_search self.metadata_index = MetadataIndex(self) self.config_filename = config_filename self.files = {} self.nodes = [] self.entities = {} self.ignore = [os.path.abspath(os.path.join(self.root, patt)) if isinstance(patt, six.string_types) else patt for patt in listify(ignore or [])] self.force_index = [os.path.abspath(os.path.join(self.root, patt)) if isinstance(patt, six.string_types) else patt for patt in listify(force_index or [])] # Do basic BIDS validation on root directory self._validate_root() # Initialize the BIDS validator and examine ignore/force_index args self._setup_file_validator() # Set up configs if config is None: config = 'bids' config = [Config.load(c) for c in listify(config)] self.config = {c.name: c for c in config} self.root_node = BIDSRootNode(self.root, config, self) # Consolidate entities into master list. Note: no conflicts occur b/c # multiple entries with the same name all point to the same instance. for n in self.nodes: self.entities.update(n.available_entities) # Add derivatives if any are found if derivatives: if derivatives is True: derivatives = os.path.join(root, 'derivatives') self.add_derivatives( derivatives, validate=validate, index_associated=index_associated, absolute_paths=absolute_paths, derivatives=None, config=None, sources=self, ignore=ignore, force_index=force_index) def _validate_root(self): # Validate root argument and make sure it contains mandatory info try: self.root = str(self.root) except: raise TypeError("root argument must be a string (or a type that " "supports casting to string, such as pathlib.Path)" " specifying the directory containing the BIDS dataset.") self.root = os.path.abspath(self.root) if not os.path.exists(self.root): raise ValueError("BIDS root does not exist: %s" % self.root) target = os.path.join(self.root, 'dataset_description.json') if not os.path.exists(target): if self.validate: raise ValueError( "'dataset_description.json' is missing from project root." " Every valid BIDS dataset must have this file.") else: self.description = None else: with open(target, 'r', encoding='utf-8') as desc_fd: self.description = json.load(desc_fd) if self.validate: for k in ['Name', 'BIDSVersion']: if k not in self.description: raise ValueError("Mandatory '%s' field missing from " "dataset_description.json." % k) def _setup_file_validator(self): # Derivatives get special handling; they shouldn't be indexed normally if self.force_index is not None: for entry in self.force_index: if (isinstance(entry, six.string_types) and os.path.normpath(entry).startswith('derivatives')): msg = ("Do not pass 'derivatives' in the force_index " "list. To index derivatives, either set " "derivatives=True, or use add_derivatives().") raise ValueError(msg) def _validate_dir(self, d): return not check_path_matches_patterns(d, self.ignore) def _validate_file(self, f): # Validate a file. 
if check_path_matches_patterns(f, self.force_index): return True if check_path_matches_patterns(f, self.ignore): return False if not self.validate: return True # Derivatives are currently not validated. # TODO: raise warning the first time in a session this is encountered if 'derivatives' in self.config: return True # BIDS validator expects absolute paths, but really these are relative # to the BIDS project root. to_check = os.path.relpath(f, self.root) to_check = os.path.join(os.path.sep, to_check) return self._validator.is_bids(to_check) def _get_layouts_in_scope(self, scope): # Determine which BIDSLayouts to search layouts = [] scope = listify(scope) if 'all' in scope or 'raw' in scope: layouts.append(self) for deriv in self.derivatives.values(): if ('all' in scope or 'derivatives' in scope or deriv.description["PipelineDescription"]['Name'] in scope): layouts.append(deriv) return layouts def __getattr__(self, key): ''' Dynamically inspect missing methods for get_<entity>() calls and return a partial function of get() if a match is found. ''' if key.startswith('get_'): ent_name = key.replace('get_', '') # Use inflect to check both singular and plural forms if ent_name not in self.entities: sing = inflect.engine().singular_noun(ent_name) if sing in self.entities: ent_name = sing else: raise AttributeError( "'get_{}' can't be called because '{}' isn't a " "recognized entity name.".format(ent_name, ent_name)) return partial(self.get, return_type='id', target=ent_name) # Spit out default message if we get this far raise AttributeError("%s object has no attribute named %r" % (self.__class__.__name__, key)) def __repr__(self): # A tidy summary of key properties n_sessions = len([session for isub in self.get_subjects() for session in self.get_sessions(subject=isub)]) n_runs = len([run for isub in self.get_subjects() for run in self.get_runs(subject=isub)]) n_subjects = len(self.get_subjects()) root = self.root[-30:] s = ("BIDS Layout: ...{} | Subjects: {} | Sessions: {} | " "Runs: {}".format(root, n_subjects, n_sessions, n_runs)) return s def clone(self): """ Return a deep copy of the current BIDSLayout. """ return copy.deepcopy(self) def parse_file_entities(self, filename, scope='all', entities=None, config=None, include_unmatched=False): ''' Parse the passed filename for entity/value pairs. Args: filename (str): The filename to parse for entity values scope (str, list): The scope of the search space. Indicates which BIDSLayouts' entities to extract. See BIDSLayout docstring for valid values. By default, extracts all entities entities (list): An optional list of Entity instances to use in extraction. If passed, the scope and config arguments are ignored, and only the Entities in this list are used. config (str, Config, list): One or more Config objects, or paths to JSON config files on disk, containing the Entity definitions to use in extraction. If passed, scope is ignored. include_unmatched (bool): If True, unmatched entities are included in the returned dict, with values set to None. If False (default), unmatched entities are ignored. Returns: A dict, where keys are Entity names and values are the values extracted from the filename. 
        '''
        # If either entities or config is specified, just pass through
        if entities is None and config is None:
            layouts = self._get_layouts_in_scope(scope)
            config = chain(*[list(l.config.values()) for l in layouts])
            config = list(set(config))
        return parse_file_entities(filename, entities, config,
                                   include_unmatched)

    def add_derivatives(self, path, **kwargs):
        ''' Add BIDS-Derivatives datasets to tracking.

        Args:
            path (str, list): One or more paths to BIDS-Derivatives datasets.
                Each path can point to either a derivatives/ directory
                containing one or more pipeline directories, or to a single
                pipeline directory (e.g., derivatives/fmriprep).
            kwargs (dict): Optional keyword arguments to pass on to
                BIDSLayout() when initializing each of the derivative
                datasets.

        Note: Every derivatives directory intended for indexing MUST contain
            a valid dataset_description.json file. See the BIDS-Derivatives
            specification for details.
        '''
        paths = listify(path)
        deriv_dirs = []

        # Collect all paths that contain a dataset_description.json
        def check_for_description(dir):
            dd = os.path.join(dir, 'dataset_description.json')
            return os.path.exists(dd)

        for p in paths:
            p = os.path.abspath(p)
            if os.path.exists(p):
                if check_for_description(p):
                    deriv_dirs.append(p)
                else:
                    subdirs = [d for d in os.listdir(p)
                               if os.path.isdir(os.path.join(p, d))]
                    for sd in subdirs:
                        sd = os.path.join(p, sd)
                        if check_for_description(sd):
                            deriv_dirs.append(sd)

        if not deriv_dirs:
            warnings.warn("Derivative indexing was enabled, but no valid "
                          "derivatives datasets were found in any of the "
                          "provided or default locations. Please make sure "
                          "all derivatives datasets you intend to index "
                          "contain a 'dataset_description.json' file, as "
                          "described in the BIDS-derivatives specification.")

        for deriv in deriv_dirs:
            dd = os.path.join(deriv, 'dataset_description.json')
            with open(dd, 'r', encoding='utf-8') as ddfd:
                description = json.load(ddfd)
            pipeline_name = description.get(
                'PipelineDescription', {}).get('Name')
            if pipeline_name is None:
                raise ValueError("Every valid BIDS-derivatives dataset must "
                                 "have a PipelineDescription.Name field set "
                                 "inside dataset_description.json.")
            if pipeline_name in self.derivatives:
                raise ValueError("Pipeline name '%s' has already been added "
                                 "to this BIDSLayout. Every added pipeline "
                                 "must have a unique name!" % pipeline_name)
            # Default config and sources values
            kwargs['config'] = kwargs.get('config') or ['bids', 'derivatives']
            kwargs['sources'] = kwargs.get('sources') or self
            self.derivatives[pipeline_name] = BIDSLayout(deriv, **kwargs)

        # Consolidate all entities post-indexing. Note: no conflicts occur b/c
        # multiple entries with the same name all point to the same instance.
        for deriv in self.derivatives.values():
            self.entities.update(deriv.entities)

    def to_df(self, **kwargs):
        """ Return information for all BIDSFiles tracked in the Layout as a
        pandas DataFrame.

        Args:
            kwargs: Optional keyword arguments passed on to get(). This allows
                one to easily select only a subset of files for export.

        Returns:
            A pandas DataFrame, where each row is a file, and each column is
            a tracked entity. NaNs are injected whenever a file has no value
            for a given attribute.
        """
        try:
            import pandas as pd
        except ImportError:
            raise ImportError("What are you doing trying to export a BIDSLayout"
                              " as a pandas DataFrame when you don't have "
                              "pandas installed? Eh?
Eh?") files = self.get(return_type='obj', **kwargs) data = pd.DataFrame.from_records([f.entities for f in files]) data.insert(0, 'path', [f.path for f in files]) return data def get(self, return_type='object', target=None, extensions=None, scope='all', regex_search=False, defined_fields=None, absolute_paths=None, **kwargs): """ Retrieve files and/or metadata from the current Layout. Args: return_type (str): Type of result to return. Valid values: 'object' (default): return a list of matching BIDSFile objects. 'file': return a list of matching filenames. 'dir': return a list of directories. 'id': return a list of unique IDs. Must be used together with a valid target. target (str): Optional name of the target entity to get results for (only used if return_type is 'dir' or 'id'). extensions (str, list): One or more file extensions to filter on. BIDSFiles with any other extensions will be excluded. scope (str, list): Scope of the search space. If passed, only nodes/directories that match the specified scope will be searched. Possible values include: 'all' (default): search all available directories. 'derivatives': search all derivatives directories 'raw': search only BIDS-Raw directories <PipelineName>: the name of a BIDS-Derivatives pipeline regex_search (bool or None): Whether to require exact matching (False) or regex search (True) when comparing the query string to each entity. defined_fields (list): Optional list of names of metadata fields that must be defined in JSON sidecars in order to consider the file a match, but which don't need to match any particular value. absolute_paths (bool): Optionally override the instance-wide option to report either absolute or relative (to the top of the dataset) paths. If None, will fall back on the value specified at BIDSLayout initialization. kwargs (dict): Any optional key/values to filter the entities on. Keys are entity names, values are regexes to filter on. For example, passing filter={'subject': 'sub-[12]'} would return only files that match the first two subjects. Returns: A list of BIDSFiles (default) or strings (see return_type). Notes: As of pybids 0.7.0 some keywords have been changed. Namely: 'type' becomes 'suffix', 'modality' becomes 'datatype', 'acq' becomes 'acquisition' and 'mod' becomes 'modality'. Using the wrong version could result in get() silently returning wrong or no results. See the changelog for more details. """ # Warn users still expecting 0.6 behavior if 'type' in kwargs: raise ValueError("As of pybids 0.7.0, the 'type' argument has been" " replaced with 'suffix'.") layouts = self._get_layouts_in_scope(scope) # Create concatenated file, node, and entity lists files, entities, nodes = {}, {}, [] for l in layouts: files.update(l.files) entities.update(l.entities) nodes.extend(l.nodes) # Separate entity kwargs from metadata kwargs ent_kwargs, md_kwargs = {}, {} for k, v in kwargs.items(): if k in entities: ent_kwargs[k] = v else: md_kwargs[k] = v # Provide some suggestions if target is specified and invalid. if target is not None and target not in entities: import difflib potential = list(entities.keys()) suggestions = difflib.get_close_matches(target, potential) if suggestions: message = "Did you mean one of: {}?".format(suggestions) else: message = "Valid targets are: {}".format(potential) raise ValueError(("Unknown target '{}'. 
" + message) .format(target)) results = [] # Search on entities filters = ent_kwargs.copy() for f in files.values(): if f._matches(filters, extensions, regex_search): results.append(f) # Search on metadata if return_type not in {'dir', 'id'}: if md_kwargs: results = [f.path for f in results] results = self.metadata_index.search(results, defined_fields, **md_kwargs) results = [files[f] for f in results] # Convert to relative paths if needed if absolute_paths is None: # can be overloaded as option to .get absolute_paths = self.absolute_paths if not absolute_paths: for i, f in enumerate(results): f = copy.copy(f) f.path = os.path.relpath(f.path, self.root) results[i] = f if return_type == 'file': results = natural_sort([f.path for f in results]) elif return_type in ['id', 'dir']: if target is None: raise ValueError('If return_type is "id" or "dir", a valid ' 'target entity must also be specified.') results = [x for x in results if target in x.entities] if return_type == 'id': results = list(set([x.entities[target] for x in results])) results = natural_sort(results) elif return_type == 'dir': template = entities[target].directory if template is None: raise ValueError('Return type set to directory, but no ' 'directory template is defined for the ' 'target entity (\"%s\").' % target) # Construct regex search pattern from target directory template template = self.root + template to_rep = re.findall(r'\{(.*?)\}', template) for ent in to_rep: patt = entities[ent].pattern template = template.replace('{%s}' % ent, patt) template += r'[^\%s]*$' % os.path.sep matches = [ f.dirname if absolute_paths else os.path.relpath(f.dirname, self.root) for f in results if re.search(template, f.dirname) ] results = natural_sort(list(set(matches))) else: raise ValueError("Invalid return_type specified (must be one " "of 'tuple', 'file', 'id', or 'dir'.") else: results = natural_sort(results, 'path') return results def get_file(self, filename, scope='all'): ''' Returns the BIDSFile object with the specified path. Args: filename (str): The path of the file to retrieve. Must be either an absolute path, or relative to the root of this BIDSLayout. scope (str, list): Scope of the search space. If passed, only BIDSLayouts that match the specified scope will be searched. See BIDSLayout docstring for valid values. Returns: A BIDSFile, or None if no match was found. ''' filename = os.path.abspath(os.path.join(self.root, filename)) layouts = self._get_layouts_in_scope(scope) for ly in layouts: if filename in ly.files: return ly.files[filename] return None def get_collections(self, level, types=None, variables=None, merge=False, sampling_rate=None, skip_empty=False, **kwargs): """Return one or more variable Collections in the BIDS project. Args: level (str): The level of analysis to return variables for. Must be one of 'run', 'session', 'subject', or 'dataset'. types (str, list): Types of variables to retrieve. All valid values reflect the filename stipulated in the BIDS spec for each kind of variable. Valid values include: 'events', 'physio', 'stim', 'scans', 'participants', 'sessions', and 'regressors'. variables (list): Optional list of variables names to return. If None, all available variables are returned. merge (bool): If True, variables are merged across all observations of the current level. E.g., if level='subject', variables from all subjects will be merged into a single collection. If False, each observation is handled separately, and the result is returned as a list. 
sampling_rate (int, str): If level='run', the sampling rate to pass onto the returned BIDSRunVariableCollection. skip_empty (bool): Whether or not to skip empty Variables (i.e., where there are no rows/records in a file after applying any filtering operations like dropping NaNs). kwargs: Optional additional arguments to pass onto load_variables. """ from bids.variables import load_variables index = load_variables(self, types=types, levels=level, skip_empty=skip_empty, **kwargs) return index.get_collections(level, variables, merge, sampling_rate=sampling_rate) def get_metadata(self, path, include_entities=False, **kwargs): """Return metadata found in JSON sidecars for the specified file. Args: path (str): Path to the file to get metadata for. include_entities (bool): If True, all available entities extracted from the filename (rather than JSON sidecars) are included in the returned metadata dictionary. kwargs (dict): Optional keyword arguments to pass onto get_nearest(). Returns: A dictionary of key/value pairs extracted from all of the target file's associated JSON sidecars. Notes: A dictionary containing metadata extracted from all matching .json files is returned. In cases where the same key is found in multiple files, the values in files closer to the input filename will take precedence, per the inheritance rules in the BIDS specification. """ f = self.get_file(path) # For querying efficiency, store metadata in the MetadataIndex cache self.metadata_index.index_file(f.path) if include_entities: entities = f.entities results = entities else: results = {} results.update(self.metadata_index.file_index[path]) return results def get_nearest(self, path, return_type='file', strict=True, all_=False, ignore_strict_entities=None, full_search=False, **kwargs): ''' Walk up the file tree from the specified path and return the nearest matching file(s). Args: path (str): The file to search from. return_type (str): What to return; must be one of 'file' (default) or 'tuple'. strict (bool): When True, all entities present in both the input path and the target file(s) must match perfectly. When False, files will be ordered by the number of matching entities, and partial matches will be allowed. all_ (bool): When True, returns all matching files. When False (default), only returns the first match. ignore_strict_entities (list): Optional list of entities to exclude from strict matching when strict is True. This allows one to search, e.g., for files of a different type while matching all other entities perfectly by passing ignore_strict_entities=['type']. full_search (bool): If True, searches all indexed files, even if they don't share a common root with the provided path. If False, only files that share a common root will be scanned. kwargs: Optional keywords to pass on to .get(). ''' path = os.path.abspath(path) # Make sure we have a valid suffix suffix = kwargs.get('suffix') if not suffix: f = self.get_file(path) if 'suffix' not in f.entities: raise ValueError( "File '%s' does not have a valid suffix, most " "likely because it is not a valid BIDS file." 
% path ) suffix = f.entities['suffix'] kwargs['suffix'] = suffix # Collect matches for all entities entities = {} for ent in self.entities.values(): m = ent.regex.search(path) if m: entities[ent.name] = ent._astype(m.group(1)) # Remove any entities we want to ignore when strict matching is on if strict and ignore_strict_entities is not None: for k in ignore_strict_entities: entities.pop(k, None) results = self.get(return_type='object', **kwargs) # Make a dictionary of directories --> contained files folders = defaultdict(list) for f in results: folders[f.dirname].append(f) # Build list of candidate directories to check search_paths = [] while True: if path in folders and folders[path]: search_paths.append(path) parent = os.path.dirname(path) if parent == path: break path = parent if full_search: unchecked = set(folders.keys()) - set(search_paths) search_paths.extend(path for path in unchecked if folders[path]) def count_matches(f): # Count the number of entities shared with the passed file f_ents = f.entities keys = set(entities.keys()) & set(f_ents.keys()) shared = len(keys) return [shared, sum([entities[k] == f_ents[k] for k in keys])] matches = [] for path in search_paths: # Sort by number of matching entities. Also store number of # common entities, for filtering when strict=True. num_ents = [[f] + count_matches(f) for f in folders[path]] # Filter out imperfect matches (i.e., where number of common # entities does not equal number of matching entities). if strict: num_ents = [f for f in num_ents if f[1] == f[2]] num_ents.sort(key=lambda x: x[2], reverse=True) if num_ents: for f_match in num_ents: matches.append(f_match[0]) if not all_: break matches = [m.path if return_type == 'file' else m for m in matches] return matches if all_ else matches[0] if matches else None def get_bvec(self, path, **kwargs): """ Get bvec file for passed path. """ result = self.get_nearest(path, extensions='bvec', suffix='dwi', all_=True, **kwargs) return listify(result)[0] def get_bval(self, path, **kwargs): """ Get bval file for passed path. """ result = self.get_nearest(path, extensions='bval', suffix='dwi', all_=True, **kwargs) return listify(result)[0] def get_fieldmap(self, path, return_list=False): """ Get fieldmap(s) for specified path. """ fieldmaps = self._get_fieldmaps(path) if return_list: return fieldmaps else: if len(fieldmaps) == 1: return fieldmaps[0] elif len(fieldmaps) > 1: raise ValueError("More than one fieldmap found, but the " "'return_list' argument was set to False. 
" "Either ensure that there is only one " "fieldmap for this image, or set the " "'return_list' argument to True and handle " "the result as a list.") else: # len(fieldmaps) == 0 return None def _get_fieldmaps(self, path): sub = self.parse_file_entities(path)['subject'] fieldmap_set = [] suffix = '(phase1|phasediff|epi|fieldmap)' files = self.get(subject=sub, suffix=suffix, regex_search=True, extensions=['nii.gz', 'nii']) for file in files: metadata = self.get_metadata(file.path) if metadata and "IntendedFor" in metadata.keys(): intended_for = listify(metadata["IntendedFor"]) if any([path.endswith(_suff) for _suff in intended_for]): cur_fieldmap = {} if file.entities['suffix'] == "phasediff": cur_fieldmap = {"phasediff": file.path, "magnitude1": file.path.replace( "phasediff", "magnitude1"), "suffix": "phasediff"} magnitude2 = file.path.replace( "phasediff", "magnitude2") if os.path.isfile(magnitude2): cur_fieldmap['magnitude2'] = magnitude2 elif file.entities['suffix'] == "phase1": cur_fieldmap["phase1"] = file.path cur_fieldmap["magnitude1"] = \ file.path.replace("phase1", "magnitude1") cur_fieldmap["phase2"] = \ file.path.replace("phase1", "phase2") cur_fieldmap["magnitude2"] = \ file.path.replace("phase1", "magnitude2") cur_fieldmap["suffix"] = "phase" elif file.entities['suffix'] == "epi": cur_fieldmap["epi"] = file.path cur_fieldmap["suffix"] = "epi" elif file.entities['suffix'] == "fieldmap": cur_fieldmap["fieldmap"] = file.path cur_fieldmap["magnitude"] = \ file.path.replace("fieldmap", "magnitude") cur_fieldmap["suffix"] = "fieldmap" fieldmap_set.append(cur_fieldmap) return fieldmap_set def get_tr(self, derivatives=False, **selectors): """ Returns the scanning repetition time (TR) for one or more runs. Args: derivatives (bool): If True, also checks derivatives images. selectors: Optional keywords used to constrain the selected runs. Can be any arguments valid for a .get call (e.g., BIDS entities or JSON sidecar keys). Returns: A single float. Notes: Raises an exception if more than one unique TR is found. """ # Constrain search to functional images selectors.update(suffix='bold', datatype='func') scope = None if derivatives else 'raw' images = self.get(extensions=['.nii', '.nii.gz'], scope=scope, **selectors) if not images: raise ValueError("No functional images that match criteria found.") all_trs = set() for img in images: md = self.get_metadata(img.path, suffix='bold', full_search=True) all_trs.add(round(float(md['RepetitionTime']), 5)) if len(all_trs) > 1: raise ValueError("Unique TR cannot be found given selectors {!r}" .format(selectors)) return all_trs.pop() def build_path(self, source, path_patterns=None, strict=False, scope='all'): ''' Constructs a target filename for a file or dictionary of entities. Args: source (str, BIDSFile, dict): The source data to use to construct the new file path. Must be one of: - A BIDSFile object - A string giving the path of a BIDSFile contained within the current Layout. - A dict of entities, with entity names in keys and values in values path_patterns (list): Optional path patterns to use to construct the new file path. If None, the Layout-defined patterns will be used. Entities should be represented by the name surrounded by curly braces. Optional portions of the patterns should be denoted by square brackets. Entities that require a specific value for the pattern to match can pass them inside carets. Default values can be assigned by specifying a string after the pipe operator. 
E.g., (e.g., {type<image>|bold} would only match the pattern if the entity 'type' was passed and its value is "image", otherwise the default value "bold" will be used). Example: 'sub-{subject}/[var-{name}/]{id}.csv' Result: 'sub-01/var-SES/1045.csv' strict (bool): If True, all entities must be matched inside a pattern in order to be a valid match. If False, extra entities will be ignored so long as all mandatory entities are found. scope (str, list): The scope of the search space. Indicates which BIDSLayouts' path patterns to use. See BIDSLayout docstring for valid values. By default, uses all available layouts. If two or more values are provided, the order determines the precedence of path patterns (i.e., earlier layouts will have higher precedence). ''' # 'is_file' is a crude check for Path objects if isinstance(source, six.string_types) or hasattr(source, 'is_file'): source = str(source) if source not in self.files: source = os.path.join(self.root, source) source = self.get_file(source) if isinstance(source, BIDSFile): source = source.entities if path_patterns is None: layouts = self._get_layouts_in_scope(scope) path_patterns = [] seen_configs = set() for l in layouts: for c in l.config.values(): if c in seen_configs: continue path_patterns.extend(c.default_path_patterns) seen_configs.add(c) return build_path(source, path_patterns, strict) def copy_files(self, files=None, path_patterns=None, symbolic_links=True, root=None, conflicts='fail', **kwargs): """ Copies one or more BIDSFiles to new locations defined by each BIDSFile's entities and the specified path_patterns. Args: files (list): Optional list of BIDSFile objects to write out. If none provided, use files from running a get() query using remaining **kwargs. path_patterns (str, list): Write patterns to pass to each file's write_file method. symbolic_links (bool): Whether to copy each file as a symbolic link or a deep copy. root (str): Optional root directory that all patterns are relative to. Defaults to current working directory. conflicts (str): Defines the desired action when the output path already exists. Must be one of: 'fail': raises an exception 'skip' does nothing 'overwrite': overwrites the existing file 'append': adds a suffix to each file copy, starting with 1 kwargs (kwargs): Optional key word arguments to pass into a get() query. """ _files = self.get(return_type='objects', **kwargs) if files: _files = list(set(files).intersection(_files)) for f in _files: f.copy(path_patterns, symbolic_link=symbolic_links, root=self.root, conflicts=conflicts) def write_contents_to_file(self, entities, path_patterns=None, contents=None, link_to=None, content_mode='text', conflicts='fail', strict=False): """ Write arbitrary data to a file defined by the passed entities and path patterns. Args: entities (dict): A dictionary of entities, with Entity names in keys and values for the desired file in values. path_patterns (list): Optional path patterns to use when building the filename. If None, the Layout-defined patterns will be used. contents (object): Contents to write to the generate file path. Can be any object serializable as text or binary data (as defined in the content_mode argument). link_to (str): Optional path with which to create a symbolic link to. Used as an alternative to and takes priority over the contents argument. conflicts (str): Defines the desired action when the output path already exists. 
                Must be one of:
                    'fail': raises an exception
                    'skip': does nothing
                    'overwrite': overwrites the existing file
                    'append': adds a suffix to each file copy, starting with 1
            strict (bool): If True, all entities must be matched inside a
                pattern in order to be a valid match. If False, extra entities
                will be ignored so long as all mandatory entities are found.
        """
        path = self.build_path(entities, path_patterns, strict)
        if path is None:
            raise ValueError("Cannot construct any valid filename for "
                             "the passed entities given available path "
                             "patterns.")
        write_contents_to_file(path, contents=contents, link_to=link_to,
                               content_mode=content_mode, conflicts=conflicts,
                               root=self.root)
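# Usage sketch (illustrative, not part of the original module): the query
# methods defined on BIDSLayout above are typically combined as below. The
# dataset path, and the assumption that it contains at least one subject with
# BOLD runs, are hypothetical; only APIs documented in this class are used.
def _example_layout_usage():
    layout = BIDSLayout('/data/ds001', validate=True)
    # Dynamic getters resolve through __getattr__ to get(return_type='id', ...)
    subjects = layout.get_subjects()
    # Entity filters are passed as keywords; values are treated as regexes
    bold_files = layout.get(subject=subjects[0], suffix='bold',
                            extensions=['.nii', '.nii.gz'],
                            return_type='file')
    # Sidecar metadata is merged across the JSON inheritance chain
    metadata = layout.get_metadata(bold_files[0])
    # Entities can be parsed back out of any filename
    entities = layout.parse_file_entities(bold_files[0])
    # Fieldmaps whose IntendedFor lists this run are returned as dicts
    fmaps = layout.get_fieldmap(bold_files[0], return_list=True)
    return metadata, entities, fmaps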
class BIDSLayoutIndexer(object): """ Indexer class for BIDSLayout. Args: layout (BIDSLayout): The BIDSLayout to index. """ def __init__(self, layout): self.layout = layout self.session = layout.session self.validate = layout.validate self.root = layout.root self.config_filename = layout.config_filename self.validator = BIDSValidator(index_associated=True) # Create copies of list attributes we'll modify during indexing self.config = list(layout.config.values()) self.include_patterns = list(layout.force_index) self.exclude_patterns = list(layout.ignore) def _validate_dir(self, d, default=None): if _check_path_matches_patterns(d, self.include_patterns): return True if _check_path_matches_patterns(d, self.exclude_patterns): return False return default def _validate_file(self, f, default=None): # Inclusion takes priority over exclusion if _check_path_matches_patterns(f, self.include_patterns): return True if _check_path_matches_patterns(f, self.exclude_patterns): return False # If inclusion/exclusion is inherited from a parent directory, that # takes precedence over the remaining file-level rules if default is not None: return default # Derivatives are currently not validated. # TODO: raise warning the first time in a session this is encountered if not self.validate or 'derivatives' in self.layout.config: return True # BIDS validator expects absolute paths, but really these are relative # to the BIDS project root. to_check = os.path.relpath(f, self.root) to_check = os.path.join(os.path.sep, to_check) return self.validator.is_bids(to_check) def _index_dir(self, path, config, default_action=None): abs_path = os.path.join(self.root, path) # Derivative directories must always be added separately # and passed as their own root, so terminate if passed. if abs_path.startswith(os.path.join(self.root, 'derivatives')): return config = list(config) # Shallow copy # Check for additional config file in directory layout_file = self.config_filename config_file = os.path.join(abs_path, layout_file) if os.path.exists(config_file): cfg = Config.load(config_file, session=self.session) config.append(cfg) # Track which entities are valid in filenames for this directory config_entities = {} for c in config: config_entities.update(c.entities) for (dirpath, dirnames, filenames) in os.walk(path): # Set the default inclusion/exclusion directive default = self._validate_dir(dirpath, default=default_action) # If layout configuration file exists, delete it if self.config_filename in filenames: filenames.remove(self.config_filename) for f in filenames: bf = self._index_file(f, dirpath, config_entities, default_action=default) if bf is None: continue self.session.commit() # Recursively index subdirectories for d in dirnames: d = os.path.join(dirpath, d) self._index_dir(d, list(config), default_action=default) # prevent subdirectory traversal break def _index_file(self, f, dirpath, entities, default_action=None): """Create DB record for file and its tags. 
""" abs_fn = os.path.join(dirpath, f) # Skip files that fail validation, unless forcibly indexing if not self._validate_file(abs_fn, default=default_action): return None bf = make_bidsfile(abs_fn) self.session.add(bf) # Extract entity values match_vals = {} for e in entities.values(): m = e.match_file(bf) if m is None and e.mandatory: break if m is not None: match_vals[e.name] = (e, m) # Create Entity <=> BIDSFile mappings if match_vals: for _, (ent, val) in match_vals.items(): tag = Tag(bf, ent, str(val), ent._dtype) self.session.add(tag) return bf def index_files(self): """Index all files in the BIDS dataset. """ self._index_dir(self.root, self.config) def index_metadata(self): """Index metadata for all files in the BIDS dataset. """ # Process JSON files first if we're indexing metadata all_files = self.layout.get(absolute_paths=True) # Track ALL entities we've seen in file names or metadatas all_entities = {} for c in self.config: all_entities.update(c.entities) # If key/value pairs in JSON files duplicate ones extracted from files, # we can end up with Tag collisions in the DB. To prevent this, we # store all filename/entity pairs and the value, and then check against # that before adding each new Tag. all_tags = {} for t in self.session.query(Tag).all(): key = '{}_{}'.format(t.file_path, t.entity_name) all_tags[key] = str(t.value) # We build up a store of all file data as we iterate files. It looks # like: { extension/suffix: dirname: [(entities, payload)]}}. # The payload is left empty for non-JSON files. file_data = {} for bf in all_files: file_ents = bf.entities.copy() suffix = file_ents.pop('suffix', None) ext = file_ents.pop('extension', None) if suffix is not None and ext is not None: key = "{}/{}".format(ext, suffix) if key not in file_data: file_data[key] = defaultdict(list) if ext == 'json': with open(bf.path, 'r') as handle: try: payload = json.load(handle) except json.JSONDecodeError as e: msg = ("Error occurred while trying to decode JSON" " from file '{}'.".format(bf.path)) six.raise_from(IOError(msg), e) else: payload = None to_store = (file_ents, payload, bf.path) file_data[key][bf.dirname].append(to_store) # To avoid integrity errors, track primary keys we've seen seen_assocs = set() def create_association_pair(src, dst, kind, kind2=None): kind2 = kind2 or kind pk1 = '#'.join([src, dst, kind]) if pk1 not in seen_assocs: self.session.add(FileAssociation(src=src, dst=dst, kind=kind)) seen_assocs.add(pk1) pk2 = '#'.join([dst, src, kind2]) if pk2 not in seen_assocs: self.session.add(FileAssociation(src=dst, dst=src, kind=kind2)) seen_assocs.add(pk2) # TODO: Efficiency of everything in this loop could be improved filenames = [bf for bf in all_files if not bf.path.endswith('.json')] for bf in filenames: file_ents = bf.entities.copy() suffix = file_ents.pop('suffix', None) ext = file_ents.pop('extension', None) file_ent_keys = set(file_ents.keys()) if suffix is None or ext is None: continue # Extract metadata associated with the file. The idea is # that we loop over parent directories, and if we find # payloads in the file_data store (indexing by directory # and current file suffix), we check to see if the # candidate JS file's entities are entirely consumed by # the current file. If so, it's a valid candidate, and we # add the payload to the stack. Finally, we invert the # stack and merge the payloads in order. 
ext_key = "{}/{}".format(ext, suffix) json_key = "json/{}".format(suffix) dirname = bf.dirname payloads = [] ancestors = [] while True: # Get JSON payloads json_data = file_data.get(json_key, {}).get(dirname, []) for js_ents, js_md, js_path in json_data: js_keys = set(js_ents.keys()) if js_keys - file_ent_keys: continue matches = [ js_ents[name] == file_ents[name] for name in js_keys ] if all(matches): payloads.append((js_md, js_path)) # Get all files this file inherits from candidates = file_data.get(ext_key, {}).get(dirname, []) for ents, _, path in candidates: keys = set(ents.keys()) if keys - file_ent_keys: continue matches = [ents[name] == file_ents[name] for name in keys] if all(matches): ancestors.append(path) parent = os.path.dirname(dirname) if parent == dirname: break dirname = parent if not payloads: continue # Create DB records for metadata associations js_file = payloads[-1][1] create_association_pair(js_file, bf.path, 'Metadata') # Consolidate metadata by looping over inherited JSON files file_md = {} for pl, js_file in payloads[::-1]: file_md.update(pl) # Create FileAssociation records for JSON inheritance n_pl = len(payloads) for i, (pl, js_file) in enumerate(payloads): if (i + 1) < n_pl: other = payloads[i + 1][1] create_association_pair(js_file, other, 'Child', 'Parent') # Inheritance for current file n_pl = len(ancestors) for i, src in enumerate(ancestors): if (i + 1) < n_pl: dst = ancestors[i + 1] create_association_pair(src, dst, 'Child', 'Parent') # Files with IntendedFor field always get mapped to targets intended = listify(file_md.get('IntendedFor', [])) for target in intended: # Per spec, IntendedFor paths are relative to sub dir. target = os.path.join(self.root, 'sub-{}'.format(bf.entities['subject']), target) create_association_pair(bf.path, target, 'IntendedFor', 'InformedBy') # Link files to BOLD runs if suffix in ['physio', 'stim', 'events', 'sbref']: images = self.layout.get(extension=['nii', 'nii.gz'], suffix='bold', return_type='filename', **file_ents) for img in images: create_association_pair(bf.path, img, 'IntendedFor', 'InformedBy') # Link files to DWI runs if suffix == 'sbref' or ext in ['bvec', 'bval']: images = self.layout.get(extension=['nii', 'nii.gz'], suffix='dwi', return_type='filename', **file_ents) for img in images: create_association_pair(bf.path, img, 'IntendedFor', 'InformedBy') # Create Tag <-> Entity mappings, and any newly discovered Entities for md_key, md_val in file_md.items(): tag_string = '{}_{}'.format(bf.path, md_key) # Skip pairs that were already found in the filenames if tag_string in all_tags: file_val = all_tags[tag_string] if str(md_val) != file_val: msg = ( "Conflicting values found for entity '{}' in " "filename {} (value='{}') versus its JSON sidecar " "(value='{}'). Please reconcile this discrepancy.") raise ValueError( msg.format(md_key, bf.path, file_val, md_val)) continue if md_key not in all_entities: all_entities[md_key] = Entity(md_key, is_metadata=True) self.session.add(all_entities[md_key]) tag = Tag(bf, all_entities[md_key], md_val) self.session.add(tag) if len(self.session.new) >= 1000: self.session.commit() self.session.commit()
class BIDSLayout(object): """ Layout class representing an entire BIDS dataset. Args: root (str): The root directory of the BIDS dataset. validate (bool): If True, all files are checked for BIDS compliance when first indexed, and non-compliant files are ignored. This provides a convenient way to restrict file indexing to only those files defined in the "core" BIDS spec, as setting validate=True will lead files in supplementary folders like derivatives/, code/, etc. to be ignored. index_associated (bool): Argument passed onto the BIDSValidator; ignored if validate = False. absolute_paths (bool): If True, queries always return absolute paths. If False, queries return relative paths, unless the root argument was left empty (in which case the root defaults to the file system root). derivatives (bool, str, list): Specifies whether and/or which derivatives to to index. If True, all pipelines found in the derivatives/ subdirectory will be indexed. If a str or list, gives the paths to one or more derivatives directories to index. If False or None, the derivatives/ directory is ignored during indexing, and derivatives will have to be added manually via add_derivatives(). config (str, list): Optional name(s) of configuration file(s) to use. By default (None), uses 'bids'. sources (BIDLayout, list): Optional BIDSLayout(s) from which the current BIDSLayout is derived. ignore (str, SRE_Pattern, list): Path(s) to exclude from indexing. Each path is either a string or a SRE_Pattern object (i.e., compiled regular expression). If a string is passed, it must be either an absolute path, or be relative to the BIDS project root. If an SRE_Pattern is passed, the contained regular expression will be matched against the full (absolute) path of all files and directories. force_index (str, SRE_Pattern, list): Path(s) to forcibly index in the BIDSLayout, even if they would otherwise fail validation. See the documentation for the ignore argument for input format details. Note that paths in force_index takes precedence over those in ignore (i.e., if a file matches both ignore and force_index, it *will* be indexed). config_filename (str): Optional name of filename within directories that contains configuration information. regex_search (bool): Whether to require exact matching (True) or regex search (False, default) when comparing the query string to each entity in .get() calls. This sets a default for the instance, but can be overridden in individual .get() requests. 
""" _default_ignore = { "code", "stimuli", "sourcedata", "models", "derivatives", re.compile(r'^\.') } def __init__(self, root, validate=True, index_associated=True, absolute_paths=True, derivatives=False, config=None, sources=None, ignore=None, force_index=None, config_filename='layout_config.json', regex_search=False): self.root = root self._validator = BIDSValidator(index_associated=index_associated) self.validate = validate self.absolute_paths = absolute_paths self.derivatives = {} self.sources = sources self.regex_search = regex_search self.metadata_index = MetadataIndex(self) self.config_filename = config_filename self.files = {} self.nodes = [] self.entities = {} self.ignore = [ os.path.abspath(os.path.join(self.root, patt)) if isinstance( patt, six.string_types) else patt for patt in listify(ignore or []) ] self.force_index = [ os.path.abspath(os.path.join(self.root, patt)) if isinstance( patt, six.string_types) else patt for patt in listify(force_index or []) ] # Do basic BIDS validation on root directory self._validate_root() # Initialize the BIDS validator and examine ignore/force_index args self._setup_file_validator() # Set up configs if config is None: config = 'bids' config = [Config.load(c) for c in listify(config)] self.config = {c.name: c for c in config} self.root_node = BIDSRootNode(self.root, config, self) # Consolidate entities into master list. Note: no conflicts occur b/c # multiple entries with the same name all point to the same instance. for n in self.nodes: self.entities.update(n.available_entities) # Add derivatives if any are found if derivatives: if derivatives is True: derivatives = os.path.join(root, 'derivatives') self.add_derivatives(derivatives, validate=validate, index_associated=index_associated, absolute_paths=absolute_paths, derivatives=None, config=None, sources=self, ignore=ignore, force_index=force_index) def _validate_root(self): # Validate root argument and make sure it contains mandatory info try: self.root = str(self.root) except: raise TypeError( "root argument must be a string (or a type that " "supports casting to string, such as pathlib.Path)" " specifying the directory containing the BIDS dataset.") self.root = os.path.abspath(self.root) if not os.path.exists(self.root): raise ValueError("BIDS root does not exist: %s" % self.root) target = os.path.join(self.root, 'dataset_description.json') if not os.path.exists(target): if self.validate: raise ValueError( "'dataset_description.json' is missing from project root." " Every valid BIDS dataset must have this file.") else: self.description = None else: with open(target, 'r', encoding='utf-8') as desc_fd: self.description = json.load(desc_fd) if self.validate: for k in ['Name', 'BIDSVersion']: if k not in self.description: raise ValueError("Mandatory '%s' field missing from " "dataset_description.json." % k) def _setup_file_validator(self): # Derivatives get special handling; they shouldn't be indexed normally if self.force_index is not None: for entry in self.force_index: if (isinstance(entry, six.string_types) and os.path.normpath(entry).startswith('derivatives')): msg = ("Do not pass 'derivatives' in the force_index " "list. To index derivatives, either set " "derivatives=True, or use add_derivatives().") raise ValueError(msg) def _validate_dir(self, d): return not check_path_matches_patterns(d, self.ignore) def _validate_file(self, f): # Validate a file. 
if check_path_matches_patterns(f, self.force_index): return True if check_path_matches_patterns(f, self.ignore): return False if not self.validate: return True # Derivatives are currently not validated. # TODO: raise warning the first time in a session this is encountered if 'derivatives' in self.config: return True # BIDS validator expects absolute paths, but really these are relative # to the BIDS project root. to_check = os.path.relpath(f, self.root) to_check = os.path.join(os.path.sep, to_check) return self._validator.is_bids(to_check) def _get_layouts_in_scope(self, scope): # Determine which BIDSLayouts to search layouts = [] scope = listify(scope) if 'all' in scope or 'raw' in scope: layouts.append(self) for deriv in self.derivatives.values(): if ('all' in scope or 'derivatives' in scope or deriv.description["PipelineDescription"]['Name'] in scope): layouts.append(deriv) return layouts def __getattr__(self, key): ''' Dynamically inspect missing methods for get_<entity>() calls and return a partial function of get() if a match is found. ''' if key.startswith('get_'): ent_name = key.replace('get_', '') # Use inflect to check both singular and plural forms if ent_name not in self.entities: sing = inflect.engine().singular_noun(ent_name) if sing in self.entities: ent_name = sing else: raise AttributeError( "'get_{}' can't be called because '{}' isn't a " "recognized entity name.".format(ent_name, ent_name)) return partial(self.get, return_type='id', target=ent_name) # Spit out default message if we get this far raise AttributeError("%s object has no attribute named %r" % (self.__class__.__name__, key)) def __repr__(self): # A tidy summary of key properties n_sessions = len([ session for isub in self.get_subjects() for session in self.get_sessions(subject=isub) ]) n_runs = len([ run for isub in self.get_subjects() for run in self.get_runs(subject=isub) ]) n_subjects = len(self.get_subjects()) root = self.root[-30:] s = ("BIDS Layout: ...{} | Subjects: {} | Sessions: {} | " "Runs: {}".format(root, n_subjects, n_sessions, n_runs)) return s def clone(self): """ Return a deep copy of the current BIDSLayout. """ return copy.deepcopy(self) def parse_file_entities(self, filename, scope='all', entities=None, config=None, include_unmatched=False): ''' Parse the passed filename for entity/value pairs. Args: filename (str): The filename to parse for entity values scope (str, list): The scope of the search space. Indicates which BIDSLayouts' entities to extract. See BIDSLayout docstring for valid values. By default, extracts all entities entities (list): An optional list of Entity instances to use in extraction. If passed, the scope and config arguments are ignored, and only the Entities in this list are used. config (str, Config, list): One or more Config objects, or paths to JSON config files on disk, containing the Entity definitions to use in extraction. If passed, scope is ignored. include_unmatched (bool): If True, unmatched entities are included in the returned dict, with values set to None. If False (default), unmatched entities are ignored. Returns: A dict, where keys are Entity names and values are the values extracted from the filename. 
''' # If either entities or config is specified, just pass through if entities is None and config is None: layouts = self._get_layouts_in_scope(scope) config = chain(*[list(l.config.values()) for l in layouts]) config = list(set(config)) return parse_file_entities(filename, entities, config, include_unmatched) def add_derivatives(self, path, **kwargs): ''' Add BIDS-Derivatives datasets to tracking. Args: path (str, list): One or more paths to BIDS-Derivatives datasets. Each path can point to either a derivatives/ directory containing one more more pipeline directories, or to a single pipeline directory (e.g., derivatives/fmriprep). kwargs (dict): Optional keyword arguments to pass on to BIDSLayout() when initializing each of the derivative datasets. ''' paths = listify(path) deriv_dirs = [] # Collect all paths that contain a dataset_description.json def check_for_description(dir): dd = os.path.join(dir, 'dataset_description.json') return os.path.exists(dd) for p in paths: p = os.path.abspath(p) if os.path.exists(p): if check_for_description(p): deriv_dirs.append(p) else: subdirs = [ d for d in os.listdir(p) if os.path.isdir(os.path.join(p, d)) ] for sd in subdirs: sd = os.path.join(p, sd) if check_for_description(sd): deriv_dirs.append(sd) for deriv in deriv_dirs: dd = os.path.join(deriv, 'dataset_description.json') with open(dd, 'r', encoding='utf-8') as ddfd: description = json.load(ddfd) pipeline_name = description.get('PipelineDescription', {}).get('Name') if pipeline_name is None: raise ValueError("Every valid BIDS-derivatives dataset must " "have a PipelineDescription.Name field set " "inside dataset_description.json.") if pipeline_name in self.derivatives: raise ValueError("Pipeline name '%s' has already been added " "to this BIDSLayout. Every added pipeline " "must have a unique name!") # Default config and sources values kwargs['config'] = kwargs.get('config') or ['bids', 'derivatives'] kwargs['sources'] = kwargs.get('sources') or self self.derivatives[pipeline_name] = BIDSLayout(deriv, **kwargs) # Consolidate all entities post-indexing. Note: no conflicts occur b/c # multiple entries with the same name all point to the same instance. for deriv in self.derivatives.values(): self.entities.update(deriv.entities) def to_df(self, **kwargs): """ Return information for all BIDSFiles tracked in the Layout as a pandas DataFrame. Args: kwargs: Optional keyword arguments passed on to get(). This allows one to easily select only a subset of files for export. Returns: A pandas DataFrame, where each row is a file, and each column is a tracked entity. NaNs are injected whenever a file has no value for a given attribute. """ try: import pandas as pd except ImportError: raise ImportError( "What are you doing trying to export a BIDSLayout" " as a pandas DataFrame when you don't have " "pandas installed? Eh? Eh?") files = self.get(return_type='obj', **kwargs) data = pd.DataFrame.from_records([f.entities for f in files]) data.insert(0, 'path', [f.path for f in files]) return data def get(self, return_type='object', target=None, extensions=None, scope='all', regex_search=False, defined_fields=None, **kwargs): """ Retrieve files and/or metadata from the current Layout. Args: return_type (str): Type of result to return. Valid values: 'object' (default): return a list of matching BIDSFile objects. 'file': return a list of matching filenames. 'dir': return a list of directories. 'id': return a list of unique IDs. Must be used together with a valid target. 
target (str): Optional name of the target entity to get results for (only used if return_type is 'dir' or 'id'). extensions (str, list): One or more file extensions to filter on. BIDSFiles with any other extensions will be excluded. scope (str, list): Scope of the search space. If passed, only nodes/directories that match the specified scope will be searched. Possible values include: 'all' (default): search all available directories. 'derivatives': search all derivatives directories 'raw': search only BIDS-Raw directories <PipelineName>: the name of a BIDS-Derivatives pipeline regex_search (bool or None): Whether to require exact matching (False) or regex search (True) when comparing the query string to each entity. defined_fields (list): Optional list of names of metadata fields that must be defined in JSON sidecars in order to consider the file a match, but which don't need to match any particular value. kwargs (dict): Any optional key/values to filter the entities on. Keys are entity names, values are regexes to filter on. For example, passing filter={ 'subject': 'sub-[12]'} would return only files that match the first two subjects. Returns: A list of BIDSFiles (default) or strings (see return_type). """ # Warn users still expecting 0.6 behavior if 'type' in kwargs: raise ValueError("As of pybids 0.7.0, the 'type' argument has been" " replaced with 'suffix'.") layouts = self._get_layouts_in_scope(scope) # Create concatenated file, node, and entity lists files, entities, nodes = {}, {}, [] for l in layouts: files.update(l.files) entities.update(l.entities) nodes.extend(l.nodes) # Separate entity kwargs from metadata kwargs ent_kwargs, md_kwargs = {}, {} for k, v in kwargs.items(): if k in entities: ent_kwargs[k] = v else: md_kwargs[k] = v # Provide some suggestions if target is specified and invalid. if target is not None and target not in entities: import difflib potential = list(entities.keys()) suggestions = difflib.get_close_matches(target, potential) if suggestions: message = "Did you mean one of: {}?".format(suggestions) else: message = "Valid targets are: {}".format(potential) raise ValueError( ("Unknown target '{}'. " + message).format(target)) results = [] # Search on entities filters = ent_kwargs.copy() for f in files.values(): if f._matches(filters, extensions, regex_search): results.append(f) # Search on metadata if return_type not in {'dir', 'id'}: if md_kwargs: results = [f.path for f in results] results = self.metadata_index.search(results, defined_fields, **md_kwargs) results = [files[f] for f in results] # Convert to relative paths if needed if not self.absolute_paths: for i, f in enumerate(results): f = copy.copy(f) f.path = os.path.relpath(f.path, self.root) results[i] = f if return_type == 'file': results = natural_sort([f.path for f in results]) elif return_type in ['id', 'dir']: if target is None: raise ValueError('If return_type is "id" or "dir", a valid ' 'target entity must also be specified.') results = [x for x in results if target in x.entities] if return_type == 'id': results = list(set([x.entities[target] for x in results])) results = natural_sort(results) elif return_type == 'dir': template = entities[target].directory if template is None: raise ValueError('Return type set to directory, but no ' 'directory template is defined for the ' 'target entity (\"%s\").' 
% target) # Construct regex search pattern from target directory template template = template.replace('{{root}}', self.root) to_rep = re.findall(r'\{(.*?)\}', template) for ent in to_rep: patt = entities[ent].pattern template = template.replace('{%s}' % ent, patt) template += r'[^\%s]*$' % os.path.sep matches = [ f.dirname for f in results if re.search(template, f.dirname) ] results = natural_sort(list(set(matches))) else: raise ValueError("Invalid return_type specified (must be one " "of 'object', 'file', 'id', or 'dir').") return results def get_file(self, filename, scope='all'): ''' Returns the BIDSFile object with the specified path. Args: filename (str): The path of the file to retrieve. Must be either an absolute path, or relative to the root of this BIDSLayout. scope (str, list): Scope of the search space. If passed, only BIDSLayouts that match the specified scope will be searched. See BIDSLayout docstring for valid values. Returns: A BIDSFile, or None if no match was found. ''' filename = os.path.abspath(os.path.join(self.root, filename)) layouts = self._get_layouts_in_scope(scope) for ly in layouts: if filename in ly.files: return ly.files[filename] return None def get_collections(self, level, types=None, variables=None, merge=False, sampling_rate=None, skip_empty=False, **kwargs): """Return one or more variable Collections in the BIDS project. Args: level (str): The level of analysis to return variables for. Must be one of 'run', 'session', 'subject', or 'dataset'. types (str, list): Types of variables to retrieve. All valid values reflect the filename stipulated in the BIDS spec for each kind of variable. Valid values include: 'events', 'physio', 'stim', 'scans', 'participants', 'sessions', and 'regressors'. variables (list): Optional list of variable names to return. If None, all available variables are returned. merge (bool): If True, variables are merged across all observations of the current level. E.g., if level='subject', variables from all subjects will be merged into a single collection. If False, each observation is handled separately, and the result is returned as a list. sampling_rate (int, str): If level='run', the sampling rate to pass onto the returned BIDSRunVariableCollection. skip_empty (bool): Whether or not to skip empty Variables (i.e., where there are no rows/records in a file after applying any filtering operations like dropping NaNs). kwargs: Optional additional arguments to pass onto load_variables. """ from bids.variables import load_variables index = load_variables(self, types=types, levels=level, skip_empty=skip_empty, **kwargs) return index.get_collections(level, variables, merge, sampling_rate=sampling_rate) def get_metadata(self, path, include_entities=False, **kwargs): """Return metadata found in JSON sidecars for the specified file. Args: path (str): Path to the file to get metadata for. include_entities (bool): If True, all available entities extracted from the filename (rather than JSON sidecars) are included in the returned metadata dictionary. kwargs (dict): Optional keyword arguments to pass onto get_nearest(). Returns: A dictionary of key/value pairs extracted from all of the target file's associated JSON sidecars. Notes: A dictionary containing metadata extracted from all matching .json files is returned. In cases where the same key is found in multiple files, the values in files closer to the input filename will take precedence, per the inheritance rules in the BIDS specification.
""" f = self.get_file(path) # For querying efficiency, store metadata in the MetadataIndex cache self.metadata_index.index_file(f.path) if include_entities: entities = f.entities results = entities else: results = {} results.update(self.metadata_index.file_index[path]) return results def get_nearest(self, path, return_type='file', strict=True, all_=False, ignore_strict_entities=None, full_search=False, **kwargs): ''' Walk up the file tree from the specified path and return the nearest matching file(s). Args: path (str): The file to search from. return_type (str): What to return; must be one of 'file' (default) or 'tuple'. strict (bool): When True, all entities present in both the input path and the target file(s) must match perfectly. When False, files will be ordered by the number of matching entities, and partial matches will be allowed. all_ (bool): When True, returns all matching files. When False (default), only returns the first match. ignore_strict_entities (list): Optional list of entities to exclude from strict matching when strict is True. This allows one to search, e.g., for files of a different type while matching all other entities perfectly by passing ignore_strict_entities=['type']. full_search (bool): If True, searches all indexed files, even if they don't share a common root with the provided path. If False, only files that share a common root will be scanned. kwargs: Optional keywords to pass on to .get(). ''' path = os.path.abspath(path) # Make sure we have a valid suffix suffix = kwargs.get('suffix') if not suffix: f = self.get_file(path) if 'suffix' not in f.entities: raise ValueError( "File '%s' does not have a valid suffix, most " "likely because it is not a valid BIDS file." % path) suffix = f.entities['suffix'] kwargs['suffix'] = suffix # Collect matches for all entities entities = {} for ent in self.entities.values(): m = ent.regex.search(path) if m: entities[ent.name] = ent._astype(m.group(1)) # Remove any entities we want to ignore when strict matching is on if strict and ignore_strict_entities is not None: for k in ignore_strict_entities: entities.pop(k, None) results = self.get(return_type='object', **kwargs) # Make a dictionary of directories --> contained files folders = defaultdict(list) for f in results: folders[f.dirname].append(f) # Build list of candidate directories to check search_paths = [] while True: if path in folders and folders[path]: search_paths.append(path) parent = os.path.dirname(path) if parent == path: break path = parent if full_search: unchecked = set(folders.keys()) - set(search_paths) search_paths.extend(path for path in unchecked if folders[path]) def count_matches(f): # Count the number of entities shared with the passed file f_ents = f.entities keys = set(entities.keys()) & set(f_ents.keys()) shared = len(keys) return [shared, sum([entities[k] == f_ents[k] for k in keys])] matches = [] for path in search_paths: # Sort by number of matching entities. Also store number of # common entities, for filtering when strict=True. num_ents = [[f] + count_matches(f) for f in folders[path]] # Filter out imperfect matches (i.e., where number of common # entities does not equal number of matching entities). 
if strict: num_ents = [f for f in num_ents if f[1] == f[2]] num_ents.sort(key=lambda x: x[2], reverse=True) if num_ents: for f_match in num_ents: matches.append(f_match[0]) if not all_: break matches = [m.path if return_type == 'file' else m for m in matches] return matches if all_ else matches[0] if matches else None def get_bvec(self, path, **kwargs): """ Get bvec file for passed path. """ result = self.get_nearest(path, extensions='bvec', suffix='dwi', all_=True, **kwargs) return listify(result)[0] def get_bval(self, path, **kwargs): """ Get bval file for passed path. """ result = self.get_nearest(path, extensions='bval', suffix='dwi', all_=True, **kwargs) return listify(result)[0] def get_fieldmap(self, path, return_list=False): """ Get fieldmap(s) for specified path. """ fieldmaps = self._get_fieldmaps(path) if return_list: return fieldmaps else: if len(fieldmaps) == 1: return fieldmaps[0] elif len(fieldmaps) > 1: raise ValueError("More than one fieldmap found, but the " "'return_list' argument was set to False. " "Either ensure that there is only one " "fieldmap for this image, or set the " "'return_list' argument to True and handle " "the result as a list.") else: # len(fieldmaps) == 0 return None def _get_fieldmaps(self, path): sub = self.parse_file_entities(path)['subject'] fieldmap_set = [] suffix = '(phase1|phasediff|epi|fieldmap)' files = self.get(subject=sub, suffix=suffix, regex_search=True, extensions=['nii.gz', 'nii']) for file in files: metadata = self.get_metadata(file.path) if metadata and "IntendedFor" in metadata.keys(): intended_for = listify(metadata["IntendedFor"]) if any([path.endswith(_suff) for _suff in intended_for]): cur_fieldmap = {} if file.suffix == "phasediff": cur_fieldmap = { "phasediff": file.path, "magnitude1": file.path.replace("phasediff", "magnitude1"), "suffix": "phasediff" } magnitude2 = file.path.replace("phasediff", "magnitude2") if os.path.isfile(magnitude2): cur_fieldmap['magnitude2'] = magnitude2 elif file.suffix == "phase1": cur_fieldmap["phase1"] = file.path cur_fieldmap["magnitude1"] = \ file.path.replace("phase1", "magnitude1") cur_fieldmap["phase2"] = \ file.path.replace("phase1", "phase2") cur_fieldmap["magnitude2"] = \ file.path.replace("phase1", "magnitude2") cur_fieldmap["suffix"] = "phase" elif file.suffix == "epi": cur_fieldmap["epi"] = file.path cur_fieldmap["suffix"] = "epi" elif file.suffix == "fieldmap": cur_fieldmap["fieldmap"] = file.path cur_fieldmap["magnitude"] = \ file.path.replace("fieldmap", "magnitude") cur_fieldmap["suffix"] = "fieldmap" fieldmap_set.append(cur_fieldmap) return fieldmap_set def get_tr(self, derivatives=False, **selectors): """ Returns the scanning repetition time (TR) for one or more runs. Args: derivatives (bool): If True, also checks derivatives images. selectors: Optional keywords used to constrain the selected runs. Can be any arguments valid for a .get call (e.g., BIDS entities or JSON sidecar keys). Returns: A single float. Notes: Raises an exception if more than one unique TR is found. 
""" # Constrain search to functional images selectors.update(suffix='bold', datatype='func') scope = None if derivatives else 'raw' images = self.get(extensions=['.nii', '.nii.gz'], scope=scope, **selectors) if not images: raise ValueError("No functional images that match criteria found.") all_trs = set() for img in images: md = self.get_metadata(img.path, suffix='bold', full_search=True) all_trs.add(round(float(md['RepetitionTime']), 5)) if len(all_trs) > 1: raise ValueError( "Unique TR cannot be found given selectors {!r}".format( selectors)) return all_trs.pop() def build_path(self, source, path_patterns=None, strict=False, scope='all'): ''' Constructs a target filename for a file or dictionary of entities. Args: source (str, BIDSFile, dict): The source data to use to construct the new file path. Must be one of: - A BIDSFile object - A string giving the path of a BIDSFile contained within the current Layout. - A dict of entities, with entity names in keys and values in values path_patterns (list): Optional path patterns to use to construct the new file path. If None, the Layout-defined patterns will be used. strict (bool): If True, all entities must be matched inside a pattern in order to be a valid match. If False, extra entities will be ignored so long as all mandatory entities are found. scope (str, list): The scope of the search space. Indicates which BIDSLayouts' path patterns to use. See BIDSLayout docstring for valid values. By default, uses all available layouts. If two or more values are provided, the order determines the precedence of path patterns (i.e., earlier layouts will have higher precedence). ''' # 'is_file' is a crude check for Path objects if isinstance(source, six.string_types) or hasattr(source, 'is_file'): source = str(source) if source not in self.files: source = os.path.join(self.root, source) source = self.get_file(source) if isinstance(source, BIDSFile): source = source.entities if path_patterns is None: layouts = self._get_layouts_in_scope(scope) path_patterns = [] seen_configs = set() for l in layouts: for c in l.config.values(): if c in seen_configs: continue path_patterns.extend(c.default_path_patterns) seen_configs.add(c) return build_path(source, path_patterns, strict) def copy_files(self, files=None, path_patterns=None, symbolic_links=True, root=None, conflicts='fail', **kwargs): """ Copies one or more BIDSFiles to new locations defined by each BIDSFile's entities and the specified path_patterns. Args: files (list): Optional list of BIDSFile objects to write out. If none provided, use files from running a get() query using remaining **kwargs. path_patterns (str, list): Write patterns to pass to each file's write_file method. symbolic_links (bool): Whether to copy each file as a symbolic link or a deep copy. root (str): Optional root directory that all patterns are relative to. Defaults to current working directory. conflicts (str): Defines the desired action when the output path already exists. Must be one of: 'fail': raises an exception 'skip' does nothing 'overwrite': overwrites the existing file 'append': adds a suffix to each file copy, starting with 1 kwargs (kwargs): Optional key word arguments to pass into a get() query. 
""" _files = self.get(return_type='objects', **kwargs) if files: _files = list(set(files).intersection(_files)) for f in _files: f.copy(path_patterns, symbolic_link=symbolic_links, root=self.root, conflicts=conflicts) def write_contents_to_file(self, entities, path_patterns=None, contents=None, link_to=None, content_mode='text', conflicts='fail', strict=False): """ Write arbitrary data to a file defined by the passed entities and path patterns. Args: entities (dict): A dictionary of entities, with Entity names in keys and values for the desired file in values. path_patterns (list): Optional path patterns to use when building the filename. If None, the Layout-defined patterns will be used. contents (object): Contents to write to the generate file path. Can be any object serializable as text or binary data (as defined in the content_mode argument). link_to (str): Optional path with which to create a symbolic link to. Used as an alternative to and takes priority over the contents argument. conflicts (str): Defines the desired action when the output path already exists. Must be one of: 'fail': raises an exception 'skip' does nothing 'overwrite': overwrites the existing file 'append': adds a suffix to each file copy, starting with 1 strict (bool): If True, all entities must be matched inside a pattern in order to be a valid match. If False, extra entities """ path = self.build_path(entities, path_patterns, strict) if path is None: raise ValueError("Cannot construct any valid filename for " "the passed entities given available path " "patterns.") write_contents_to_file(path, contents=contents, link_to=link_to, content_mode=content_mode, conflicts=conflicts, root=self.root)
def bids_acquisition_download(data_root_path='', dataset_name=None, force_download=False, behav_path='exp_info/recorded_events', copy_events='n', deface=False, dry_run=False): """Automatically download files from the NeuroSpin server to a BIDS dataset. The download database is based on NeuroSpin server conventions. Options are 'prisma', 'trio' and custom path. Prisma db_path = '/neurospin/acquisition/database/Prisma_fit' Trio db_path = '/neurospin/acquisition/database/TrioTim' The BIDS dataset is created if necessary before download, with some empty mandatory files (such as README) that have to be filled in if they don't exist. The download depends on the file '[sub-*_][ses-*_]download.csv' contained in the folder 'exp_info'. The NIP and acquisition date of the subjects are taken automatically from the exp_info/participants.tsv file, which follows the BIDS standard. The file is copied into the dataset folder without the NIP column for privacy. Possible exceptions 1) exp_info directory not found 2) participants.tsv not found 3) download files not found 4) Acquisition directory on the NeuroSpin server not found 5) There is more than one acquisition directory (ask the operator for extra digits for the NIP; the NIP would then look like xxxxxxxx-ssss) 6) Event file corresponding to a downloaded bold.nii not found """ ### CHECK PATHS AND FILES # exp_info path where the participants.tsv is located # print(data_root_path) exp_info_path = os.path.join(data_root_path, 'exp_info') if not os.path.exists(exp_info_path): raise Exception('exp_info directory not found') if not os.path.isfile(os.path.join(exp_info_path, 'participants.tsv')): raise Exception('exp_info/participants.tsv not found') # Determine target path with the name of dataset dataset_name, target_root_path = get_bids_default_path( data_root_path, dataset_name) # Create dataset directories and files if necessary bids_init_dataset(data_root_path, dataset_name) # Manage the report and download information download_report = ('download_report_' + time.strftime("%d-%b-%Y-%H:%M:%S", time.gmtime()) + '.csv') report_path = os.path.join(data_root_path, 'report') if not os.path.exists(report_path): os.makedirs(report_path) download_report = open(os.path.join(report_path, download_report), 'w') #report_line = '%s,%s,%s\n' % ('subject_id', 'session_id', 'download_file') #download_report.write(report_line) list_imported = [] list_already_imported = [] list_warning = [] # Create a DataFrame to store participant information #df_participant = pd.DataFrame() # Dict for participant info #list_all_participants = {} dic_info_participants = OrderedDict() # List for the batch file for the dcm2niibatch command infiles_dcm2nii = [] # List for data to deface files_for_pydeface = [] # Dict of descriptors to be added dict_descriptors = {} ### GETTING INFORMATION TO DOWNLOAD # Download command for each subject/session # one line has the following information # participant_id / NIP / infos_participant / session_label / acq_date / location / to_import # Read the participants.tsv file for getting subjects/sessions to download pop = pd.read_csv(os.path.join(exp_info_path, 'participants.tsv'), dtype=str, sep='\t', index_col=False) #print(df_participant) for row_idx, subject_info in pop.iterrows(): # Fill the participant information for the participants.tsv if subject_info[0] in dic_info_participants: existing_items = dic_info_participants[subject_info[0]] dico_add = {} info_participant = json.loads(subject_info['infos_participant']) for k, v in info_participant.items(): if k not in existing_items: dico_add[k] = v # merge the dicts
existing_items.update(dico_add) dic_info_participants[subject_info[0]] = existing_items else: dic_info_participants[subject_info[0]] = json.loads( subject_info['infos_participant']) # Determine path to files on the NeuroSpin server download_database = subject_info['location'] if download_database in NEUROSPIN_DATABASES: db_path = NEUROSPIN_DATABASES[download_database] else: db_path = download_database # the row gives either participant_label or participant_id subject_id = subject_info[0] # sub_path = target_root_path + subject_id + ses_path # Manage the optional filters # optional_filters = [('sub', subject_id)] # if session_id is not None: # optional_filters += [('ses', session_id)] if 'session_label' in subject_info.index and pd.notnull(subject_info['session_label']): session_id = subject_info['session_label'] else: session_id = None if session_id is None: ses_path = '' else: ses_path = 'ses-' + session_id try: int(subject_id) subject_id = 'sub-{0}'.format(subject_id) except: if 'sub-' not in subject_id: print('**** BIDS IMPORTATION WARNING: SUBJECT ID PROBABLY ' 'NOT CONFORMANT') sub_path = os.path.join(target_root_path, subject_id, ses_path) if not os.path.exists(sub_path): os.makedirs(sub_path) # Avoid redownloading subjects/sessions if not force_download: check_file = os.path.join(sub_path, 'downloaded') if os.path.isfile(check_file): continue # DATE has to be transformed from BIDS to NeuroSpin server standard # NeuroSpin standard is yyyymmdd -> BIDS standard is YYYY-MM-DD acq_date = subject_info['acq_date'].replace('-', '').replace('\n', '') #acq_label acq_label = subject_info['acq_label'] #dir_label #dir_label = subject_info['dir_label'] # nip number nip = subject_info['NIP'] # Get the appropriate download file, as specific as possible # specs_path = file_manager_default_file(exp_info_path, # optional_filters, 'download', # file_type='tsv', # allow_other_fields=False) #report_line = '%s,%s,%s\n' % (subject_id, session_id, specs_path) #download_report.write(report_line) #specs = pd.read_csv(specs_path, dtype=str, sep='\t', index_col=False) # Retrieve list of lists for seqs to import # One tuple is configured as: (file_to_import;acq_folder;acq_name) # value[0] : num of seq # value[1] : modality # value[2] : part of the file_name print("Scans for ", subject_info['NIP']) print(subject_info['to_import']) seqs_to_retrieve = literal_eval(subject_info['to_import']) # Convert the first element if there is only one sequence; otherwise # each value would be used as a str and not a tuple.
if isinstance(seqs_to_retrieve[0], str): seqs_to_retrieve = [seqs_to_retrieve] # download data, store information in batch files for anat/fmri # download data for meg data for value in seqs_to_retrieve: #print(seqs_to_retrieve) def get_value(key, text): m = re.search(key + '-(.+?)_', text) if m: return m.group(1) else: return None run_task = get_value('task', value[2]) run_id = get_value('run', value[2]) run_dir = get_value('dir', value[2]) run_session = session_id tag = value[2].split('_')[-1] target_path = os.path.join(sub_path, value[1]) if not os.path.exists(target_path): os.makedirs(target_path) # MEG CASE if value[1] == 'meg': # Create subject path if necessary meg_path = os.path.join(sub_path, 'meg') if not os.path.exists(meg_path): os.makedirs(meg_path) # Create the sub-emptyroom #sub-emptyroom_path = os.path.join(data_root_path, 'sub_emptyroom') #if not os.path.exists(sub-emptyroom_path): # os.makedirs(sub-emptyroom_path) meg_file = os.path.join(db_path, nip, acq_date, value[0]) print(meg_file) filename = get_bids_file_descriptor(subject_id, task_id=run_task, run_id=run_id, run_dir=run_dir, session_id=run_session, file_tag=tag, acq_label=acq_label, file_type='tif') #output_path = os.path.join(target_path, filename) #print(output_path) #shutil.copyfile(meg_file, output_path) raw = mne.io.read_raw_fif(meg_file, allow_maxshield=True) write_raw_bids(raw, filename, target_path, overwrite=True) # add event # create json file #copy the subject emptyroom # ANAT and FUNC case elif (value[1] == 'anat') or (value[1] == 'func') or (value[1] == 'fmap'): download = True dicom_paths = [] path_file_glob = "" nip_dirs = glob.glob( os.path.join(db_path, str(acq_date), str(nip) + '*')) #print(os.path.join(db_path, str(acq_date), str(nip) + '*')) if len(nip_dirs) < 1: list_warning.append( f"\n WARNING: No directory found for given NIP {nip} and SESSION {session_id}" ) #print(message) #download_report.write(message) download = False elif len(nip_dirs) > 1: list_warning.append( f"\n WARNING: Multiple path for given NIP {nip} \ SESSION {session_id} - please \ mention the session of the subject for this date, \ 2 sessions for the same subject the same day are \ possible") #print(message) #download_report.write(message) download = False else: path_file_glob = os.path.join( nip_dirs[0], '{0:06d}_*'.format(int(value[0]))) #print(path_file_glob) dicom_paths = glob.glob(path_file_glob) if not dicom_paths and download: list_warning.append("\n WARNING: file not found " + path_file_glob) #print(message) #download_report.write(message) elif download: dicom_path = dicom_paths[0] list_imported.append("\n IMPORTATION OF " + dicom_path) #print(message) #download_report.write(message) # Expecting page 10 bids specification file name filename = get_bids_file_descriptor(subject_id, task_id=run_task, run_id=run_id, run_dir=run_dir, session_id=run_session, file_tag=tag, acq_label=acq_label, file_type='nii') if value[1] == 'anat' and deface: print("\n Deface with pydeface") files_for_pydeface.append( os.path.join(target_path, filename)) # append list for preparing the batch importation file_to_convert = { 'in_dir': dicom_path, 'out_dir': target_path, 'filename': os.path.splitext(filename)[0] } is_file_to_import = os.path.join( os.path.join(os.getcwd(), target_path, filename)) if (os.path.isfile(is_file_to_import)): list_already_imported.append( f" ALREADY IMPORTED: {is_file_to_import}") else: infiles_dcm2nii.append(file_to_convert) # Add descriptor into the json file if run_task: filename_json = os.path.join(target_path, 
filename[:-3] + 'json') dict_descriptors.update( {filename_json: { 'TaskName': run_task }}) if len(value) == 4: #print('value[3]', value[3] ) filename_json = os.path.join(target_path, filename[:-3] + 'json') dict_descriptors.update({filename_json: value[3]}) #Importation and conversion of dicom files dcm2nii_batch = dict(Options=dict(isGz='false', isFlipY='false', isVerbose='false', isCreateBIDS='true', isOnlySingleFile='false'), Files=infiles_dcm2nii) dcm2nii_batch_file = os.path.join(exp_info_path, 'batch_dcm2nii.yaml') with open(dcm2nii_batch_file, 'w') as f: data = yaml.dump(dcm2nii_batch, f) print( "\n------------------------------------------------------------------------------------" ) print( "------------------- SUMMARY OF IMPORTATION --------------------------------------" ) print( "--------------------------------------------------------------------------------------\n" ) for i in list_already_imported: print(i) download_report.write(i) print( "\n------------------------------------------------------------------------------------" ) for i in list_imported: print(i) download_report.write(i) print( "\n------------------------------------------------------------------------------------" ) for i in list_warning: print(i) download_report.write(i) print( "\n------------------------------------------------------------------------------------" ) print( "------------------------------------------------------------------------------------\n" ) download_report.close() if dry_run: print("\n NO IMPORTATION, DRY-RUN OPTION IS TRUE \n") else: print('\n') cmd = "dcm2niibatch %s" % (dcm2nii_batch_file) subprocess.call(cmd, shell=True) # loop for checking if downloaded are ok and create the downloaded files # done_file = open(os.path.join(sub_path, 'downloaded'), 'w') # done_file.close() #Data to deface #print(files_for_pydeface) if files_for_pydeface: try: template = ( "/neurospin/unicog/protocols/IRMf/Unicogfmri/BIDS/" "unicog-dev/bids/template_deface/mean_reg2mean.nii.gz") facemask = ("/neurospin/unicog/protocols/IRMf/Unicogfmri/BIDS/" "unicog-dev/bids/template_deface/facemask.nii.gz") except: template = resource_filename( Requirement.parse("unicog"), "bids/template_deface/mean_reg2mean.nii.gz") facemask = resource_filename( Requirement.parse("unicog"), "bids/template_deface/facemask.nii.gz") os.environ['FSLDIR'] = "/i2bm/local/fsl/bin/" os.environ['FSLOUTPUTTYPE'] = "NIFTI_PAIR" os.environ[ 'PATH'] = os.environ['FSLDIR'] + ":" + os.environ['PATH'] for file_to_deface in files_for_pydeface: print(f"\nDeface with pydeface {file_to_deface}") pdu.deface_image(infile=file_to_deface, outfile=file_to_deface, facemask=facemask, template=template, force=True) # Create participants.tsv in dataset folder (take out NIP column) participants_path = os.path.join(target_root_path, 'participants.tsv') df_participant = pd.DataFrame.from_dict(dic_info_participants, orient="index") df_participant.to_csv(participants_path, sep='\t') if dict_descriptors: #print(dict_descriptors) # Adding a new key value pair in a json file such as taskname for k, v in dict_descriptors.items(): with open(k, 'r+') as json_file: for key, val in v.items(): temp_json = json.load(json_file) temp_json[key] = val json_file.seek(0) json.dump(temp_json, json_file) json_file.truncate() # Copy recorded event files if copy_events == "y": bids_copy_events(behav_path, data_root_path, dataset_name) #Validate paths with BIDSValidator #see also http://bids-standard.github.io/bids-validator/ validation_bids = yes_no( '\nDo you want to use a bids 
validator? (y/n)') if validation_bids: bids_validation_report = os.path.join( report_path, "report_bids_validation.txt") if shutil.which('bids-validator'): cmd = f"bids-validator {target_root_path} > {bids_validation_report}" subprocess.call(cmd, shell=True) cmd = f"cat < {bids_validation_report}" subprocess.call(cmd, shell=True) print( f'\n\nSee the summary of the bids validator at {bids_validation_report}' ) else: validator = BIDSValidator() os.chdir(target_root_path) for file_to_test in Path('.').glob('./**/*'): if file_to_test.is_file(): file_to_test = '/' + str(file_to_test) print( '\nTesting the following file name with BIDSValidator: {name}' .format(name=file_to_test)) print(validator.is_bids(file_to_test)) print('\n')
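# Usage sketch (illustrative): how the importer above might be invoked. The data
# root path is hypothetical; it must contain exp_info/participants.tsv with one
# row per subject/session (participant_id, NIP, infos_participant, session_label,
# acq_date, location, to_import). With dry_run=True the dcm2niibatch file is
# written but no conversion is launched.
def _example_acquisition_download():
    bids_acquisition_download(
        data_root_path='/neurospin/protocols/my_protocol',  # hypothetical path
        dataset_name='my_dataset',
        force_download=False,
        deface=False,
        dry_run=True)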
class BIDSLayoutIndexer: """ Indexer class for BIDSLayout. Parameters ---------- validate : bool, optional If True, all files are checked for BIDS compliance when first indexed, and non-compliant files are ignored. This provides a convenient way to restrict file indexing to only those files defined in the "core" BIDS spec, as setting validate=True will lead files in supplementary folders like derivatives/, code/, etc. to be ignored. ignore : str or SRE_Pattern or list Path(s) to exclude from indexing. Each path is either a string or a SRE_Pattern object (i.e., compiled regular expression). If a string is passed, it must be either an absolute path, or be relative to the BIDS project root. If an SRE_Pattern is passed, the contained regular expression will be matched against the full (absolute) path of all files and directories. By default, indexing ignores all files in 'code/', 'stimuli/', 'sourcedata/', 'models/', and any hidden files/dirs beginning with '.' at root level. force_index : str or SRE_Pattern or list Path(s) to forcibly index in the BIDSLayout, even if they would otherwise fail validation. See the documentation for the ignore argument for input format details. Note that paths in force_index takes precedence over those in ignore (i.e., if a file matches both ignore and force_index, it *will* be indexed). Note: NEVER include 'derivatives' here; use the derivatives argument (or :obj:`bids.layout.BIDSLayout.add_derivatives`) for that. index_metadata : bool If True, all metadata files are indexed. If False, metadata will not be available (but indexing will be faster). config_filename : str Optional name of filename within directories that contains configuration information. **filters keyword arguments passed to the .get() method of a :obj:`bids.layout.BIDSLayout` object. These keyword arguments define what files get selected for metadata indexing. """ def __init__(self, validate=True, ignore=None, force_index=None, index_metadata=True, config_filename='layout_config.json', **filters): self.validate = validate self.ignore = ignore self.force_index = force_index self.index_metadata = index_metadata self.config_filename = config_filename self.filters = filters self.validator = BIDSValidator(index_associated=True) # Layout-dependent attributes to be set in __call__() self._layout = None self._config = None self._include_patterns = None self._exclude_patterns = None def __call__(self, layout): self._layout = layout self._config = list(layout.config.values()) ignore, force = validate_indexing_args(self.ignore, self.force_index, self._layout.root) self._include_patterns = force self._exclude_patterns = ignore self._index_dir(self._layout.root, self._config) if self.index_metadata: self._index_metadata() @property def session(self): return self._layout.connection_manager.session def _validate_dir(self, d, default=None): if _check_path_matches_patterns(d, self._include_patterns): return True if _check_path_matches_patterns(d, self._exclude_patterns): return False return default def _validate_file(self, f, default=None): # Inclusion takes priority over exclusion if _check_path_matches_patterns(f, self._include_patterns): return True if _check_path_matches_patterns(f, self._exclude_patterns): return False # If inclusion/exclusion is inherited from a parent directory, that # takes precedence over the remaining file-level rules if default is not None: return default # Derivatives are currently not validated. 
# TODO: raise warning the first time in a session this is encountered if not self.validate or 'derivatives' in self._layout.config: return True # BIDS validator expects absolute paths, but really these are relative # to the BIDS project root. to_check = os.path.relpath(f, self._layout.root) to_check = os.path.join(os.path.sep, to_check) to_check = Path(to_check).as_posix() # bids-validator works with posix paths only return self.validator.is_bids(to_check) def _index_dir(self, path, config, default_action=None): abs_path = os.path.join(self._layout.root, path) # Derivative directories must always be added separately # and passed as their own root, so terminate if passed. if abs_path.startswith(os.path.join(self._layout.root, 'derivatives')): return config = list(config) # Shallow copy # Check for additional config file in directory layout_file = self.config_filename config_file = os.path.join(abs_path, layout_file) if os.path.exists(config_file): cfg = Config.load(config_file, session=self.session) config.append(cfg) # Track which entities are valid in filenames for this directory config_entities = {} for c in config: config_entities.update(c.entities) for (dirpath, dirnames, filenames) in os.walk(path): # Set the default inclusion/exclusion directive default = self._validate_dir(dirpath, default=default_action) # If layout configuration file exists, delete it if self.config_filename in filenames: filenames.remove(self.config_filename) for f in filenames: bf = self._index_file(f, dirpath, config_entities, default_action=default) if bf is None: continue self.session.commit() # Recursively index subdirectories for d in dirnames: d = os.path.join(dirpath, d) self._index_dir(d, list(config), default_action=default) # Prevent subdirectory traversal break def _index_file(self, f, dirpath, entities, default_action=None): """Create DB record for file and its tags. """ abs_fn = os.path.join(dirpath, f) # Skip files that fail validation, unless forcibly indexing if not self._validate_file(abs_fn, default=default_action): return None bf = make_bidsfile(abs_fn) self.session.add(bf) # Extract entity values match_vals = {} for e in entities.values(): m = e.match_file(bf) if m is None and e.mandatory: break if m is not None: match_vals[e.name] = (e, m) # Create Entity <=> BIDSFile mappings if match_vals: for _, (ent, val) in match_vals.items(): tag = Tag(bf, ent, str(val), ent._dtype) self.session.add(tag) return bf def _index_metadata(self): """Index metadata for all files in the BIDS dataset. """ dot = '.' if bids.config.get_option('extension_initial_dot') else '' filters = self.filters if filters: # ensure we are returning objects filters['return_type'] = 'object' # until 0.11.0, user can specify extension or extensions ext_key = 'extensions' if 'extensions' in filters else 'extension' if filters.get(ext_key): filters[ext_key] = listify(filters[ext_key]) # ensure json files are being indexed # XXX 0.14: dot always == '.' json_ext = dot + 'json' if json_ext not in filters[ext_key]: filters[ext_key].append(json_ext) # Process JSON files first if we're indexing metadata all_files = self._layout.get(absolute_paths=True, **filters) # Track ALL entities we've seen in file names or metadatas all_entities = {} for c in self._config: all_entities.update(c.entities) # If key/value pairs in JSON files duplicate ones extracted from files, # we can end up with Tag collisions in the DB. To prevent this, we # store all filename/entity pairs and the value, and then check against # that before adding each new Tag. 
all_tags = {} for t in self.session.query(Tag).all(): key = '{}_{}'.format(t.file_path, t.entity_name) all_tags[key] = str(t.value) # We build up a store of all file data as we iterate files. It looks # like: { extension/suffix: dirname: [(entities, payload)]}}. # The payload is left empty for non-JSON files. file_data = {} for bf in all_files: file_ents = bf.entities.copy() suffix = file_ents.pop('suffix', None) ext = file_ents.pop('extension', None) if suffix is not None and ext is not None: key = "{}/{}".format(ext, suffix) if key not in file_data: file_data[key] = defaultdict(list) if ext == dot + 'json': with open(bf.path, 'r') as handle: try: payload = json.load(handle) except json.JSONDecodeError as e: msg = ("Error occurred while trying to decode JSON" " from file '{}'.".format(bf.path)) raise IOError(msg) from e else: payload = None to_store = (file_ents, payload, bf.path) file_data[key][bf.dirname].append(to_store) # To avoid integrity errors, track primary keys we've seen seen_assocs = set() def create_association_pair(src, dst, kind, kind2=None): kind2 = kind2 or kind pk1 = '#'.join([src, dst, kind]) if pk1 not in seen_assocs: self.session.add(FileAssociation(src=src, dst=dst, kind=kind)) seen_assocs.add(pk1) pk2 = '#'.join([dst, src, kind2]) if pk2 not in seen_assocs: self.session.add(FileAssociation(src=dst, dst=src, kind=kind2)) seen_assocs.add(pk2) # TODO: Efficiency of everything in this loop could be improved filenames = [bf for bf in all_files if not bf.path.endswith('.json')] for bf in filenames: file_ents = bf.entities.copy() suffix = file_ents.pop('suffix', None) ext = file_ents.pop('extension', None) file_ent_keys = set(file_ents.keys()) if suffix is None or ext is None: continue # Extract metadata associated with the file. The idea is # that we loop over parent directories, and if we find # payloads in the file_data store (indexing by directory # and current file suffix), we check to see if the # candidate JS file's entities are entirely consumed by # the current file. If so, it's a valid candidate, and we # add the payload to the stack. Finally, we invert the # stack and merge the payloads in order. 
ext_key = "{}/{}".format(ext, suffix) json_key = dot + "json/{}".format(suffix) dirname = bf.dirname payloads = [] ancestors = [] while True: # Get JSON payloads json_data = file_data.get(json_key, {}).get(dirname, []) for js_ents, js_md, js_path in json_data: js_keys = set(js_ents.keys()) if js_keys - file_ent_keys: continue matches = [js_ents[name] == file_ents[name] for name in js_keys] if all(matches): payloads.append((js_md, js_path)) # Get all files this file inherits from candidates = file_data.get(ext_key, {}).get(dirname, []) for ents, _, path in candidates: keys = set(ents.keys()) if keys - file_ent_keys: continue matches = [ents[name] == file_ents[name] for name in keys] if all(matches): ancestors.append(path) parent = os.path.dirname(dirname) if parent == dirname: break dirname = parent if not payloads: continue # Create DB records for metadata associations js_file = payloads[-1][1] create_association_pair(js_file, bf.path, 'Metadata') # Consolidate metadata by looping over inherited JSON files file_md = {} for pl, js_file in payloads[::-1]: file_md.update(pl) # Create FileAssociation records for JSON inheritance n_pl = len(payloads) for i, (pl, js_file) in enumerate(payloads): if (i + 1) < n_pl: other = payloads[i + 1][1] create_association_pair(js_file, other, 'Child', 'Parent') # Inheritance for current file n_pl = len(ancestors) for i, src in enumerate(ancestors): if (i + 1) < n_pl: dst = ancestors[i + 1] create_association_pair(src, dst, 'Child', 'Parent') # Files with IntendedFor field always get mapped to targets intended = listify(file_md.get('IntendedFor', [])) for target in intended: # Per spec, IntendedFor paths are relative to sub dir. target = os.path.join( self._layout.root, 'sub-{}'.format(bf.entities['subject']), target) create_association_pair(bf.path, target, 'IntendedFor', 'InformedBy') # Link files to BOLD runs if suffix in ['physio', 'stim', 'events', 'sbref']: images = self._layout.get( extension=['.nii', '.nii.gz'], suffix='bold', return_type='filename', **file_ents) for img in images: create_association_pair(bf.path, img, 'IntendedFor', 'InformedBy') # Link files to DWI runs if suffix == 'sbref' or ext in ['bvec', 'bval']: images = self._layout.get( extension=['.nii', '.nii.gz'], suffix='dwi', return_type='filename', **file_ents) for img in images: create_association_pair(bf.path, img, 'IntendedFor', 'InformedBy') # Create Tag <-> Entity mappings, and any newly discovered Entities for md_key, md_val in file_md.items(): tag_string = '{}_{}'.format(bf.path, md_key) # Skip pairs that were already found in the filenames if tag_string in all_tags: file_val = all_tags[tag_string] if str(md_val) != file_val: msg = ( "Conflicting values found for entity '{}' in " "filename {} (value='{}') versus its JSON sidecar " "(value='{}'). Please reconcile this discrepancy." ) raise BIDSConflictingValuesError( msg.format(md_key, bf.path, file_val, md_val)) continue if md_key not in all_entities: all_entities[md_key] = Entity(md_key, is_metadata=True) self.session.add(all_entities[md_key]) tag = Tag(bf, all_entities[md_key], md_val) self.session.add(tag) if len(self.session.new) >= 1000: self.session.commit() self.session.commit()
def test_is_bids(fname): """Test that is_bids returns true for each file in a valid BIDS dataset.""" validator = BIDSValidator() assert validator.is_bids(fname)
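# Illustrative companion test (hypothetical file names, not drawn from a real
# dataset): spot-checks that the validator accepts canonical root-relative BIDS
# paths and rejects one that does not follow the naming rules.
def test_is_bids_examples():
    validator = BIDSValidator()
    assert validator.is_bids('/sub-01/anat/sub-01_T1w.nii.gz')
    assert validator.is_bids('/sub-01/func/sub-01_task-rest_bold.nii.gz')
    assert not validator.is_bids('/sub-01/notes.txt')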
def __init__(self, path): self.path = path self.subjects = [ op.basename(s).split('-')[1] for s in sorted(glob(op.join(self.path, 'sub-*'))) if op.isdir(s) ] self.submessage = f"Found {len(self.subjects)} participant(s) {self.subjects}" sessions = [] sessmessage = [] for this_sub in self.subjects: these_ses = [ op.basename(s).split('-')[1] for s in sorted( glob(op.join(self.path, f'sub-{this_sub}', 'ses-*'))) if op.isdir(s) ] sessmessage.append( f"Found {len(these_ses)} session(s) for sub-{this_sub} {these_ses}" ) sessions.append(these_ses) self.sessions = sessions self.sessmessage = sessmessage tasks = [] taskmessage = [] for this_sub, these_ses in zip(self.subjects, self.sessions): these_task = [] # Fall back to a single "no session" entry so subjects without ses-* folders are still scanned for this_ses in (these_ses or [None]): if this_ses is None: tmp = glob( op.join(self.path, f'sub-{this_sub}', 'func', f"*{'.nii'}*")) else: tmp = glob( op.join(self.path, f'sub-{this_sub}', f'ses-{this_ses}', 'func', f"*{'.nii'}*")) these_ses_task = list( set([ op.basename(f).split('task-')[1].split('_')[0] for f in tmp ])) nullstring = "" if this_ses is None else f"and ses-{this_ses}" taskmessage.append( f"Found {len(these_ses_task)} task(s) for sub-{this_sub} {nullstring} {these_ses_task}" ) these_task.append(these_ses_task) self.taskmessage = taskmessage tasks.append(these_task) self.tasks = tasks # Check BIDS validity invalid = [] validator = BIDSValidator() for path, subdirs, files in os.walk(self.path): for file in files: # BIDSValidator expects root-relative POSIX paths with a leading separator rel_file = os.path.relpath(os.path.join(path, file), self.path) if not validator.is_bids('/' + rel_file.replace(os.sep, '/')): invalid.append(rel_file) self.bids_invalid = invalid if invalid: warnings.warn( "One or more files do not conform to the BIDS standard. See self.bids_invalid for a list of files." )
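# Standalone sketch of the validity scan used in __init__ above: walk a dataset
# and collect the files that fail BIDSValidator. Assumes, as elsewhere in this
# code, that is_bids() expects root-relative POSIX paths with a leading separator.
def find_invalid_bids_files(root):
    validator = BIDSValidator()
    invalid = []
    for dirpath, _, files in os.walk(root):
        for fname in files:
            rel_file = os.path.relpath(os.path.join(dirpath, fname), root)
            if not validator.is_bids('/' + rel_file.replace(os.sep, '/')):
                invalid.append(rel_file)
    return invalid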