def test_derivative_getters():
    """Spaces should only be queryable once derivatives are indexed."""
    synth_path = join(get_test_data_path(), 'synthetic')

    # Without derivatives, get_spaces() is not available at all.
    no_derivs = BIDSLayout(synth_path, derivatives=False)
    with pytest.raises(AttributeError):
        no_derivs.get_spaces()

    # With derivatives indexed, both output spaces are discoverable.
    with_derivs = BIDSLayout(synth_path, derivatives=True)
    assert set(with_derivs.get_spaces()) == {'MNI152NLin2009cAsym', 'T1w'}
def layout_ds005_multi_derivs():
    """Return a ds005 layout with two derivative directories attached."""
    root = join(get_test_data_path(), 'ds005')
    layout = BIDSLayout(root)
    derivs = [
        join(get_test_data_path(), 'ds005_derivs'),
        join(root, 'derivatives', 'events'),
    ]
    layout.add_derivatives(derivs)
    return layout
def test_deriv_indexing():
    """Derivative dirs index by scope; a missing description warns."""
    root = join(get_test_data_path(), 'ds005')

    # bbr lacks dataset_description.json, so indexing should warn.
    with pytest.warns(UserWarning):
        layout = BIDSLayout(root, derivatives=join(root, 'derivatives', 'bbr'))

    # events has a proper description and should index cleanly.
    layout = BIDSLayout(root, derivatives=join(root, 'derivatives', 'events'))
    assert layout.get(scope='derivatives')
    assert layout.get(scope='events')
    assert not layout.get(scope='nonexistent')
def collection():
    """Return a fresh clone of the cached ds005 run-level collection."""
    key = 'ds005'
    if key not in cached_collections:
        layout = BIDSLayout(join(get_test_data_path(), key))
        cached_collections[key] = layout.get_collections(
            'run',
            types=['events'],
            scan_length=SCAN_LENGTH,
            merge=True,
            sampling_rate=10,
            subject=SUBJECTS,
        )
    # Always hand out a clone so callers can't mutate the shared cache.
    return cached_collections[key].clone()
def write_full_report(report_dict, run_context, deriv_dir):
    """Render the full-report template and write it under deriv_dir."""
    fl_layout = BIDSLayout(
        deriv_dir, config=['bids', 'derivatives', 'fitlins'])

    loader = jinja2.FileSystemLoader(
        searchpath=pkgr.resource_filename('fitlins', '/'))
    tpl = jinja2.Environment(loader=loader).get_template(
        'data/full_report.tpl')

    model = snake_to_camel(report_dict['model']['name'])
    target_file = op.join(
        deriv_dir,
        fl_layout.build_path({'model': model}, PATH_PATTERNS, validate=False))

    # Paths in the context are rewritten relative to the report location.
    context = deroot({**report_dict, **run_context}, op.dirname(target_file))
    Path(target_file).write_text(tpl.render(context))
def test_layout_repr_overshadow_run(tmp_path):
    """A test creating a layout to replicate #681."""
    dataset = tmp_path / "7t_trt"
    shutil.copytree(join(get_test_data_path(), '7t_trt'), dataset)

    # Inject a sidecar whose "run" key shadows the run entity (see #681).
    scans_json = dataset / "sub-01" / "ses-1" / "sub-01_ses-1_scans.json"
    scans_json.write_text(
        json.dumps({"run": {"Description": "metadata to cause #681"}}))

    summary = str(BIDSLayout(dataset))
    assert "Subjects: 10 | Sessions: 20 | Runs: 20" in summary
def test_load_layout(layout_synthetic_nodb, db_dir):
    """Saving then loading a layout must round-trip files and config."""
    db_path = str(db_dir / 'tmp_db')
    layout_synthetic_nodb.save(db_path)
    reloaded = BIDSLayout.load(db_path)

    original_files = sorted(layout_synthetic_nodb.get(return_type='file'))
    assert original_files == sorted(reloaded.get(return_type='file'))

    # Layout metadata should also survive the round trip.
    info1 = layout_synthetic_nodb.connection_manager.layout_info
    info2 = reloaded.connection_manager.layout_info
    for attr in ('root', 'absolute_paths', 'config', 'derivatives'):
        assert getattr(info1, attr) == getattr(info2, attr)
def _run_interface(self, runtime):
    """Find the single NIfTI file matching the BIDS spec in scan_info.

    Sets ``out_file`` to the matched filename. Raises ValueError when the
    query fails, matches nothing, or matches more than one file.
    """
    subject_id = self.inputs.scan_info['subject_id']
    session = self.inputs.scan_info['session']
    # Prefer a run from scan_info; fall back to the explicit input.
    if 'run' in self.inputs.scan_info.keys():
        run = self.inputs.scan_info['run']
    else:
        run = self.inputs.run

    # Human-readable spec, built once, used by every error message below
    # (the original rebuilt this string three times).
    spec = (str(self.inputs.suffix) + ' sub-' + subject_id
            + ' ses-' + session + ' run-' + str(run))

    from bids.layout import BIDSLayout
    layout = BIDSLayout(self.inputs.bids_dir, validate=False)

    query = dict(subject=subject_id, session=session,
                 extension=['nii', 'nii.gz'], suffix=self.inputs.suffix,
                 return_type='filename')
    if run is not None:
        # if there is no run spec to search, don't include it in the search
        query['run'] = run

    try:
        file_list = layout.get(**query)
    except Exception as err:
        # BUG FIX: the previous bare `except:` discarded the real cause
        # (and even swallowed the informative ValueErrors raised below,
        # replacing them with this generic message). Chain it instead.
        raise ValueError('Error with BIDS spec: %s' % spec) from err

    # Validation moved OUT of the try block so these messages are no
    # longer masked by the generic handler.
    if len(file_list) > 1:
        raise ValueError('Provided BIDS spec lead to duplicates: %s' % spec)
    if not file_list:
        # Typo fixed: "No file for found" -> "No file found".
        raise ValueError(
            'No file found corresponding to the following BIDS spec: %s'
            % spec)

    setattr(self, 'out_file', file_list[0])
    return runtime
def _run_interface(self, runtime):
    """Collect one entity dict per subject that has preprocessed BOLD data.

    Stores the list of ``{'subject': <id>}`` dicts in
    ``self._results['entities']``.
    """
    from bids.layout import BIDSLayout
    layout = BIDSLayout(self.inputs.bids_dir, derivatives=True)

    extensions = ['preproc_bold.nii.gz']
    entities = []
    for subject in np.sort(layout.get_subjects()):
        files = layout.get(subject=subject, task=layout.get_tasks(),
                           extensions=extensions)
        # Only record subjects with at least one matching file. The
        # original compared `file == []` and fell through an empty
        # `pass` branch; use truthiness instead.
        if files:
            entities.append({'subject': subject})

    self._results['entities'] = entities
    return runtime
def __init__(self, layout, model):
    """Initialize the analysis from a layout and a model spec.

    Parameters
    ----------
    layout : BIDSLayout or str
        An existing layout, or a dataset path from which one is built.
    model : dict or str
        A model specification dict, or a path to a JSON file with one.
    """
    if not isinstance(layout, BIDSLayout):
        layout = BIDSLayout(layout)
    self.layout = layout

    if isinstance(model, str):
        # BUG FIX: `json.load(open(model))` leaked the file handle;
        # use a context manager so it is closed deterministically.
        with open(model) as fobj:
            model = json.load(fobj)
    self.model = model

    self._load_blocks(model['blocks'])
def __init__(self, base_dir, task_id, sub_ids=None, verbose=True):
    """Index a BIDS dataset for a single task.

    Parameters
    ----------
    base_dir : str
        Root directory of the BIDS dataset.
    task_id : str
        Task whose subjects should be selected.
    sub_ids : list of str, optional
        Explicit subject IDs; defaults to all subjects with task data.
    verbose : bool
        If True, print a summary of the selection.
    """
    assert isinstance(base_dir, str), "base_dir should be type(str)"
    assert isinstance(task_id, str), "task_id should be type(str)"
    self.base_dir = base_dir
    self.layout = BIDSLayout(base_dir)
    self.task_id = task_id

    if sub_ids is None:
        self.sub_ids = self.layout.get_subjects(task=task_id)
    else:
        assert isinstance(sub_ids, list), "sub_ids should be type(list)"
        available = self.layout.get_subjects(task=task_id)
        for sub_id in sub_ids:
            assert isinstance(sub_id, str), \
                "elements in sub_ids should be type(str)"
            # BUG FIX: the f-string referenced an undefined name `task`
            # (NameError); use task_id.
            assert any(sub_id == known for known in available), \
                f"cannot find sub_ids with {task_id} task data"
        # BUG FIX: explicit sub_ids were validated but never stored,
        # so the verbose print below raised AttributeError.
        self.sub_ids = sub_ids

    if verbose:
        print(f'{len(self.sub_ids)} subjects in {self.task_id} task')
def _run_interface(self, runtime):
    """Resolve a preprocessed BOLD file and confounds TSV per entity set."""
    from bids.layout import BIDSLayout
    layout = BIDSLayout(self.inputs.bids_dir,
                        derivatives=self.inputs.derivatives)

    fmri_prep = []
    conf_raw = []
    for ents in self.inputs.entities:
        selectors = {**self.inputs.selectors, **ents}

        bold_matches = layout.get(extensions=['preproc_bold.nii.gz'],
                                  **selectors)
        if not bold_matches:
            raise FileNotFoundError(
                "Could not find BOLD file in {} with entities {}"
                "".format(self.inputs.bids_dir, selectors))
        if len(bold_matches) > 1:
            match_lines = "\n\t".join(
                '{} ({})'.format(f.path, layout.files[f.path].entities)
                for f in bold_matches)
            raise ValueError(
                "Non-unique BOLD file in {} with entities {}.\n"
                "Matches:\n\t{}"
                "".format(self.inputs.bids_dir, selectors, match_lines))

        confounds = layout.get(extensions=['confounds_regressors.tsv'],
                               **selectors)

        fmri_prep.append(bold_matches[0].path)
        conf_raw.append(confounds[0].path)

    self._results['fmri_prep'] = fmri_prep
    self._results['conf_raw'] = conf_raw
    self._results['entities'] = self.inputs.entities
    return runtime
def _run_interface(self, runtime):
    """Build the stats-models graph from a saved layout database."""
    from bids.layout import BIDSLayout
    from bids.modeling import BIDSStatsModelsGraph

    layout = BIDSLayout.load(database_path=self.inputs.database_path)
    graph = BIDSStatsModelsGraph(layout, self.inputs.model)
    graph.load_collections(**self.inputs.selectors)

    self._results['all_specs'] = self._load_graph(runtime, graph)
    return runtime
def test_nested_include_exclude_with_regex():
    # ~same as above test, but use regexps instead of strings
    models_patt = re.compile('.*dels$')
    extras_patt = re.compile('xtra')

    data_dir = join(get_test_data_path(), 'ds005')
    target1 = join(data_dir, 'models', 'ds-005_type-test_model.json')
    target2 = join(data_dir, 'models', 'extras', 'ds-005_type-test_model.json')

    # Forcing the models dir while ignoring 'extras' exposes only target1.
    layout = BIDSLayout(data_dir, ignore=[extras_patt],
                        force_index=[models_patt])
    assert layout.get_file(target1)
    assert not layout.get_file(target2)

    # Swapping the patterns flips which file is visible.
    layout = BIDSLayout(data_dir, ignore=[models_patt],
                        force_index=[extras_patt])
    assert not layout.get_file(target1)
    assert layout.get_file(target2)
def get_files():
    """Collect file paths and echo times per dataset/subject/run.

    Reads dataset names from ``dset_config.json``, walks the first three
    configured datasets, and writes a nested
    dataset -> subject -> run -> {files, echo_times} mapping to
    ``all_files.json``.
    """
    with open('dset_config.json', 'r') as fo:
        CONFIG = json.load(fo)
    DATA_DIR = op.abspath('/home/data/nbc/external-datasets/ds001491/')
    all_info = {}
    # NOTE(review): only the first three configured datasets are processed —
    # confirm the [:3] slice is intentional and not leftover debugging.
    for dset_name in list(CONFIG.keys())[:3]:
        layout = BIDSLayout(op.join(DATA_DIR, dset_name))
        cfg = CONFIG[dset_name]
        task = cfg['task']
        dset_info = {}
        for sub in layout.get_subjects():
            runs = layout.get_runs(subject=sub, task=task)
            sub_info = {}
            for run in runs:
                run_info = {}
                run_info['files'] = []
                run_info['echo_times'] = []
                for echo in sorted(
                        layout.get_echoes(subject=sub, task=task, run=run)):
                    raw_files = layout.get(subject=sub, task=task, run=run,
                                           echo=echo, extensions='.nii.gz')
                    preproc_files = layout.get(subject=sub, task=task,
                                               run=run, root='afni-step1',
                                               echo=echo,
                                               extensions='.nii.gz',
                                               desc='realign')
                    # NOTE(review): the next line overwrites the preproc
                    # query result with a copy of the raw files, making the
                    # preceding layout.get(...) dead code — confirm this is
                    # a deliberate workaround and not a bug.
                    preproc_files = raw_files[:]
                    if len(preproc_files) != 1:
                        print(preproc_files)
                        raise Exception('BAD')
                    # Replace filename with path when using new version of bids
                    run_info['files'].append(preproc_files[0].filename)
                    metadata = layout.get_metadata(preproc_files[0].filename)
                    run_info['echo_times'].append(metadata['EchoTime'])
                sub_info[run] = run_info
            dset_info[sub] = sub_info
        all_info[dset_name] = dset_info
    with open('all_files.json', 'w') as fo:
        json.dump(all_info, fo, indent=4, sort_keys=True)
def init(cls):
    """Create a new BIDS Layout accessible with :attr:`~execution.layout`."""
    if cls._layout is None:
        import re
        from bids.layout import BIDSLayout

        # The database directory must exist before indexing.
        db_dir = cls.work_dir / 'bids.db'
        db_dir.mkdir(exist_ok=True, parents=True)

        ignore_patterns = ("code", "stimuli", "sourcedata", "models",
                           "derivatives", re.compile(r'^\.'))
        cls._layout = BIDSLayout(
            str(cls.bids_dir),
            validate=False,
            # database_path=str(db_dir),
            ignore=ignore_patterns)
    cls.layout = cls._layout
def _run_interface(self, runtime):
    """Set up a BIDS Analysis from a saved layout database."""
    from bids.analysis import Analysis
    from bids.layout import BIDSLayout

    layout = BIDSLayout.load(database_path=self.inputs.database_path)
    analysis = Analysis(model=self.inputs.model, layout=layout)
    analysis.setup(drop_na=False, **self.inputs.selectors)

    self._load_level1(runtime, analysis)
    self._load_higher_level(runtime, analysis)
    return runtime
def load_bids_data(self):
    """
    Loads the BIDS study using the BIDSLayout function (part of the
    pybids package) and return the object.

    :return: bids structure
    """
    config_path = os.environ['LORIS_MRI'] + "/python/lib/bids.json"
    ignored_dirs = ['/code/', '/sourcedata/', '/log/', '.git/']
    return BIDSLayout(root=self.bids_dir,
                      config=config_path,
                      ignore=ignored_dirs)
def init(cls):
    """Create a new BIDS Layout accessible with :attr:`~execution.layout`."""
    if cls._layout is None:
        import re
        from bids.layout.index import BIDSLayoutIndexer
        from bids.layout import BIDSLayout

        # Reuse a caller-provided database dir, or create a per-run one.
        _db_path = cls.bids_database_dir or (
            cls.work_dir / cls.run_uuid / "bids_db"
        )
        _db_path.mkdir(exist_ok=True, parents=True)

        # Recommended after PyBIDS 12.1
        _indexer = BIDSLayoutIndexer(
            validate=False,
            ignore=(
                "code",
                "stimuli",
                "sourcedata",
                "models",
                "derivatives",
                "scripts",
                # Hidden files and directories (e.g. .git, .datalad)
                re.compile(r"^\."),
                # Exclude modalities and contrasts ignored by MRIQC (doesn't know how to QC)
                re.compile(
                    r"sub-[a-zA-Z0-9]+(/ses-[a-zA-Z0-9]+)?/(dwi|fmap|perf)/"
                ),
                re.compile(
                    r"sub-[a-zA-Z0-9]+(/ses-[a-zA-Z0-9]+)?/anat/.*_"
                    r"(PDw|T2starw|FLAIR|inplaneT1|inplaneT2|PDT2|angio|T2star"
                    r"|FLASH|PD|T1map|T2map|T2starmap|R1map|R2map|R2starmap|PDmap"
                    r"|MTRmap|MTsat|UNIT1|T1rho|MWFmap|MTVmap|PDT2map|Chimap"
                    r"|S0map|M0map|defacemask|MESE|MEGRE|VFA|IRT1|MP2RAGE|MPM|MTS|MTR)\."
                ),
                re.compile(
                    r"sub-[a-zA-Z0-9]+(/ses-[a-zA-Z0-9]+)?/func/.*"
                    r"_(cbv|sbref|phase|events|physio|stim)\."
                ),
            ),
        )
        # Only reset the on-disk database when we created it ourselves.
        cls._layout = BIDSLayout(
            str(cls.bids_dir),
            database_path=_db_path,
            reset_database=cls.bids_database_dir is None,
            indexer=_indexer,
        )
        cls.bids_database_dir = _db_path
    cls.layout = cls._layout
def _run_interface(self, runtime):
    """Pair each entity set with its preprocessed BOLD file and brain mask.

    For every entity dict in ``self.inputs.entities``, queries the saved
    layout for exactly one ``desc-preproc`` BOLD file, then looks up the
    ``desc-brain`` mask with otherwise identical entities. Populates the
    ``bold_files``, ``mask_files`` and ``entities`` results.
    """
    from bids.layout import BIDSLayout
    layout = BIDSLayout.load(database_path=self.inputs.database_path)
    bold_files = []
    mask_files = []
    entities = []
    for ents in self.inputs.entities:
        # Entity dicts override the 'preproc' default; explicit selectors
        # override both.
        selectors = {'desc': 'preproc', **ents, **self.inputs.selectors}
        bold_file = layout.get(**selectors)
        if len(bold_file) == 0:
            raise FileNotFoundError(
                "Could not find BOLD file in {} with entities {}"
                "".format(layout.root, selectors)
            )
        elif len(bold_file) > 1:
            raise ValueError(
                "Non-unique BOLD file in {} with entities {}.\n"
                "Matches:\n\t{}"
                "".format(
                    layout.root,
                    selectors,
                    "\n\t".join(
                        '{} ({})'.format(f.path, layout.files[f.path].entities)
                        for f in bold_file
                    ),
                )
            )
        # Select exactly matching mask file (may be over-cautious)
        bold_ents = layout.parse_file_entities(bold_file[0].path)
        bold_ents['suffix'] = 'mask'
        bold_ents['desc'] = 'brain'
        bold_ents['extension'] = ['.nii', '.nii.gz']
        mask_file = layout.get(**bold_ents)
        # Strip the mask-specific keys before reporting the entities.
        bold_ents.pop('suffix')
        bold_ents.pop('desc')
        bold_files.append(bold_file[0].path)
        # A missing mask is tolerated and recorded as None.
        mask_files.append(mask_file[0].path if mask_file else None)
        entities.append(bold_ents)
    self._results['bold_files'] = bold_files
    self._results['mask_files'] = mask_files
    self._results['entities'] = entities
    return runtime
def main(bids_dir, subject=None, session=None):
    """Write fieldmap JSON sidecars (with IntendedFor) for each BOLD run.

    For every functional BOLD image matching subject/session, find the
    unique EPI fieldmap with the same acquisition and run, and write a
    JSON sidecar next to it pointing back at the BOLD file.
    """
    import os.path

    print(subject, session, bids_dir)
    layout = BIDSLayout(bids_dir)
    bolds = layout.get(subject=subject, session=session, extensions='nii',
                       datatype='func', suffix='bold')
    for bold in bolds:
        print(bold)
        epi = layout.get(suffix='epi', subject=subject, session=session,
                         extensions='nii', acquisition=bold.acquisition,
                         run=bold.run)
        print(epi)
        assert (len(epi) == 1), 'No EPI found for {}'.format(bold.path)
        epi = epi[0]
        json_d = {
            'PhaseEncodingDirection': 'i',
            'TotalReadoutTime': 0.04,
            # IntendedFor paths are relative to the subject directory.
            'IntendedFor': bold.path.replace('sub-{}/'.format(subject), '')
        }
        print(json_d)
        # BUG FIX: `epi.path.replace("nii", "json")` replaced the FIRST
        # occurrence of "nii" anywhere in the path (e.g. inside a
        # directory name), not the extension. Swap only the extension.
        root, _ = os.path.splitext(epi.path)
        json_filename = root + '.json'
        print(json_filename)
        with open(json_filename, 'w') as f:
            json.dump(json_d, f)
def init_preproc_workflow(bids_dir, output_dir, work_dir, subject_list, session_label, task_label, run_label): """ A workflow for preprocessing complex-valued multi-echo fMRI data with single-band reference images and available T1s. """ # setup workflow participant_wf = pe.Workflow(name='participant_wf') participant_wf.base_dir = os.path.join(work_dir, 'complex_preprocessing') os.makedirs(participant_wf.base_dir, exist_ok=True) # Read in dataset, but don't validate because phase isn't supported yet layout = BIDSLayout(bids_dir, validate=False) for subject_label in subject_list: # collect the necessary inputs subject_data = collect_data(layout, subject_label, task=task_label, run=run_label, ses=session_label) single_subject_wf = init_single_subject_wf( name='single_subject_' + subject_label + '_wf', output_dir=output_dir, layout=layout, bold_mag_files=subject_data['bold_mag_files'], bold_mag_metadata=subject_data['bold_mag_metadata'], bold_phase_files=subject_data['bold_phase_files'], bold_phase_metadata=subject_data['bold_phase_metadata'], sbref_mag_files=subject_data['sbref_mag_files'], sbref_mag_metadata=subject_data['sbref_mag_metadata'], sbref_phase_files=subject_data['sbref_phase_files'], sbref_phase_metadata=subject_data['sbref_phase_metadata'], t1w_files=subject_data['t1w_files'], t1w_metadata=subject_data['t1w_metadata'], t2w_files=subject_data['t2w_files'], t2w_metadata=subject_data['t2w_metadata'], ) single_subject_wf.config['execution']['crashdump_dir'] = os.path.join( output_dir, 'sub-' + subject_label, 'log') for node in single_subject_wf._get_all_nodes(): node.config = deepcopy(single_subject_wf.config) participant_wf.add_nodes([single_subject_wf]) return participant_wf
def test_automodel_runs(model):
    """An auto-generated model should execute at run and subject levels."""
    layout = BIDSLayout(join(get_test_data_path(), 'ds005'))

    graph = BIDSStatsModelsGraph(layout, model)
    graph.load_collections(scan_length=480, subject=["01", "02"])

    # Run level: 3 runs x 2 subjects = 6 outputs, one contrast each.
    run_outputs = graph["Run"].run()
    assert len(run_outputs) == 6
    run_contrasts = list(chain(*[node.contrasts for node in run_outputs]))
    assert len(run_contrasts) == 6

    # 2 subjects x 1 contrast
    subj_outputs = graph["Subject"].run(run_contrasts)
    assert len(subj_outputs) == 2
    subj_contrasts = list(chain(*[node.contrasts for node in subj_outputs]))
    assert len(subj_contrasts) == 2
def _run_interface(self, runtime):
    """Locate a single anat/func NIfTI for the given subject/session[/run].

    Sets ``out_file`` to the matched (gzip-compressed) image path.
    """
    from bids.layout import BIDSLayout
    layout = BIDSLayout(self.inputs.bids_dir)

    # Human-readable spec, built once, used by the error messages below.
    spec = str(self.inputs.datatype + '_' + self.inputs.subject_id
               + '_' + self.inputs.session + '_' + self.inputs.run)

    query = dict(subject=self.inputs.subject_id,
                 session=self.inputs.session,
                 extension=['nii', 'nii.gz'],
                 datatype=self.inputs.datatype)
    if self.inputs.datatype == 'func':
        # Functional images are additionally selected by run.
        query['run'] = self.inputs.run
    elif self.inputs.datatype != 'anat':
        raise ValueError('Wrong datatype %s' % (self.inputs.datatype))

    try:
        bids_file = layout.get(**query)
        file = layout.get(return_type='filename', **query)[0]
    except Exception as err:
        # BUG FIX: the previous bare `except:` discarded the real cause
        # (and masked the 'Wrong datatype' / duplicate errors raised
        # inside the try). Chain the original exception instead.
        raise ValueError('Error with BIDS spec: %s' % spec) from err

    # Moved out of the try block so this message is no longer swallowed
    # by the generic handler.
    if len(bids_file) > 1:
        raise ValueError('Provided BIDS spec lead to duplicates: %s' % spec)

    nii_format = bids_file[0].get_entities()['extension']
    # RABIES only work with compressed .nii for now
    if nii_format == 'nii':
        # Use a list-based subprocess call so paths containing spaces or
        # shell metacharacters are handled safely (os.system interpolated
        # the raw path into a shell command string).
        import subprocess
        subprocess.run(['gzip', file], check=True)
        file = file + '.gz'

    setattr(self, 'out_file', file)
    return runtime
def test_resampling_edge_case(tmpdir, TR, nvols):
    """Dense resampling at 1/TR must yield exactly nvols samples."""
    tmpdir.chdir()
    os.makedirs('sub-01/func')

    # Minimal events file with a single short event.
    with open('sub-01/func/sub-01_task-task_events.tsv', 'w') as fobj:
        fobj.write('onset\tduration\tval\n1\t0.1\t1\n')
    # Sidecar carrying the repetition time.
    with open('sub-01/func/sub-01_task-task_bold.json', 'w') as fobj:
        json.dump({'RepetitionTime': TR}, fobj)

    # Synthesize an empty BOLD series with matching zooms.
    img = nb.Nifti1Image(np.zeros((5, 5, 5, nvols), dtype=np.int16),
                         np.diag((2.5, 2.5, 2.5, 1)))
    img.header.set_zooms((2.5, 2.5, 2.5, TR))
    img.to_filename('sub-01/func/sub-01_task-task_bold.nii.gz')

    layout = BIDSLayout('.', validate=False)
    coll = load_variables(layout).get_collections('run')[0]
    dense = coll.variables['val'].to_dense(coll.sampling_rate)
    regressor = dense.resample(1.0 / TR).values
    assert regressor.shape == (nvols, 1)
def init(cls):
    """Create a new BIDS Layout accessible with :attr:`~execution.layout`."""
    # Export the FreeSurfer license for downstream tools, if present.
    if cls.fs_license_file and Path(cls.fs_license_file).is_file():
        os.environ["FS_LICENSE"] = str(cls.fs_license_file)

    if cls._layout is None:
        import re
        from bids.layout import BIDSLayout

        # Reuse a caller-provided database dir, or create a per-run one.
        _db_path = cls.bids_database_dir or (
            cls.work_dir / cls.run_uuid / "bids_db"
        )
        _db_path.mkdir(exist_ok=True, parents=True)
        cls._layout = BIDSLayout(
            str(cls.bids_dir),
            validate=False,
            database_path=_db_path,
            # Only reset the on-disk database when we created it ourselves.
            reset_database=cls.bids_database_dir is None,
            ignore=(
                "code",
                "stimuli",
                "sourcedata",
                "models",
                re.compile(r"^\."),
            ),
        )
        cls.bids_database_dir = _db_path
    cls.layout = cls._layout

    if cls.bids_filters:
        from bids.layout import Query

        # unserialize pybids Query enum values
        # NOTE(review): v[7:-4] assumes serialized values of the form
        # "<Query.NAME: ...>" — confirm against the serializer.
        for acq, filters in cls.bids_filters.items():
            cls.bids_filters[acq] = {
                k: getattr(Query, v[7:-4])
                if not isinstance(v, Query) and "Query" in v
                else v
                for k, v in filters.items()
            }

    if "all" in cls.debug:
        cls.debug = list(DEBUG_MODES)
def create_datasource_indiv_params(data_dir, indiv_params, subjects=None,
                                   sessions=None, acquisitions=None):
    """ Create a datasource node that have iterables following BIDS format,
    including a indiv_params file"""
    bids_datasource = pe.Node(
        interface=BIDSDataGrabberParams(indiv_params),
        name='bids_datasource')
    bids_datasource.inputs.base_dir = data_dir
    bids_datasource.inputs.output_query = {
        'T1': {"datatype": "anat", "suffix": "T1w",
               "extensions": ["nii", ".nii.gz"]},
        'T2': {"datatype": "anat", "suffix": "T2w",
               "extensions": ["nii", ".nii.gz"]},
    }

    layout = BIDSLayout(data_dir)

    # Verbose
    print("BIDS layout:", layout)
    print("\t", layout.get_subjects())
    print("\t", layout.get_sessions())

    # Default to everything the layout knows about.
    subjects = layout.get_subjects() if subjects is None else subjects
    sessions = layout.get_sessions() if sessions is None else sessions

    iterables = [('subject', subjects), ('session', sessions)]
    if acquisitions is not None:
        iterables.append(('acquisition', acquisitions))
    bids_datasource.iterables = iterables

    return bids_datasource
def load_bids_data(self):
    """
    Loads the BIDS study using the BIDSLayout function (part of the
    pybids package) and return the object.

    :return: bids structure
    """
    if self.verbose:
        print('Loading the BIDS dataset with BIDS layout library...\n')

    config_path = os.environ['LORIS_MRI'] + "/python/lib/bids.json"
    ignored_dirs = ['/code/', '/sourcedata/', '/log/', '.git/']
    bids_layout = BIDSLayout(root=self.bids_dir,
                             config=config_path,
                             ignore=ignored_dirs)

    if self.verbose:
        print('\t=> BIDS dataset loaded with BIDS layout\n')

    return bids_layout
def get_files():
    """Map each subject to preprocessed echo files and their echo times."""
    DSET_DIR = op.abspath('/home/data/nbc/external-datasets/ds001491/')
    layout = BIDSLayout(DSET_DIR)
    layout.add_derivatives(
        '/home/data/nbc/external-datasets/ds001491/derivatives/afni-step1/')
    task = 'images'

    info = {}
    for sub in sorted(layout.get_subjects()):
        print(sub)
        sub_info = {'files': [], 'echo_times': []}
        for echo in sorted(layout.get_echoes(subject=sub, task=task)):
            raw_files = layout.get(subject=sub, task=task, echo=echo,
                                   extensions='.nii.gz')
            candidates = layout.get(subject=sub, task=task,
                                    root='afni-step1',
                                    extensions='.nii.gz')
            # For some reason pybids doesn't index echo in derivatives
            preproc_files = [f for f in candidates
                             if 'echo-{0}'.format(echo) in f.filename]
            if len(preproc_files) != 1:
                print(preproc_files)
                raise Exception('BAD')
            # Replace filename with path when using new version of bids
            sub_info['files'].append(preproc_files[0].path)
            sub_info['echo_times'].append(
                raw_files[0].metadata['EchoTime'])
        info[sub] = sub_info

    with open('preproc_files.json', 'w') as fo:
        json.dump(info, fo, indent=4, sort_keys=True)
def init(cls):
    """Create a new BIDS Layout accessible with :attr:`~execution.layout`."""
    # Export the FreeSurfer license for downstream tools, if present.
    if cls.fs_license_file and Path(cls.fs_license_file).is_file():
        os.environ["FS_LICENSE"] = str(cls.fs_license_file)

    if cls._layout is None:
        import re
        from bids.layout import BIDSLayout

        # The database directory must exist before indexing.
        work_dir = cls.work_dir / 'bids.db'
        work_dir.mkdir(exist_ok=True, parents=True)
        cls._layout = BIDSLayout(
            str(cls.bids_dir),
            validate=False,
            # database_path=str(work_dir),
            ignore=("code", "stimuli", "sourcedata", "models",
                    "derivatives", re.compile(r'^\.')))
    cls.layout = cls._layout

    if cls.bids_filters:
        from bids.layout import Query

        # unserialize pybids Query enum values
        # NOTE(review): v[7:-4] assumes serialized values of the form
        # "<Query.NAME: ...>" — confirm against the serializer.
        for acq, filters in cls.bids_filters.items():
            cls.bids_filters[acq] = {
                k: getattr(Query, v[7:-4])
                if not isinstance(v, Query) and 'Query' in v
                else v
                for k, v in filters.items()}
def _run_interface(self, runtime):
    """Index the dataset and set up a BIDS Analysis from the model.

    Undefined include/exclude patterns default to None; undefined
    derivatives defaults to False.
    """
    from bids.analysis import Analysis
    from bids.layout import BIDSLayout

    include = self.inputs.include_pattern
    exclude = self.inputs.exclude_pattern
    derivatives = self.inputs.derivatives
    if not isdefined(include):
        include = None
    if not isdefined(exclude):
        exclude = None
    if not isdefined(derivatives):
        # BUG FIX: this branch previously assigned `exclude = False`,
        # clobbering the exclude pattern and leaving `derivatives`
        # undefined when passed to BIDSLayout below.
        derivatives = False

    layout = BIDSLayout(self.inputs.bids_dir, include=include,
                        exclude=exclude, derivatives=derivatives)
    analysis = Analysis(model=self.inputs.model, layout=layout)
    analysis.setup(drop_na=False, desc='preproc', **self.inputs.selectors)

    self._load_level1(runtime, analysis)
    self._load_higher_level(runtime, analysis)
    return runtime
def test_auto_model_analysis(model):
    """An auto-generated ds005 model must set up and expose the expected
    run-, subject- and dataset-level blocks, contrasts and design columns."""
    layout_path = join(get_test_data_path(), 'ds005')
    layout = BIDSLayout(layout_path)

    # Test to make sure an analaysis can be setup from the generated model
    analysis = Analysis(layout, model)
    analysis.setup(scan_length=480)

    assert model['Name'] == 'ds005_mixedgamblestask'

    # run level
    block = model['Steps'][0]
    assert block['Name'] == 'Run'
    assert block['Level'] == 'Run'
    assert block['Transformations'][0]['Name'] == 'Factor'
    assert block['Contrasts'][0]['Name'] == 'run_parametric gain'
    assert block['Contrasts'][0]['Weights'] == [1]
    assert block['Contrasts'][0]['Type'] == 't'

    # subject level
    block = model['Steps'][1]
    assert block['Name'] == 'Subject'
    assert block['Level'] == 'Subject'
    assert block['Model']['X'][0] == 'run_parametric gain'
    assert block['Contrasts'][0]['Name'] == 'subject_run_parametric gain'
    # Subject-level contrasts combine runs with fixed effects.
    assert block['Contrasts'][0]['Type'] == 'FEMA'

    # dataset level
    block = model['Steps'][2]
    assert block['Name'] == 'Dataset'
    assert block['Level'] == 'Dataset'
    assert block['Model']['X'][0] == 'subject_run_parametric gain'
    assert block['Contrasts'][0][
        'Name'] == 'dataset_subject_run_parametric gain'
    assert block['Contrasts'][0]['Type'] == 't'
def create_datasource_indiv_params(output_query, data_dir, indiv_params,
                                   subjects=None, sessions=None,
                                   acquisitions=None, reconstructions=None):
    """ Create a datasource node that have iterables following BIDS format,
    including a indiv_params file"""
    bids_datasource = pe.Node(
        interface=BIDSDataGrabberParams(indiv_params),
        name='bids_datasource')
    bids_datasource.inputs.base_dir = data_dir
    bids_datasource.inputs.output_query = output_query

    layout = BIDSLayout(data_dir)

    # Verbose
    print("BIDS layout:", layout)
    print("\t", layout.get_subjects())
    print("\t", layout.get_sessions())

    # Default to everything the layout knows about.
    subjects = layout.get_subjects() if subjects is None else subjects
    sessions = layout.get_sessions() if sessions is None else sessions

    iterables = [('subject', subjects), ('session', sessions)]
    if acquisitions is not None:
        iterables.append(('acquisition', acquisitions))
    if reconstructions is not None:
        iterables.append(('reconstruction', reconstructions))
    bids_datasource.iterables = iterables

    return bids_datasource
def run_coll():
    """Return the merged run-level events collection for ds005."""
    layout = BIDSLayout(join(get_test_data_path(), 'ds005'))
    return layout.get_collections(
        'run', types=['events'], merge=True, scan_length=480)