def test_nested_include_exclude():
    """Force-index and ignore directives should compose when nested."""
    root = join(get_test_data_path(), 'ds005')
    model_json = join(root, 'models', 'ds-005_type-test_model.json')
    extras_json = join(root, 'models', 'extras', 'ds-005_type-test_model.json')

    # Exclude a subdirectory of a force-indexed directory.
    layout = BIDSLayout(root, validate=True, force_index=['models'],
                        ignore=[os.path.join('models', 'extras')])
    assert layout.get_file(model_json)
    assert not layout.get_file(extras_json)

    # Force-index a subdirectory of an ignored directory.
    layout = BIDSLayout(root, validate=True, ignore=['models'],
                        force_index=[os.path.join('models', 'extras')])
    assert not layout.get_file(model_json)
    assert layout.get_file(extras_json)

    # A specific file can be force-indexed despite directory-level exclusion.
    layout = BIDSLayout(root, validate=True,
                        force_index=['models', extras_json],
                        ignore=[os.path.join('models', 'extras')])
    assert layout.get_file(model_json)
    assert layout.get_file(extras_json)
def test_derivative_getters():
    """Dynamic entity getters should only exist when derivatives are indexed."""
    synth_path = join(get_test_data_path(), 'synthetic')
    without_derivs = BIDSLayout(synth_path, derivatives=False)
    with_derivs = BIDSLayout(synth_path, derivatives=True)
    # 'space' only appears in derivatives, so the getter is absent here.
    with pytest.raises(AttributeError):
        without_derivs.get_spaces()
    assert set(with_derivs.get_spaces()) == {'MNI152NLin2009cAsym', 'T1w'}
def test_load_layout_config_not_overwritten(layout_synthetic_nodb, tmpdir):
    """Reloading from a saved DB must keep the cached config, not re-read disk."""
    dataset_copy = tmpdir / "modified"
    shutil.copytree(layout_synthetic_nodb.root, dataset_copy)

    # Persist the index before modifying the dataset.
    db_path = str(tmpdir / 'tmp_db')
    BIDSLayout(dataset_copy).save(db_path)

    # Mutate dataset_description.json on disk after the save.
    description_file = dataset_copy / "dataset_description.json"
    with description_file.open('r') as fobj:
        description = json.load(fobj)
    description["DatasetType"] = "derivative"
    with description_file.open('w') as fobj:
        json.dump(description, fobj)

    # One layout comes from the DB, one from a fresh index of the modified tree.
    db_layout = BIDSLayout(dataset_copy, database_path=db_path)
    fresh_layout = BIDSLayout(dataset_copy, validate=False)

    cm1 = db_layout.connection_manager
    cm2 = fresh_layout.connection_manager
    for attr in ['root', 'absolute_paths', 'derivatives']:
        assert getattr(cm1.layout_info, attr) == getattr(cm2.layout_info, attr)
    # Config differs: the DB retains the pre-modification description.
    assert cm1.layout_info.config != cm2.layout_info.config
def test_layout_with_validation():
    """validate=True should drop files that are not BIDS-compliant."""
    data_dir = join(get_test_data_path(), '7t_trt')
    strict = BIDSLayout(data_dir, validate=True)
    permissive = BIDSLayout(data_dir, validate=False)
    assert len(strict.files) < len(permissive.files)
    # 'test.bval' is not a valid BIDS filename.
    badfile = join(data_dir, 'test.bval')
    assert badfile not in strict.files
    assert badfile in permissive.files
def test_dataset_without_datasettype_parsed_as_raw(self):
    """A derivative dataset missing DatasetType is indexed as raw when validated."""
    dataset_path = Path("ds005_derivs", "format_errs", "no_dataset_type")
    full_path = Path(get_test_data_path()) / dataset_path
    unvalidated = BIDSLayout(full_path, validate=False)
    assert len(unvalidated.get()) == 4
    # 'desc' is a derivatives-only entity, so querying it here must fail.
    with pytest.raises(ValueError):
        unvalidated.get(desc="preproc")
    validated = BIDSLayout(full_path)
    assert len(validated.get()) == 1
def synthetic(request):
    """Fixture: variables from the synthetic dataset, optionally looping preproc.

    Saves and restores the global 'loop_preproc' option around the test.
    """
    root = join(get_test_data_path(), 'synthetic')
    saved_pref = get_option('loop_preproc')
    if request.param == 'preproc':
        set_option('loop_preproc', True)
        layout = BIDSLayout((root, ['bids', 'derivatives']))
    else:
        set_option('loop_preproc', saved_pref)
        layout = BIDSLayout(root, exclude='derivatives')
    yield request.param, load_variables(layout, skip_empty=True)
    # Restore the global option once the test is done.
    set_option('loop_preproc', saved_pref)
def synthetic(request):
    """Fixture: variables from the synthetic dataset, raw or preproc flavor."""
    root = join(get_test_data_path(), 'synthetic')
    if request.param == 'preproc':
        # Preproc flavor needs derivatives plus space/desc selectors.
        layout = BIDSLayout(root, derivatives=True)
        dataset = load_variables(layout, skip_empty=True,
                                 desc='preproc', space='T1w')
    else:
        dataset = load_variables(BIDSLayout(root), skip_empty=True)
    yield request.param, dataset
def test_restricted_words_in_path(tmpdir):
    """Reserved directory names in the path must not confuse indexing."""
    orig_path = join(get_test_data_path(), 'synthetic')
    parent_dir = str(tmpdir / 'derivatives' / 'pipeline')
    os.makedirs(parent_dir)
    # Symlink the dataset under a path containing 'derivatives'/'sourcedata'.
    new_path = join(parent_dir, 'sourcedata')
    os.symlink(orig_path, new_path)

    orig_layout = BIDSLayout(orig_path)
    new_layout = BIDSLayout(new_path)

    # Compare relative file sets; both indexes should be identical.
    orig_files = {f.replace(orig_path, '') for f in orig_layout.files}
    new_files = {f.replace(new_path, '') for f in new_layout.files}
    assert orig_files == new_files
def test_path_arguments():
    """pathlib.Path inputs should be accepted for root and derivatives."""
    data_dir = join(get_test_data_path(), 'ds005')
    deriv_dir = join(data_dir, 'derivatives', 'events')
    # Derivatives may be given as a single Path or as a list of Paths.
    for derivatives in (Path(deriv_dir), [Path(deriv_dir)]):
        layout = BIDSLayout(Path(data_dir), derivatives=derivatives)
        assert layout.get(scope='derivatives')
        assert layout.get(scope='events')
        assert not layout.get(scope='nonexistent')
def test_deriv_indexing():
    """Derivative dirs should index; a missing description should warn."""
    data_dir = join(get_test_data_path(), 'ds005')

    # 'bbr' lacks dataset_description.json, so indexing emits a UserWarning.
    missing_desc = join(data_dir, 'derivatives', 'bbr')
    with pytest.warns(UserWarning):
        BIDSLayout(data_dir, derivatives=missing_desc)

    # 'events' is well-formed and indexes cleanly.
    events_dir = join(data_dir, 'derivatives', 'events')
    layout = BIDSLayout(data_dir, derivatives=events_dir)
    assert layout.get(scope='derivatives')
    assert layout.get(scope='events')
    assert not layout.get(scope='nonexistent')
def test_ignore_files(layout_ds005):
    """Explicit ignore lists override the default ignore patterns."""
    data_dir = join(get_test_data_path(), 'ds005')
    in_models = join(data_dir, 'models', 'ds-005_type-test_model.json')
    in_extras = join(data_dir, 'models', 'extras', 'ds-005_type-test_model.json')

    default_ignore = BIDSLayout(data_dir, validate=False)
    assert in_models not in layout_ds005.files
    assert in_models not in default_ignore.files
    assert in_extras not in default_ignore.files

    # Passing ignore explicitly replaces the defaults, so models/ shows up,
    # but 'models/extras/' still matches the regex and stays hidden.
    custom = BIDSLayout(data_dir, validate=False,
                        ignore=[re.compile('xtra'), 'dummy'])
    assert in_models in custom.files
    assert in_extras not in custom.files
def test_nested_include_exclude_with_regex():
    """Same as the string-based nesting test, but with compiled regexes."""
    models_patt = re.compile('.*dels$')
    extras_patt = re.compile('xtra')
    data_dir = join(get_test_data_path(), 'ds005')
    model_json = join(data_dir, 'models', 'ds-005_type-test_model.json')
    extras_json = join(data_dir, 'models', 'extras',
                       'ds-005_type-test_model.json')

    # Regex exclusion nested inside regex inclusion.
    layout = BIDSLayout(data_dir, ignore=[extras_patt],
                        force_index=[models_patt])
    assert layout.get_file(model_json)
    assert not layout.get_file(extras_json)

    # Regex inclusion nested inside regex exclusion.
    layout = BIDSLayout(data_dir, ignore=[models_patt],
                        force_index=[extras_patt])
    assert not layout.get_file(model_json)
    assert layout.get_file(extras_json)
def layout_ds005_multi_derivs():
    """Fixture: ds005 layout with two derivative datasets attached."""
    data_root = get_test_data_path()
    layout = BIDSLayout(join(data_root, 'ds005'))
    layout.add_derivatives([
        join(data_root, 'ds005_derivs'),
        join(data_root, 'ds005', 'derivatives', 'events'),
    ])
    return layout
def main():
    """Run the first-level FSL pipeline for all configured subjects.

    Relies on module-level configuration (SUBJECTS, MEM_DIR, BIDS_DIR,
    num_runs, task) and pipeline-stage helpers defined elsewhere in the file.
    """
    print('Running subjects:', str(SUBJECTS))
    # Ensure the joblib/nipype memory cache directory exists.
    if not os.path.isdir(MEM_DIR):
        os.mkdir(MEM_DIR)
    mem = Memory(base_dir=MEM_DIR)
    layout = BIDSLayout(BIDS_DIR)
    # func_files[subject_index][run_index]
    if num_runs > 1:
        func_files = [[
            layout.get(type='bold', task=task, run=i + 1, subject=subj,
                       extensions='nii.gz')[0] for i in range(num_runs)
        ] for subj in SUBJECTS]
    else:
        # Single-run case: each subject entry is the raw query result
        # (a list), not an unwrapped file — NOTE(review): asymmetric with
        # the multi-run branch, confirm downstream helpers expect this.
        func_files = [
            layout.get(type='bold', task=task, subject=subj,
                       extensions='nii.gz') for subj in SUBJECTS
        ]
    events = get_events(func_files)
    confounds = get_confounds(func_files)
    info = get_info(events, confounds)
    # Chain the model-specification -> design -> estimation stages.
    specify_model_results = specify_model(layout, func_files, info)
    level1design_results = lv1_design(mem, layout, func_files,
                                      specify_model_results)
    modelgen_results = feat_model(mem, level1design_results)
    mask_results = masking(mem, func_files)
    film_gls(mem, mask_results, modelgen_results)
def test_auto_model_analysis(model):
    """An auto-generated model should drive a complete Analysis setup."""
    layout = BIDSLayout(join(get_test_data_path(), 'ds005'))

    # The generated model must be usable as-is.
    analysis = Analysis(layout, model)
    analysis.setup(scan_length=480)

    assert model['name'] == 'ds005_mixedgamblestask'

    run_block, subject_block, dataset_block = model['blocks'][:3]

    # Run level
    assert run_block['name'] == 'run'
    assert run_block['level'] == 'run'
    assert run_block['transformations'][0]['name'] == 'factor'
    assert run_block['model']['HRF_variables'][0] == 'trial_type.parametric gain'
    assert run_block['contrasts'][0]['name'] == 'run_parametric gain'
    assert run_block['contrasts'][0]['weights'] == [1]

    # Subject level
    assert subject_block['name'] == 'subject'
    assert subject_block['level'] == 'subject'
    assert subject_block['model']['variables'][0] == 'run_parametric gain'
    assert subject_block['contrasts'][0]['name'] == 'subject_run_parametric gain'

    # Dataset level
    assert dataset_block['name'] == 'dataset'
    assert dataset_block['level'] == 'dataset'
    assert dataset_block['model']['variables'][0] == 'subject_run_parametric gain'
    assert dataset_block['contrasts'][0]['name'] == 'dataset_subject_run_parametric gain'
def test_downsampling(tmpdir):
    """Resampling a dense variable to a lower rate must low-pass filter it.

    Builds a minimal one-subject BIDS dataset on disk whose events trace a
    two-tone signal (0.025 Hz + 0.1166 Hz), then downsamples from TR=2s to
    TR=6s and checks that the above-Nyquist tone is attenuated while the
    in-band tone survives.
    """
    tmpdir.chdir()
    os.makedirs('sub-01/func')
    import numpy as np
    TR, newTR, nvols, newvols = 2.00000, 6.0, 90, 30
    Fs = 1 / TR
    t = np.linspace(0, int(nvols / Fs), nvols, endpoint=False)
    # 0.1166 Hz exceeds the new Nyquist rate (1/12 Hz); 0.025 Hz does not.
    values = np.sin(0.025 * 2 * np.pi * t) + np.cos(0.1166 * 2 * np.pi * t)
    # One event per volume carrying the signal sample as its amplitude.
    with open('sub-01/func/sub-01_task-task_events.tsv', 'w') as fobj:
        fobj.write('onset\tduration\tval\n')
        for idx, val in enumerate(values):
            fobj.write('%f\t%f\t%f\n' % (idx * TR, TR, val))
    with open('sub-01/func/sub-01_task-task_bold.json', 'w') as fobj:
        json.dump({'RepetitionTime': TR}, fobj)
    # Minimal BOLD image so the layout has a run to attach events to.
    dataobj = np.zeros((5, 5, 5, nvols), dtype=np.int16)
    affine = np.diag((2.5, 2.5, 2.5, 1))
    img = nb.Nifti1Image(dataobj, affine)
    img.header.set_zooms((2.5, 2.5, 2.5, TR))
    img.to_filename('sub-01/func/sub-01_task-task_bold.nii.gz')
    layout = BIDSLayout('.', validate=False)
    coll = load_variables(layout).get_collections('run')[0]
    dense_var = coll.variables['val'].to_dense(1.0 / TR)
    regressor = dense_var.resample(1.0 / newTR).values
    assert regressor.shape == (newvols, 1)
    # This checks that the filtering has happened. If it has not, then
    # this value for this frequency bin will be an alias and have a
    # very different amplitude
    assert np.allclose(
        np.abs(np.fft.fft(regressor.values.ravel()))[9], 0.46298273)
    # This checks that the signal (0.025 Hz) within the new Nyquist
    # rate actually gets passed through.
    assert np.allclose(
        np.abs(np.fft.fft(regressor.values.ravel()))[4], 8.88189504)
def _run_interface(self, runtime):
    """Index the BIDS dataset, build the Analysis, and load its levels.

    force_index / ignore entries of the form ``/<pattern>/`` are treated
    as regular expressions; anything else passes through as a literal.
    """
    from bids.analysis import Analysis
    from bids.layout import BIDSLayout
    import re

    def _compile_patterns(entries):
        # Shared conversion, previously duplicated for both traits.
        # The length guard fixes an IndexError on empty-string entries
        # ('' has no [0]/[-1]); iterates over () when the trait is unset.
        return [
            re.compile(entry[1:-1])
            if len(entry) >= 2 and entry[0] == '/' and entry[-1] == '/'
            else entry
            for entry in entries or ()
        ]

    force_index = _compile_patterns(self.inputs.force_index)
    ignore = _compile_patterns(self.inputs.ignore)

    # An empty derivatives value means "no derivatives" (None), not [].
    derivatives = self.inputs.derivatives or None
    layout = BIDSLayout(self.inputs.bids_dir, force_index=force_index,
                        ignore=ignore, derivatives=derivatives)

    selectors = self.inputs.selectors

    analysis = Analysis(model=self.inputs.model, layout=layout)
    analysis.setup(drop_na=False, **selectors)
    self._load_level1(runtime, analysis)
    self._load_higher_level(runtime, analysis)

    return runtime
def collection():
    """Fixture: merged run-level events collection for ds005 at 10 Hz."""
    layout = BIDSLayout(join(get_test_data_path(), 'ds005'))
    return layout.get_collections('run', types=['events'], scan_length=480,
                                  merge=True, sampling_rate=10)
def main():
    """Convert behavioral files for all of the subjects.

    For each subject in the dataset: remove any previously converted
    events files, locate the raw behavioral text file (preferring the
    '-2' run over '-1'), and convert it into BIDS events.
    """
    temp_dir = '/home/data/nbc/Sutherland_HIVCB/raw/converted-csv/'
    dset_dir = '/home/data/nbc/Sutherland_HIVCB/dset/'
    in_dir = '/home/data/nbc/DICOM/MSUT_HIV_CB/Behavioral/'
    layout = BIDSLayout(dset_dir)
    subjects = layout.get_subjects()
    for sid in subjects:
        # Clear stale outputs so a re-run starts from a clean slate.
        events_files = glob(
            join(dset_dir,
                 'sub-{0}/func/sub-{0}_task-*_events.tsv'.format(sid)))
        for events_file in events_files:
            remove(events_file)
        # Prefer the '-2' behavioral file, falling back to '-1'.
        file_ = join(in_dir,
                     'P{0}/EAT_SCAN/EAT_scanner_all-{0}-2.txt'.format(sid))
        if not isfile(file_):
            file_ = join(in_dir,
                         'P{0}/EAT_SCAN/EAT_scanner_all-{0}-1.txt'.format(sid))
        if isfile(file_):
            try:
                convert_eat(file_, sid, temp_dir, dset_dir)
            except Exception as exc:
                # Narrowed from a bare `except:` so KeyboardInterrupt and
                # SystemExit propagate; report the cause instead of hiding it.
                print('{0} failed: {1}'.format(sid, exc))
        else:
            print('No file found for {0}'.format(sid))
def _list_outputs(self):
    """Write each input file into a BIDS-style tree and return the paths."""
    from bids.layout import BIDSLayout

    base_dir = self.inputs.base_directory
    os.makedirs(base_dir, exist_ok=True)
    layout = BIDSLayout(base_dir, validate=False)

    path_patterns = self.inputs.path_patterns
    if not isdefined(path_patterns):
        path_patterns = None

    out_files = []
    for entities, in_file in zip(self.inputs.entities, self.inputs.in_file):
        ents = dict(self.inputs.fixed_entities)
        ents.update(entities)

        ext = bids_split_filename(in_file)[2]
        ents['extension'] = self._extension_map.get(ext, ext)

        # In some instances, name/contrast could have the following
        # format (eg: gain.Range, gain.EqualIndifference).
        # This prevents issues when creating/searching files for the report
        for key in ("node", "name", "contrast", "stat"):
            if key in ents:
                ents[key] = to_alphanum(str(ents[key]))

        out_fname = os.path.join(
            base_dir, layout.build_path(ents, path_patterns, validate=False))
        os.makedirs(os.path.dirname(out_fname), exist_ok=True)
        _copy_or_convert(in_file, out_fname)
        out_files.append(out_fname)

    return {'out_file': out_files}
def __init__(self, base_dir, task_id, sub_ids=None, verbose=True): assert type(base_dir) == str, "base_dir should be type(str)" assert type(task_id) == str, "task_id should be type(str)" self.base_dir = base_dir self.layout = BIDSLayout(base_dir) self.task_id = task_id if sub_ids == None: self.sub_ids = self.layout.get_subjects(task=self.task_id) else: assert type( sub_ids ) == list, "sub_ids should be type(list) (e.g., ['101', '102', '103'])" self.sub_ids = sub_ids for i in sub_ids: assert type( i) == str, "elements in sub_ids should be type(str)" assert any( i == j for j in self.layout.get_subjects(task=self.task_id) ), f"cannot find sub_ids with {self.task_id} task data" if verbose: print(f'{len(self.sub_ids)} subjects in {self.task_id} task')
def test_indexing_tag_conflict():
    """Conflicting entity values in one filename should raise at index time."""
    data_dir = join(get_test_data_path(), 'ds005_conflict')
    with pytest.raises(ValueError) as exc:
        BIDSLayout(data_dir)
    # Python 3 exceptions have no `.message` attribute (the original
    # `exc.value.message` would itself raise AttributeError); use str().
    message = str(exc.value)
    assert message.startswith("Conflicting values found")
    assert 'run' in message
def model():
    """Fixture: first auto-generated model for ds005."""
    layout = BIDSLayout(join(get_test_data_path(), 'ds005'))
    models = auto_model(layout, scan_length=480, one_vs_rest=True)
    return models[0]
def test_layout_save(tmp_path, layout_7t_trt):
    """Saving then reloading a layout DB must preserve query results."""
    db_file = str(tmp_path / "f.sqlite")
    layout_7t_trt.save(db_file, replace_connection=False)
    data_dir = join(get_test_data_path(), '7t_trt')
    reloaded = BIDSLayout(data_dir, database_path=db_file)
    original_files = set(layout_7t_trt.get(suffix='events', return_type='file'))
    reloaded_files = set(reloaded.get(suffix='events', return_type='file'))
    assert original_files == reloaded_files
def analysis():
    """Fixture: Analysis over ds005, set up for subjects 01 and 02."""
    layout_path = join(get_test_data_path(), "ds005")
    model_spec = join(layout_path, "models", "ds-005_type-test_model.json")
    result = Analysis(BIDSLayout(layout_path), model_spec)
    result.setup(scan_length=480, subject=["01", "02"])
    return result
def analysis():
    """Fixture: Analysis over ds005 (derivatives excluded), two subjects."""
    layout_path = join(get_test_data_path(), 'ds005')
    spec_path = join(layout_path, 'models', 'ds-005_type-test_model.json')
    result = Analysis(BIDSLayout(layout_path, exclude='derivatives/'),
                      spec_path)
    result.setup(scan_length=480, subject=['01', '02'])
    return result
def __init__(self, layout, model):
    """Store the layout (coercing a path to a BIDSLayout) and load the model."""
    self.layout = (layout if isinstance(layout, BIDSLayout)
                   else BIDSLayout(layout))
    self._load_model(model)
def run_coll():
    """Fixture: merged run-level collection of ds005 events variables."""
    layout = BIDSLayout(join(get_test_data_path(), 'ds005'))
    return layout.get_collections('run', types=['events'], merge=True,
                                  scan_length=480)
def _list_outputs(self):
    """Write inputs into a BIDS-style tree (camelCase entity values)."""
    from bids.layout import BIDSLayout

    base_dir = self.inputs.base_directory
    os.makedirs(base_dir, exist_ok=True)
    layout = BIDSLayout(base_dir, validate=False)

    path_patterns = self.inputs.path_patterns
    if not isdefined(path_patterns):
        path_patterns = None

    out_files = []
    for entities, in_file in zip(self.inputs.entities, self.inputs.in_file):
        # Fixed entities first, per-file entities override them.
        merged = {**self.inputs.fixed_entities, **entities}
        ents = {key: snake_to_camel(str(value))
                for key, value in merged.items()}
        out_fname = os.path.join(base_dir,
                                 layout.build_path(ents, path_patterns))
        makedirs(os.path.dirname(out_fname), exist_ok=True)
        _copy_or_convert(in_file, out_fname)
        out_files.append(out_fname)
    return {'out_file': out_files}
def build_analysis(analysis, predictor_events, bids_dir, run_id=None,
                   build=True):
    """Write predictor events to a temp derivatives dir and build an Analysis.

    Parameters
    ----------
    analysis : dict
        Analysis description with 'runs', 'task_name', and optionally 'model'.
    predictor_events : object
        Events to write out for the analysis (passed to writeout_events).
    bids_dir : str or Path
        Root of the BIDS dataset to index.
    run_id : iterable, optional
        Run ids to restrict the setup to; None means no restriction.
    build : bool, optional
        When False, skip constructing the BIDSAnalysis (returns None for it).

    Returns
    -------
    tuple
        (tmp_dir, paths, bids_analysis) — the temp directory used as a
        derivatives source, the written event paths, and the (possibly None)
        BIDSAnalysis.
    """
    tmp_dir = Path(mkdtemp())

    # Start with one empty dict so merge_dictionaries always has an argument.
    entities = [{}]
    if run_id is not None:
        # Get entities of runs, and add to kwargs
        for rid in run_id:
            for run in analysis['runs']:
                if rid == run['id']:
                    entities.append(get_entities(run))
                    break

    entities = merge_dictionaries(*entities)
    # scan_length spans the longest run; task comes from the analysis spec.
    entities['scan_length'] = max([r['duration'] for r in analysis['runs']])
    entities['task'] = analysis['task_name']

    # Write out all events
    paths = writeout_events(analysis, predictor_events, tmp_dir)

    if build is False:
        bids_analysis = None
    else:
        # Load events and try applying transformations
        bids_layout = BIDSLayout(bids_dir, derivatives=str(tmp_dir),
                                 validate=False)
        # deepcopy so setup() cannot mutate the caller's model dict.
        bids_analysis = BIDSAnalysis(bids_layout,
                                     deepcopy(analysis.get('model')))
        bids_analysis.setup(**entities)

    return tmp_dir, paths, bids_analysis