import json
import os

from bids import BIDSLayout


def main(bids_dir,
         subject=None,
         session=None):

    print(subject, session, bids_dir)
    layout = BIDSLayout(bids_dir, absolute_paths=False)
    bolds = layout.get(subject=subject, 
                       session=session,
                       extensions='nii', 
                       datatype='func',
                       suffix='bold')
    
    for bold in bolds:
        epi = layout.get(suffix='epi',
                         subject=subject,
                         session=session,
                         extensions='nii',
                         run=bold.run)

        print(epi)
        assert len(epi) == 1, 'Expected exactly one EPI for {}'.format(bold.filename)
        epi = epi[0]

        json_d = {'PhaseEncodingDirection':'i',
                  'TotalReadoutTime':0.04, 
                  'IntendedFor':bold.filename.replace('sub-{}/'.format(subject), '')}

        print(json_d)

        json_filename = os.path.join(layout.root,
                                     epi.filename.replace("nii", "json"))
        print(json_filename)

        with open(json_filename, 'w') as f:
            json.dump(json_d, f)
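
# A minimal usage sketch (the dataset path and entity labels are hypothetical,
# not part of the original example):
if __name__ == '__main__':
    main('/data/bids', subject='01', session='01')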
Example #2
import os

from bids import BIDSLayout
from nipype.caching import Memory

# SUBJECTS, MEM_DIR, BIDS_DIR, task, num_runs, and the helper functions called
# below are assumed to be defined elsewhere in the module.


def main():
    print('Running subjects:', str(SUBJECTS))
    if not os.path.isdir(MEM_DIR):
        os.mkdir(MEM_DIR)
    mem = Memory(base_dir=MEM_DIR)
    layout = BIDSLayout(BIDS_DIR)
    # func_files[subject_index][run_index]
    # Note: `type=` below is the pre-0.7 pybids keyword; newer releases use
    # `suffix=` instead.
    if num_runs > 1:
        func_files = [[
            layout.get(type='bold',
                       task=task,
                       run=i + 1,
                       subject=subj,
                       extensions='nii.gz')[0] for i in range(num_runs)
        ] for subj in SUBJECTS]
    else:
        func_files = [
            layout.get(type='bold',
                       task=task,
                       subject=subj,
                       extensions='nii.gz') for subj in SUBJECTS
        ]
    events = get_events(func_files)
    confounds = get_confounds(func_files)
    info = get_info(events, confounds)
    specify_model_results = specify_model(layout, func_files, info)
    level1design_results = lv1_design(mem, layout, func_files,
                                      specify_model_results)
    modelgen_results = feat_model(mem, level1design_results)
    mask_results = masking(mem, func_files)
    film_gls(mem, mask_results, modelgen_results)
Example #3
def get_magandphase(bids_dir, subject_id):
    from bids.layout import BIDSLayout
    from nipype.utils.filemanip import split_filename

    layout = BIDSLayout(bids_dir, validate=False)
    maglist = layout.get(
        subject=subject_id,
        datatype="func",
        suffix="bold",
        extension=[".nii", ".nii.gz"],
    )
    phaselist = layout.get(
        subject=subject_id,
        datatype="func",
        suffix="phase",
        extension=[".nii", ".nii.gz"],
    )
    print(
        f"Found {max(len(maglist), len(phaselist))} functional runs for {subject_id}."
    )

    # get list of phase runs
    phaseruns = [f.run for f in phaselist]

    maglist_final = []
    phaselist_final = []
    # TODO: Match when there are multiple types of different runs
    for f in maglist:
        if f.run in phaseruns:
            pf = phaselist[phaseruns.index(f.run)]

            # Check that runs have matching prefixes (strip the trailing
            # 'bold'/'phase' suffixes before comparing)
            _, fname, _ = split_filename(f.filename)
            _, pfname, _ = split_filename(pf.filename)
            if fname[:-4] != pfname[:-5]:
                continue

            # Check that runs have matching length, if not exclude.
            if "dcmmeta_shape" in f.get_metadata().keys():
                if f.get_metadata()["dcmmeta_shape"][-1] != pf.get_metadata(
                )["dcmmeta_shape"][-1]:
                    continue

            # Check that runs have matching acq time, if not exclude.
            if "AcquisitionTime" in f.get_metadata().keys():
                if f.get_metadata()["AcquisitionTime"] != pf.get_metadata(
                )["AcquisitionTime"]:
                    continue

            maglist_final.append(f)
            phaselist_final.append(pf)

    print(
        f"{len(maglist_final)} runs with phase and magnitude were found for {subject_id}."
    )
    print("Runs: ", [f.run for f in maglist_final])
    print("These runs have a matching length, name, and acquisition times.\n")

    return maglist_final, phaselist_final
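
# A minimal usage sketch (the dataset path and subject label are hypothetical):
if __name__ == '__main__':
    mags, phases = get_magandphase('/data/bids', '01')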
Example #4
    def test_dataset_without_datasettype_parsed_as_raw(self):
        dataset_path = Path("ds005_derivs", "format_errs", "no_dataset_type")
        unvalidated = BIDSLayout(Path(get_test_data_path()) / dataset_path,
                                 validate=False)
        assert len(unvalidated.get()) == 4
        with pytest.raises(ValueError):
            unvalidated.get(desc="preproc")

        validated = BIDSLayout(Path(get_test_data_path()) / dataset_path)
        assert len(validated.get()) == 1
Example #5
    def _run_interface(self, runtime):
        import os
        from bids.layout import BIDSLayout
        layout = BIDSLayout(self.inputs.bids_dir)
        try:
            if self.inputs.datatype == 'func':
                bids_file = layout.get(subject=self.inputs.subject_id,
                                       session=self.inputs.session,
                                       run=self.inputs.run,
                                       extension=['nii', 'nii.gz'],
                                       datatype=self.inputs.datatype)
                func = layout.get(subject=self.inputs.subject_id,
                                  session=self.inputs.session,
                                  run=self.inputs.run,
                                  extension=['nii', 'nii.gz'],
                                  datatype=self.inputs.datatype,
                                  return_type='filename')
                file = func[0]
            elif self.inputs.datatype == 'anat':
                bids_file = layout.get(subject=self.inputs.subject_id,
                                       session=self.inputs.session,
                                       extension=['nii', 'nii.gz'],
                                       datatype=self.inputs.datatype)
                anat = layout.get(subject=self.inputs.subject_id,
                                  session=self.inputs.session,
                                  extension=['nii', 'nii.gz'],
                                  datatype=self.inputs.datatype,
                                  return_type='filename')
                file = anat[0]
            else:
                raise ValueError('Wrong datatype %s' % (self.inputs.datatype))
            if len(bids_file) > 1:
                raise ValueError(
                    'Provided BIDS spec led to duplicates: %s' %
                    (str(self.inputs.datatype + '_' + self.inputs.subject_id +
                         '_' + self.inputs.session + '_' + self.inputs.run)))
        except Exception:
            raise ValueError(
                'Error with BIDS spec: %s' %
                (str(self.inputs.datatype + '_' + self.inputs.subject_id +
                     '_' + self.inputs.session + '_' + self.inputs.run)))

        nii_format = bids_file[0].get_entities()['extension']
        # RABIES only works with compressed .nii for now
        if nii_format == 'nii':
            os.system('gzip %s' % (file, ))
            file = file + '.gz'

        setattr(self, 'out_file', file)

        return runtime
Example #6
def test_deriv_indexing():
    data_dir = join(get_test_data_path(), 'ds005')
    deriv_dir = join(data_dir, 'derivatives', 'bbr')

    # missing dataset_description.json
    with pytest.warns(UserWarning):
        layout = BIDSLayout(data_dir, derivatives=deriv_dir)

    # Should work fine
    deriv_dir = join(data_dir, 'derivatives', 'events')
    layout = BIDSLayout(data_dir, derivatives=deriv_dir)
    assert layout.get(scope='derivatives')
    assert layout.get(scope='events')
    assert not layout.get(scope='nonexistent')
Example #8
import json
import os.path as op

from bids import BIDSLayout


def get_files():
    with open('dset_config.json', 'r') as fo:
        CONFIG = json.load(fo)

    DATA_DIR = op.abspath('/home/data/nbc/external-datasets/ds001491/')

    all_info = {}
    for dset_name in list(CONFIG.keys())[:3]:
        layout = BIDSLayout(op.join(DATA_DIR, dset_name))
        cfg = CONFIG[dset_name]
        task = cfg['task']
        dset_info = {}
        for sub in layout.get_subjects():
            runs = layout.get_runs(subject=sub, task=task)
            sub_info = {}
            for run in runs:
                run_info = {}
                run_info['files'] = []
                run_info['echo_times'] = []
                for echo in sorted(
                        layout.get_echoes(subject=sub, task=task, run=run)):
                    raw_files = layout.get(subject=sub,
                                           task=task,
                                           run=run,
                                           echo=echo,
                                           extensions='.nii.gz')
                    preproc_files = layout.get(subject=sub,
                                               task=task,
                                               run=run,
                                               root='afni-step1',
                                               echo=echo,
                                               extensions='.nii.gz',
                                               desc='realign')
                    # NOTE: the derivative query above is overridden here; the
                    # raw files are used instead.
                    preproc_files = raw_files[:]
                    if len(preproc_files) != 1:
                        print(preproc_files)
                        raise Exception('Expected exactly one preprocessed '
                                        'file, got %d' % len(preproc_files))

                    # Replace filename with path when using new version of bids
                    run_info['files'].append(preproc_files[0].filename)
                    metadata = layout.get_metadata(preproc_files[0].filename)
                    run_info['echo_times'].append(metadata['EchoTime'])
                sub_info[run] = run_info
            dset_info[sub] = sub_info
        all_info[dset_name] = dset_info

    with open('all_files.json', 'w') as fo:
        json.dump(all_info, fo, indent=4, sort_keys=True)
Example #9
def test_layout_save(tmp_path, layout_7t_trt):
    layout_7t_trt.save(str(tmp_path / "f.sqlite"), replace_connection=False)
    data_dir = join(get_test_data_path(), '7t_trt')
    layout = BIDSLayout(data_dir, database_path=str(tmp_path / "f.sqlite"))
    old_files = set(layout_7t_trt.get(suffix='events', return_type='file'))
    new_files = set(layout.get(suffix='events', return_type='file'))
    assert old_files == new_files
Example #10
def test_find_path(subject, session):
    bids_dir = create_dummy_bids_path(2, 2)
    bids_layout = BIDSLayout(bids_dir, derivatives=True)

    test_dwi_path = bids_layout.get(subject=subject,
                                    session=session,
                                    return_type="filename",
                                    suffix="dwi",
                                    extension="nii.gz")[0]

    mask_file = MaskFile("seg", {'scope': 'synthetic'})
    mask_file.find_path(bids_layout, test_dwi_path, subject, session)

    assert mask_file.fnames[session][subject] == op.join(
        bids_dir, "derivatives", "dmriprep", "sub-" + subject,
        "ses-" + session, "anat", "seg.nii.gz")

    other_sub = "01" if subject == "02" else "02"
    with pytest.raises(ValueError):
        mask_file.find_path(
            bids_layout,
            test_dwi_path,
            subject=other_sub,
            session=session,
        )
Example #11
def test_index_metadata(index_metadata, query, result, mock_config):
    data_dir = join(get_test_data_path(), '7t_trt')
    layout = BIDSLayout(data_dir, index_metadata=index_metadata, **query)
    sample_file = layout.get(task='rest', extension='.nii.gz',
                             acquisition='fullbrain')[0]
    metadata = sample_file.get_metadata()
    assert metadata.get('RepetitionTime') == result
Example #12
def test_generated_reportlets(bids_sessions, ordering):
    # make independent report
    out_dir = tempfile.mkdtemp()
    report = Report(
        Path(out_dir),
        "fakeuuid",
        reportlets_dir=Path(bids_sessions),
        subject_id="01",
        packagename="fmriprep",
    )
    config = Path(pkgrf("niworkflows", "reports/default.yml"))
    settings = load(config.read_text())
    # change settings to only include some missing ordering
    settings["sections"][3]["ordering"] = ordering
    report.index(settings["sections"])
    # expected number of reportlets
    expected_reportlets_num = len(report.layout.get(extension=".svg"))
    # bids_session uses these entities
    needed_entities = ["session", "task", "ceagent", "run"]
    # the last section is the most recently run
    reportlets_num = len(report.sections[-1].reportlets)
    # get the number of figures in the output directory
    out_layout = BIDSLayout(out_dir, config="figures", validate=False)
    out_figs = len(out_layout.get())
    # if ordering does not contain all the relevant entities
    # then there should be fewer reportlets than expected
    if all(ent in ordering for ent in needed_entities):
        assert reportlets_num == expected_reportlets_num == out_figs
    else:
        assert reportlets_num < expected_reportlets_num == out_figs
Example #13
    def _run_interface(self, runtime):
        subject_id = self.inputs.scan_info['subject_id']
        session = self.inputs.scan_info['session']
        if 'run' in (self.inputs.scan_info.keys()):
            run = self.inputs.scan_info['run']
        else:
            run = self.inputs.run

        from bids.layout import BIDSLayout
        layout = BIDSLayout(self.inputs.bids_dir, validate=False)
        try:
            file_list = layout.get(subject=subject_id,
                                   session=session,
                                   run=run,
                                   extension=['nii', 'nii.gz'],
                                   suffix=self.inputs.suffix,
                                   return_type='filename')
            if len(file_list) > 1:
                raise ValueError(
                    'Provided BIDS spec led to duplicates: %s' %
                    (str(self.inputs.suffix) + ' sub-' + subject_id + ' ses-' +
                     session + ' run-' + str(run)))
        except Exception:
            raise ValueError('Error with BIDS spec: %s' %
                             (str(self.inputs.suffix) + ' sub-' + subject_id +
                              ' ses-' + session + ' run-' + str(run)))

        setattr(self, 'out_file', file_list[0])

        return runtime
Example #14
def test_generated_reportlets(bids_sessions, ordering):
    # make independent report
    out_dir = tempfile.mkdtemp()
    report = Report(Path(bids_sessions),
                    Path(out_dir),
                    'fakeiuud',
                    subject_id='01',
                    packagename='fmriprep')
    config = Path(pkgrf('niworkflows', 'reports/fmriprep.yml'))
    settings = load(config.read_text())
    # change settings to only include some missing ordering
    settings['sections'][3]['ordering'] = ordering
    report.index(settings['sections'])
    # expected number of reportlets
    expected_reportlets_num = len(report.layout.get(extension='svg'))
    # bids_session uses these entities
    needed_entities = ['session', 'task', 'ceagent', 'run']
    # the last section is the most recently run
    reportlets_num = len(report.sections[-1].reportlets)
    # get the number of figures in the output directory
    out_layout = BIDSLayout(out_dir, config='figures', validate=False)
    out_figs = len(out_layout.get())
    # if ordering does not contain all the relevant entities
    # then there should be fewer reportlets than expected
    if all(ent in ordering for ent in needed_entities):
        assert reportlets_num == expected_reportlets_num == out_figs
    else:
        assert reportlets_num < expected_reportlets_num == out_figs
Example #15
def parse_directory(deriv_dir, work_dir, analysis):
    fl_layout = BIDSLayout(
        deriv_dir,
        config=['bids', 'derivatives', 'fitlins'],
        validate=False)
    wd_layout = BIDSLayout(
        str(Path(work_dir) / 'reportlets' / 'fitlins'),
        validate=False)
    contrast_svgs = fl_layout.get(extensions='.svg', suffix='contrasts')

    analyses = []
    for contrast_svg in contrast_svgs:
        ents = contrast_svg.entities
        ents.pop('suffix')
        ents.setdefault('subject', None)
        correlation_matrix = fl_layout.get(extensions='.svg', suffix='corr',
                                           **ents)
        design_matrix = fl_layout.get(extensions='.svg', suffix='design', **ents)
        job_desc = {
            'ents': {k: v for k, v in ents.items() if v is not None},
            'dataset': analysis.layout.root,
            'model_name': analysis.model['name'],
            'contrasts_svg': contrast_svg.path,
            }
        if ents.get('subject'):
            job_desc['subject_id'] = ents.get('subject')
        if correlation_matrix:
            job_desc['correlation_matrix_svg'] = correlation_matrix[0].path
        if design_matrix:
            job_desc['design_matrix_svg'] = design_matrix[0].path

        snippet = wd_layout.get(extensions='.html', suffix='snippet', **ents)
        if snippet:
            with open(snippet[0].path) as fobj:
                job_desc['warning'] = fobj.read()

        contrasts = fl_layout.get(extensions='.png', suffix='ortho', **ents)
        # TODO: Split contrasts from estimates
        job_desc['contrasts'] = [{'image_file': c.path,
                                  'name':
                                      fl_layout.parse_file_entities(
                                          c.path)['contrast']}
                                 for c in contrasts]
        analyses.append(job_desc)

    return analyses
Example #16
    def _run_interface(self, runtime):
        from bids.layout import BIDSLayout

        derivatives = self.inputs.derivatives
        layout = BIDSLayout(self.inputs.bids_dir, derivatives=derivatives)

        bold_files = []
        mask_files = []
        entities = []
        for ents in self.inputs.entities:
            selectors = {**self.inputs.selectors, **ents}
            bold_file = layout.get(**selectors)

            if len(bold_file) == 0:
                raise FileNotFoundError(
                    "Could not find BOLD file in {} with entities {}"
                    "".format(self.inputs.bids_dir, selectors))
            elif len(bold_file) > 1:
                raise ValueError(
                    "Non-unique BOLD file in {} with entities {}.\n"
                    "Matches:\n\t{}"
                    "".format(
                        self.inputs.bids_dir, selectors,
                        "\n\t".join('{} ({})'.format(
                            f.path, layout.files[f.path].entities)
                                    for f in bold_file)))

            # Select exactly matching mask file (may be over-cautious)
            bold_ents = layout.parse_file_entities(bold_file[0].path)
            bold_ents['suffix'] = 'mask'
            bold_ents['desc'] = 'brain'
            bold_ents['extension'] = ['.nii', '.nii.gz']
            mask_file = layout.get(**bold_ents)
            bold_ents.pop('suffix')
            bold_ents.pop('desc')

            bold_files.append(bold_file[0].path)
            mask_files.append(mask_file[0].path if mask_file else None)
            entities.append(bold_ents)

        self._results['bold_files'] = bold_files
        self._results['mask_files'] = mask_files
        self._results['entities'] = entities

        return runtime
Example #17
def test_layout_save(layout_7t_trt):
    _, f = tempfile.mkstemp(suffix='.db')
    layout_7t_trt.save(f, replace_connection=False)
    data_dir = join(get_test_data_path(), '7t_trt')
    layout = BIDSLayout(data_dir, database_file=f)
    old_files = set(layout_7t_trt.get(suffix='events', return_type='file'))
    new_files = set(layout.get(suffix='events', return_type='file'))
    assert old_files == new_files
    os.unlink(f)
Example #18
def test_index_metadata(index_metadata, query, result):
    data_dir = join(get_test_data_path(), '7t_trt')
    layout = BIDSLayout(data_dir, index_metadata=index_metadata)
    if not index_metadata and query is not None:
        indexer = BIDSLayoutIndexer(layout)
        indexer.index_metadata(**query)
    sample_file = layout.get(task='rest', extension='nii.gz',
                             acq='fullbrain')[0]
    metadata = sample_file.get_metadata()
    assert metadata.get('RepetitionTime') == result
Example #19
    def _run_interface(self, runtime):
        subject_id = self.inputs.scan_info['subject_id']
        session = self.inputs.scan_info['session']
        if 'run' in (self.inputs.scan_info.keys()):
            run = self.inputs.scan_info['run']
        else:
            run = self.inputs.run

        from bids.layout import BIDSLayout
        layout = BIDSLayout(self.inputs.bids_dir, validate=False)
        try:
            if run is None:  # if there is no run spec to search, don't include it in the search
                file_list = layout.get(subject=subject_id,
                                       session=session,
                                       extension=['nii', 'nii.gz'],
                                       suffix=self.inputs.suffix,
                                       return_type='filename')
            else:
                file_list = layout.get(subject=subject_id,
                                       session=session,
                                       run=run,
                                       extension=['nii', 'nii.gz'],
                                       suffix=self.inputs.suffix,
                                       return_type='filename')
            if len(file_list) > 1:
                raise ValueError(
                    'Provided BIDS spec led to duplicates: %s' %
                    (str(self.inputs.suffix) + ' sub-' + subject_id + ' ses-' +
                     session + ' run-' + str(run)))
            elif len(file_list) == 0:
                raise ValueError(
                    'No file found corresponding to the following BIDS spec: %s'
                    % (str(self.inputs.suffix) + ' sub-' + subject_id +
                       ' ses-' + session + ' run-' + str(run)))
        except Exception:
            raise ValueError('Error with BIDS spec: %s' %
                             (str(self.inputs.suffix) + ' sub-' + subject_id +
                              ' ses-' + session + ' run-' + str(run)))

        setattr(self, 'out_file', file_list[0])

        return runtime
Example #20
    def _run_interface(self, runtime):
        from bids.layout import BIDSLayout

        derivatives = self.inputs.derivatives
        layout = BIDSLayout(self.inputs.bids_dir, derivatives=derivatives)

        fmri_prep = []
        conf_raw = []
        entities = []

        for ents in self.inputs.entities:
            selectors = {**self.inputs.selectors, **ents}
            fmri_file = layout.get(extensions=['preproc_bold.nii.gz'],
                                   **selectors)
            if len(fmri_file) == 0:
                raise FileNotFoundError(
                    "Could not find BOLD file in {} with entities {}"
                    "".format(self.inputs.bids_dir, selectors))
            elif len(fmri_file) > 1:
                raise ValueError(
                    "Non-unique BOLD file in {} with entities {}.\n"
                    "Matches:\n\t{}"
                    "".format(
                        self.inputs.bids_dir, selectors,
                        "\n\t".join('{} ({})'.format(
                            f.path, layout.files[f.path].entities)
                                    for f in fmri_file)))

            confounds = layout.get(extensions=['confounds_regressors.tsv'],
                                   **selectors)

            fmri_prep.append(fmri_file[0].path)
            conf_raw.append(confounds[0].path)

        self._results['fmri_prep'] = fmri_prep
        self._results['conf_raw'] = conf_raw
        self._results['entities'] = self.inputs.entities

        return runtime
Example #21
import json
import os.path as op

from bids import BIDSLayout


def get_files():
    DSET_DIR = op.abspath('/home/data/nbc/external-datasets/ds001491/')

    layout = BIDSLayout(DSET_DIR)
    layout.add_derivatives(
        '/home/data/nbc/external-datasets/ds001491/derivatives/afni-step1/')
    task = 'images'
    info = {}
    for sub in sorted(layout.get_subjects()):
        print(sub)
        sub_info = {'files': [], 'echo_times': []}
        for echo in sorted(layout.get_echoes(subject=sub, task=task)):
            raw_files = layout.get(subject=sub,
                                   task=task,
                                   echo=echo,
                                   extensions='.nii.gz')
            preproc_files = layout.get(subject=sub,
                                       task=task,
                                       root='afni-step1',
                                       extensions='.nii.gz')
            # For some reason pybids doesn't index echo in derivatives
            preproc_files = [
                p for p in preproc_files
                if 'echo-{0}'.format(echo) in p.filename
            ]
            if len(preproc_files) != 1:
                print(preproc_files)
                raise Exception('Expected exactly one preprocessed file, '
                                'got %d' % len(preproc_files))

            # Replace filename with path when using new version of bids
            sub_info['files'].append(preproc_files[0].path)
            metadata = raw_files[0].metadata
            sub_info['echo_times'].append(metadata['EchoTime'])
        info[sub] = sub_info

    with open('preproc_files.json', 'w') as fo:
        json.dump(info, fo, indent=4, sort_keys=True)
Example #22
def test_path_arguments():
    data_dir = join(get_test_data_path(), 'ds005')
    deriv_dir = join(data_dir, 'derivatives', 'events')

    layout = BIDSLayout(Path(data_dir), derivatives=Path(deriv_dir))
    assert layout.get(scope='derivatives')
    assert layout.get(scope='events')
    assert not layout.get(scope='nonexistent')

    layout = BIDSLayout(Path(data_dir), derivatives=[Path(deriv_dir)])
    assert layout.get(scope='derivatives')
    assert layout.get(scope='events')
    assert not layout.get(scope='nonexistent')
Example #23
    def _run_interface(self, runtime):
        from bids.layout import BIDSLayout

        layout = BIDSLayout(self.inputs.bids_dir, derivatives=True)

        entities = []
        extensions = ['preproc_bold.nii.gz']

        for subject in np.sort(layout.get_subjects()):
            files = layout.get(subject=subject,
                               task=layout.get_tasks(),
                               extensions=extensions)
            if files:
                entities.append({'subject': subject})  # , 'session': session}

        self._results['entities'] = entities

        return runtime
Example #24
def main():
    """Entry point"""
    from os import cpu_count
    from multiprocessing import set_start_method
    from bids.layout import BIDSLayout
    from nipype import logging as nlogging
    set_start_method('forkserver')

    opts = get_parser().parse_args()

    # Retrieve logging level
    log_level = int(max(25 - 5 * opts.verbose_count, logging.DEBUG))
    # Set logging
    logger.setLevel(log_level)
    nlogging.getLogger('nipype.workflow').setLevel(log_level)
    nlogging.getLogger('nipype.interface').setLevel(log_level)
    nlogging.getLogger('nipype.utils').setLevel(log_level)

    # Resource management options
    plugin_settings = {
        'plugin': 'MultiProc',
        'plugin_args': {
            'n_procs': opts.ncpus,
            'raise_insufficient': False,
            'maxtasksperchild': 1,
        }
    }
    # Permit overriding plugin config with specific CLI options
    if not opts.ncpus or opts.ncpus < 1:
        plugin_settings['plugin_args']['n_procs'] = cpu_count()

    nthreads = opts.nthreads
    if not nthreads or nthreads < 1:
        nthreads = cpu_count()

    derivatives_dir = opts.derivatives_dir.resolve()
    # Get absolute path to the BIDS directory, falling back to the parent of
    # the derivatives folder when no explicit BIDS directory was given
    bids_dir = (opts.bids_dir or derivatives_dir.parent).resolve()
    layout = BIDSLayout(str(bids_dir), validate=False, derivatives=str(derivatives_dir))
    query = {'domains': 'derivatives', 'desc': 'preproc',
             'suffix': 'bold', 'extensions': ['.nii', '.nii.gz']}

    if opts.participant_label:
        query['subject'] = '|'.join(opts.participant_label)
    if opts.run:
        query['run'] = '|'.join(opts.run)
    if opts.task:
        query['task'] = '|'.join(opts.task)
    if opts.space:
        query['space'] = opts.space
        if opts.space == 'template':
            query['space'] = '|'.join(get_tpl_list())

    # Preprocessed files that are input to the workflow
    prepped_bold = layout.get(**query)
    if not prepped_bold:
        print('No preprocessed files found under the given derivatives '
              'folder "%s".' % derivatives_dir, file=sys.stderr)

    # The magic happens here
    if 'participant' in opts.analysis_level:
        from workflows import first_level_wf

        output_dir = opts.output_dir.resolve()
        output_dir.mkdir(exist_ok=True, parents=True)
        logger.info('Writing first-level outputs to "%s".', output_dir)
        base_entities = set(['subject', 'session', 'task', 'run', 'acquisition', 'reconstruction'])
        inputs = {}
        for part in prepped_bold:
            entities = part.entities
            sub = entities['subject']
            inputs[sub] = {}
            base = base_entities.intersection(entities)
            subquery = {k: v for k, v in entities.items() if k in base}
            inputs[sub]['bold'] = part.path
            inputs[sub]['mask'] = layout.get(
                domains='derivatives',
                suffix='mask',
                return_type='file',
                extensions=['.nii', '.nii.gz'],
                space=query['space'],
                **subquery)[0]
            inputs[sub]['events'] = layout.get(
                suffix='events', return_type='file', **subquery)[0]
            inputs[sub]['regressors'] = layout.get(
                domains='derivatives',
                suffix='regressors',
                return_type='file',
                extensions=['.tsv'],
                **subquery)[0]
            inputs[sub]['tr'] = part.metadata.get('RepetitionTime')

        workflow = first_level_wf(inputs, output_dir)
        workflow.base_dir = opts.work_dir
        workflow.run(**plugin_settings)

    if 'group' in opts.analysis_level:
        from workflows import second_level_wf
        import re

        output_dir = opts.output_dir.resolve()
        group_out = output_dir / 'FSLAnalysis'
        group_out.mkdir(exist_ok=True, parents=True)
        metafile = group_out / 'dataset_description.json'
        with open(metafile, 'w') as fobj:
            # `metadata` is assumed to be defined at module level
            json.dump(metadata, fobj, indent=4)
        glayout = BIDSLayout(str(bids_dir), validate=False, derivatives=str(output_dir))

        base_entities = set(['subject', 'session', 'task', 'run', 'acquisition', 'reconstruction'])
        in_copes = []
        in_varcopes = []
        for part in prepped_bold:
            entities = part.entities
            base = base_entities.intersection(entities)
            subquery = {k: v for k, v in entities.items() if k in base}
            in_copes.append(glayout.get(
                domains='derivatives',
                suffix='cope',
                return_type='file',
                extensions=['.nii', '.nii.gz'],
                space=query['space'],
                **subquery)[0])
            in_varcopes.append(glayout.get(
                domains='derivatives',
                suffix='varcope',
                return_type='file',
                extensions=['.nii', '.nii.gz'],
                space=query['space'],
                **subquery)[0])
        bids_ref = re.sub('sub-[0-9]+', 'sub-all', prepped_bold[0].path)

        group_mask = tpl_get(entities['space'],
                             resolution=2,
                             desc='brain',
                             suffix='mask')


        workflow = second_level_wf(group_out, bids_ref)

        # set inputs
        workflow.inputs.inputnode.group_mask = str(group_mask)
        workflow.inputs.inputnode.in_copes = in_copes
        workflow.inputs.inputnode.in_varcopes = in_varcopes

        workflow.base_dir = opts.work_dir
        workflow.run(**plugin_settings)

    return 0
Example #25

# This fragment assumes module-level imports (Node, fsl, pandas as pd,
# BIDSLayout) and variables (dataDir, task_name, nDelfMRI, TR) from the
# surrounding script.
applymask = Node(fsl.ApplyMask(),
                 name='applymask')


###########
#
# READ TASK INFO IN PREPRARATION FOR THE ANALYSIS
#
###########

# Creating the layout object for this BIDS data set
layout = BIDSLayout(dataDir)

# task information file
fileEvent = layout.get(suffix='events',
                       task=task_name,
                       extension='tsv',
                       return_type='file')[0]

# Offset for onset times
onsetOffset = nDelfMRI * TR  # time adjustment due to deleted fMRI volumes

## Getting experiment info from the event file, into a Bunch object
trialInfo = pd.read_csv(fileEvent, sep='\t')
conditions = sorted(list(set(trialInfo.trial_type)))
onsets = []
durations = []

for itrial in conditions:
    onsets.append(list(trialInfo[trialInfo.trial_type==itrial].onset-onsetOffset))
    durations.append(list(trialInfo[trialInfo.trial_type==itrial].duration))
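
# The comment above says the experiment info goes into a Bunch object, but the
# snippet stops short of building it. A minimal sketch of that last step,
# assuming nipype's Bunch (the `subject_info` name is hypothetical):
from nipype.interfaces.base import Bunch

subject_info = [Bunch(conditions=conditions,
                      onsets=onsets,
                      durations=durations)]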
Example #26
def init_single_subject_wf(subject_id, name, reportlets_dir, output_dir,
                           bids_dir, low_mem, omp_nthreads, recon_input,
                           recon_spec, sloppy):
    """
    This workflow organizes the reconstruction pipeline for a single subject.
    Reconstruction is performed using a separate workflow for each dwi series.

    Parameters
    ----------
        subject_id : str
            Subject label
        name : str
            Name of workflow
        low_mem : bool
            Write uncompressed .nii files in some cases to reduce memory usage
        omp_nthreads : int
            Maximum number of threads an individual process may use
        reportlets_dir : str
            Directory in which to save reportlets
        output_dir : str
            Directory in which to save derivatives
        bids_dir : str
            Root directory of BIDS dataset
        recon_input : str
            Root directory of the output from qsiprep
        recon_spec : str
            Path to a JSON file that specifies how to run reconstruction
        sloppy : bool
            Use bad parameters for reconstruction to make the workflow faster.
    """
    if name in ('single_subject_wf', 'single_subject_test_recon_wf'):
        # a fake spec
        spec = {
            "name": "fake",
            "atlases": [],
            "space": "T1w",
            "anatomical": [],
            "nodes": []
        }
        space = spec['space']
        # for documentation purposes
        dwi_files = ['/made/up/outputs/sub-X_dwi.nii.gz']
        layout = None
    else:
        # If recon_input is specified without qsiprep, check if we can find the subject dir
        subject_dir = 'sub-' + subject_id
        if not op.exists(op.join(recon_input, subject_dir)):
            qp_recon_input = op.join(recon_input, "qsiprep")
            LOGGER.info("%s not in %s, trying recon_input=%s", subject_dir,
                        recon_input, qp_recon_input)
            if not op.exists(op.join(qp_recon_input, subject_dir)):
                raise Exception(
                    "Unable to find subject directory in %s or %s" %
                    (recon_input, qp_recon_input))

        spec = _load_recon_spec(recon_spec, sloppy=sloppy)
        space = spec['space']
        layout = BIDSLayout(recon_input, validate=False, absolute_paths=True)
        # Get all the output files that are in this space
        dwi_files = [
            f.path for f in layout.get(suffix="dwi",
                                       subject=subject_id,
                                       extension=['nii', 'nii.gz'])
            if 'space-' + space in f.filename
        ]
        LOGGER.info("found %s in %s", dwi_files, recon_input)

        # Find the corresponding mask files

    workflow = Workflow('sub-{}_{}'.format(subject_id, spec['name']))
    workflow.__desc__ = """
Reconstruction was
performed using *QSIprep* {qsiprep_ver},
which is based on *Nipype* {nipype_ver}
(@nipype1; @nipype2; RRID:SCR_002502).

""".format(qsiprep_ver=__version__, nipype_ver=nipype_ver)
    workflow.__postdesc__ = """

Many internal operations of *qsiprep* use
*Nilearn* {nilearn_ver} [@nilearn, RRID:SCR_001362] and
*Dipy* {dipy_ver} [@dipy].
For more details of the pipeline, see [the section corresponding
to workflows in *qsiprep*'s documentation]\
(https://qsiprep.readthedocs.io/en/latest/workflows.html \
"qsiprep's documentation").


### References

    """.format(nilearn_ver=nilearn_ver, dipy_ver=dipy_ver)

    if len(dwi_files) == 0:
        LOGGER.info("No dwi files found for %s", subject_id)
        return workflow

    # Was there a forced normalization during preprocessing?
    template_transform = [
        f.path for f in layout.get(
            subject=subject_id, suffix='xfm', extension=['.h5'])
        if 'to-T1w' in f.path and 'from-MNI152NLin2009cAsym' in f.path
    ]
    if template_transform:
        template_transform = template_transform[0]
        has_transform = True
    else:
        has_transform = False

    t1w_brain_mask = layout.get(subject=subject_id,
                                desc='brain',
                                extension=['nii', 'nii.gz'],
                                datatype='anat')
    has_t1w_brain_mask = bool(t1w_brain_mask)

    anat_ingress_wf = init_recon_anatomical_wf(subject_id=subject_id,
                                               recon_input_dir=recon_input,
                                               extras_to_make=spec.get(
                                                   'anatomical', []),
                                               name='anat_ingress_wf')

    to_connect = [('outputnode.' + field, 'inputnode.' + field)
                  for field in anatomical_input_fields]
    # create a processing pipeline for the dwis in each session
    dwi_recon_wf = init_dwi_recon_workflow(dwi_files=dwi_files,
                                           workflow_spec=spec,
                                           reportlets_dir=reportlets_dir,
                                           output_dir=output_dir,
                                           has_t1w=has_t1w_brain_mask,
                                           has_t1w_transform=has_transform,
                                           omp_nthreads=omp_nthreads)
    workflow.connect([(anat_ingress_wf, dwi_recon_wf, to_connect)])

    return workflow
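
# A minimal invocation sketch (every argument value here is hypothetical; kept
# commented out because the function expects a real qsiprep output tree):
# wf = init_single_subject_wf(subject_id='01', name='sub_01_recon_wf',
#                             reportlets_dir='/out/reportlets', output_dir='/out',
#                             bids_dir='/data/bids', low_mem=False, omp_nthreads=1,
#                             recon_input='/out/qsiprep', recon_spec='spec.json',
#                             sloppy=False)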
Example #27
# subjects
subjList = layout.get_subjects()

# sessions
sesList = layout.get_sessions()

# tasks
taskList = layout.get_tasks()

# runs
runList = layout.get_runs()

# List of all fMRI data for subject 01
fMRI_sub01 = layout.get(subject='01',
                        suffix='bold',
                        extension=['nii', 'nii.gz'],
                        return_type='file')

# Let's focus on the test session
fMRI_sub01_test = layout.get(subject='01',
                             session='test',
                             suffix='bold',
                             extension=['nii', 'nii.gz'],
                             return_type='file')

# A list of files associated with the covert verb generation
# (covertverbgeneration) task
list_covertverbgen = layout.get(task='covertverbgeneration',
                                extension=['tsv', 'json'],
                                return_type='file')
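
# A small extension of the queries above (a sketch): pybids can also pull JSON
# sidecar metadata for any file it returns.
tr = layout.get_metadata(fMRI_sub01_test[0])['RepetitionTime']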
Example #28
def run(command):
    """Run a shell command, streaming its output. The head of this function is
    reconstructed (the original snippet begins mid-loop); it assumes the
    standard BIDS-Apps subprocess pattern."""
    import subprocess
    process = subprocess.Popen(command, shell=True,
                               stdout=subprocess.PIPE,
                               stderr=subprocess.STDOUT)
    while True:
        line = str(process.stdout.readline(), 'utf-8')[:-1]
        print(line)
        if line == '' and process.poll() is not None:
            break

    if process.returncode != 0:
        raise Exception("Non zero return code: {0}\n"
                        "{1}\n\n{2}".format(process.returncode, command,
                                            process.stdout.read()))


if __name__ == '__main__':
    from glob import glob

    from bids import BIDSLayout

    dset_dir = '/home/data/nbc/Sutherland_HIVCB/dset/'
    layout = BIDSLayout(dset_dir)
    log_files = glob('*.log')
    log_niis = [lf.replace('.log', '') for lf in log_files]

    cmd = ('/home/data/nbc/tools/deface/mri_deface {0} '
           '/home/data/nbc/tools/deface/talairach_mixed_with_skull.gca '
           '/home/data/nbc/tools/deface/face.gca {0}')

    subjects = layout.get_subjects()
    for subj in subjects:
        # Anatomical scans
        scans = layout.get(subject=subj, extensions='nii.gz', type='T1w')
        for scan in scans:
            f = scan.filename
            if not any([ln in f for ln in log_niis]):
                scan_cmd = cmd.format(f)
                run(scan_cmd)
Example #29

import json

from dateutil.parser import parse  # assumed source of the `parse` call below


def files_to_dict(file_list):
    """Map each file's AcquisitionTime (parsed into a datetime) to
    the File object. (Head reconstructed; the original snippet is truncated,
    and the signature is recovered from the call site below.)
    """
    out_dict = {}
    for f in file_list:
        fn = f.path
        with open(fn, 'r') as fi:
            data = json.load(fi)
        dt = parse(data['AcquisitionTime'])
        out_dict[dt] = f
    return out_dict


# Get json files for field maps
fmap_jsons = layout.get(subject=subj,
                        session=sess,
                        datatype='fmap',
                        acquisition='dwi',
                        extension='json')

for dir_ in ['AP', 'PA']:
    # Run field map directions independently
    dir_jsons = [
        fm for fm in fmap_jsons if '_dir-{0}_'.format(dir_) in fm.filename
    ]
    fmap_dict = files_to_dict(dir_jsons)
    dts = sorted(fmap_dict.keys())
    intendedfor_dict = {fmap.path: [] for fmap in dir_jsons}
    # Get all scans with associated field maps (bold + dwi)
    func_jsons = layout.get(subject=subj,
                            session=sess,
                            datatype='dwi',
                            extension='json')  # closing arguments assumed; the original snippet is truncated here
Example #30
# Head reconstructed from the standard BIDS-Apps run.py template; the original
# snippet begins mid-statement.
if args.participant_label:
    subjects_to_analyze = args.participant_label
else:
    subject_dirs = glob(os.path.join(args.bids_dir, "sub-*"))
    subjects_to_analyze = [
        subject_dir.split("-")[-1] for subject_dir in subject_dirs
    ]
# only use a subset of sessions
if args.session_label:
    session_to_analyze = dict(session=args.session_label)
else:
    session_to_analyze = dict()

# running participant level
if args.analysis_level == "participant":
    # find all T1s and skullstrip them
    for subject_label in subjects_to_analyze:
        t1ws = [
            f.path for f in layout.get(subject=subject_label,
                                       suffix='T1w',
                                       extensions=["nii.gz", "nii"],
                                       **session_to_analyze)
        ]
        t2ws = [
            f.path for f in layout.get(subject=subject_label,
                                       suffix='T2w',
                                       extensions=["nii.gz", "nii"],
                                       **session_to_analyze)
        ]
        assert (len(t1ws) >
                0), "No T1w files found for subject %s!" % subject_label
        assert (len(t2ws) >
                0), "No T2w files found for subject %s!" % subject_label

        available_resolutions = ["0.7", "0.8", "1"]
        t1_zooms = nibabel.load(t1ws[0]).header.get_zooms()
Example #31
def init_single_subject_wf(subject_id, name, reportlets_dir, output_dir,
                           bids_dir, low_mem, omp_nthreads, recon_input,
                           recon_spec):
    """
    This workflow organizes the reconstruction pipeline for a single subject.
    Reconstruction is performed using a separate workflow for each dwi series.

    Parameters
    ----------
        subject_id : str
            Subject label
        name : str
            Name of workflow
        low_mem : bool
            Write uncompressed .nii files in some cases to reduce memory usage
        omp_nthreads : int
            Maximum number of threads an individual process may use
        reportlets_dir : str
            Directory in which to save reportlets
        output_dir : str
            Directory in which to save derivatives
        bids_dir : str
            Root directory of BIDS dataset
        recon_input : str
            Root directory of the output from qsiprep
        recon_spec : str
            Path to a JSON file that specifies how to run reconstruction
    """
    if name in ('single_subject_wf', 'single_subject_test_recon_wf'):
        # a fake spec
        spec = {"name": "fake", "atlases": [], "space": "T1w", "nodes": []}
        space = spec['space']
        # for documentation purposes
        dwi_files = ['/made/up/outputs/sub-X_dwi.nii.gz']
        layout = None
    else:
        # If recon_input is specified without qsiprep, check if we can find the subject dir
        subject_dir = 'sub-' + subject_id
        if not op.exists(op.join(recon_input, subject_dir)):
            qp_recon_input = op.join(recon_input, "qsiprep")
            LOGGER.info("%s not in %s, trying recon_input=%s", subject_dir,
                        recon_input, qp_recon_input)
            if not op.exists(op.join(qp_recon_input, subject_dir)):
                raise Exception(
                    "Unable to find subject directory in %s or %s" %
                    (recon_input, qp_recon_input))

        with open(recon_spec, "r") as f:
            spec = json.load(f)
        space = spec['space']
        layout = BIDSLayout(recon_input)
        LOGGER.info("found %s in %s",
                    layout.get(type="dwi", extensions=['nii', 'nii.gz']),
                    recon_input)
        # Get all the output files that are in this space
        dwi_files = [
            f.filename
            for f in layout.get(type="dwi", extensions=['nii', 'nii.gz'])
            if 'space-' + space in f.filename
        ]

    workflow = pe.Workflow(name=spec['name'])
    if len(dwi_files) == 0:
        LOGGER.info("No dwi files found for %s", subject_id)
        return workflow

    anat_src = pe.Node(QsiprepAnatomicalIngress(subject_id=subject_id,
                                                recon_input_dir=recon_input),
                       name='anat_src')

    # create a processing pipeline for the dwis in each session
    for dwi_file in dwi_files:
        dwi_recon_wf = init_dwi_recon_workflow(dwi_file=dwi_file,
                                               workflow_spec=spec,
                                               reportlets_dir=reportlets_dir,
                                               output_dir=output_dir,
                                               omp_nthreads=omp_nthreads)
        workflow.connect([(anat_src, dwi_recon_wf, [
            ('t1_aparc', 'inputnode.t1_aparc'), ('t1_seg', 'inputnode.t1_seg'),
            ('t1_aseg', 'inputnode.t1_aseg'),
            ('t1_brain_mask', 'inputnode.t1_brain_mask'),
            ('t1_preproc', 'inputnode.t1_preproc'),
            ('t1_csf_probseg', 'inputnode.t1_csf_probseg'),
            ('t1_gm_probseg', 'inputnode.t1_gm_probseg'),
            ('t1_wm_probseg', 'inputnode.t1_wm_probseg'),
            ('left_inflated_surf', 'inputnode.left_inflated_surf'),
            ('left_midthickness_surf', 'inputnode.left_midthickness_surf'),
            ('left_pial_surf', 'inputnode.left_pial_surf'),
            ('left_smoothwm_surf', 'inputnode.left_smoothwm_surf'),
            ('right_inflated_surf', 'inputnode.right_inflated_surf'),
            ('right_midthickness_surf', 'inputnode.right_midthickness_surf'),
            ('right_pial_surf', 'inputnode.right_pial_surf'),
            ('right_smoothwm_surf', 'inputnode.right_smoothwm_surf'),
            ('orig_to_t1_mode_forward_transform',
             'inputnode.orig_to_t1_mode_forward_transform'),
            ('t1_2_fsnative_forward_transform',
             'inputnode.t1_2_fsnative_forward_transform'),
            ('t1_2_mni_reverse_transform',
             'inputnode.t1_2_mni_reverse_transform'),
            ('t1_2_mni_forward_transform',
             'inputnode.t1_2_mni_forward_transform'),
            ('template_brain_mask', 'inputnode.template_brain_mask'),
            ('template_preproc', 'inputnode.template_preproc'),
            ('template_seg', 'inputnode.template_seg'),
            ('template_csf_probseg', 'inputnode.template_csf_probseg'),
            ('template_gm_probseg', 'inputnode.template_gm_probseg'),
            ('template_wm_probseg', 'inputnode.template_wm_probseg')
        ])])

    return workflow