Example No. 1
def get_bids_files(input_bids_dir,
                   space="MNI152NLin2009cAsym",
                   subject=bids.layout.Query.ANY,
                   validate=False,
                   save_cache=False,
                   load_cache=False):
    '''Extract the mask and imaging entities from the fMRIPrep output'''
    database_path = os.path.join(input_bids_dir, ".pybids_cache")
    if load_cache:
        layout = bids.BIDSLayout(input_bids_dir,
                                 validate=validate,
                                 database_path=database_path)
    else:
        layout = bids.BIDSLayout(input_bids_dir, validate=validate)
    layout.add_derivatives(input_bids_dir)
    if save_cache:
        if os.path.exists(database_path):
            shutil.rmtree(database_path)
        layout.save(database_path)

    bids_images = layout.get(scope="derivatives",
                             space=space,
                             subject=subject,
                             desc="preproc",
                             suffix=["T1w", "bold"],
                             extension="nii.gz")
    bids_masks = layout.get(scope="derivatives",
                            space=space,
                            subject=subject,
                            desc="brain",
                            suffix="mask",
                            extension="nii.gz")

    return bids_images, bids_masks
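A minimal usage sketch for the function above; the dataset path is hypothetical, and the module-level imports the snippet relies on (os, shutil, bids) are assumed:

# Hypothetical call (path is illustrative):
images, masks = get_bids_files("/data/fmriprep_output",
                               space="MNI152NLin2009cAsym",
                               save_cache=True)  # index once, then reuse .pybids_cache
for img in images:
    print(img.path)  # layout.get() returns BIDSFile objects exposing .path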
Example No. 2
    def __init__(self,
                 bids_dir,
                 selected_pipeline,
                 bids_filter=None,
                 bidsvalidator=False,
                 update_pipeline=True,
                 history=None,
                 exist_ok=False,
                 layout=None,
                 nettsv='nn-t'):

        if layout is None:
            self.BIDSLayout = bids.BIDSLayout(bids_dir,
                                              derivatives=True,
                                              validate=bidsvalidator)
        else:
            self.BIDSLayout = layout
        self.bids_dir = bids_dir
        self.selected_pipeline = selected_pipeline
        self.nettsv = nettsv
        self.bids_filter = {} if bids_filter is None else bids_filter
        self.history = {} if history is None else history
        self.exist_ok = exist_ok
        self.update_pipeline = update_pipeline

        with open(tenetopath[0] +
                  '/config/tenetobids/tenetobids_description.json') as f:
            self.tenetobids_description = json.load(f)
        self.tenetobids_description['PipelineDescription'][
            'Version'] = tenetoversion

        with open(tenetopath[0] +
                  '/config/tenetobids/tenetobids_structure.json') as f:
            self.tenetobids_structure = json.load(f)
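The paths and version read here (tenetopath, tenetoversion) suggest this constructor belongs to teneto's TenetoBIDS class. A hedged instantiation sketch under that assumption, with a hypothetical dataset path and filter:

# Assumes this __init__ is teneto's TenetoBIDS and that /data/bids
# contains fMRIPrep derivatives (both are assumptions):
from teneto import TenetoBIDS

tnet = TenetoBIDS('/data/bids',
                  selected_pipeline='fMRIPrep',
                  bids_filter={'subject': '01', 'task': 'rest'})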
Example No. 3
def main():

    args = parse_args()

    pybids_cache_path = os.path.join(args.bids_path, PYBIDS_CACHE_PATH)

    layout = bids.BIDSLayout(
        args.bids_path,
        database_path=pybids_cache_path,
        reset_database=args.force_reindex,
        ignore=(
            "code",
            "stimuli",
            "sourcedata",
            "models",
            re.compile(r"^\."),
        ) + load_bidsignore(args.bids_path),
    )

    job_path = os.path.join(layout.root, SLURM_JOB_DIR)
    if not os.path.exists(job_path):
        os.mkdir(job_path)
        # add .slurm to .gitignore
        with open(os.path.join(layout.root, ".gitignore"), "a+") as f:
            f.seek(0)
            if not any([SLURM_JOB_DIR in l for l in f.readlines()]):
                f.write(f"{SLURM_JOB_DIR}\n")

    for job_file in run_mriqc(layout, args, args.preproc):
        if not args.no_submit:
            submit_slurm_job(job_file)
Example No. 4
    def _list_outputs(self):
        import bids
        base_dir = self.inputs.base_directory

        os.makedirs(base_dir, exist_ok=True)

        layout = bids.BIDSLayout(base_dir)
        path_patterns = self.inputs.path_patterns
        if not isdefined(path_patterns):
            path_patterns = None

        out_files = []
        for entities, in_file in zip(self.inputs.entities,
                                     self.inputs.in_file):
            ents = {**self.inputs.fixed_entities}
            ents.update(entities)

            ents = {k: snake_to_camel(str(v)) for k, v in ents.items()}

            out_fname = os.path.join(base_dir,
                                     layout.build_path(ents, path_patterns))
            os.makedirs(os.path.dirname(out_fname), exist_ok=True)

            _copy_or_convert(in_file, out_fname)
            out_files.append(out_fname)

        return {'out_file': out_files}
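The pybids call doing the real work above is layout.build_path, which fills a path pattern from an entity dictionary. A small standalone sketch; the dataset root and pattern string are illustrative, not taken from the snippet:

# Sketch of build_path (root and pattern are illustrative):
import bids

layout = bids.BIDSLayout('/data/bids', validate=False)
ents = {'subject': '01', 'task': 'rest', 'suffix': 'bold',
        'extension': '.nii.gz'}
pattern = ['sub-{subject}/func/sub-{subject}_task-{task}_{suffix}{extension}']
# Yields a path like .../sub-01/func/sub-01_task-rest_bold.nii.gz;
# exact extension handling varies across pybids versions.
print(layout.build_path(ents, pattern, validate=False))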
Example No. 5
    def _run_interface(self, runtime):
        import bids
        from bids.analysis import auto_model
        models = self.inputs.model
        if not isinstance(models, list):
            # model is not yet standardized, so validate=False
            layout = bids.BIDSLayout(self.inputs.bids_dir, validate=False)

            if not isdefined(models):
                models = layout.get(suffix='smdl', return_type='file')
                if not models:
                    raise ValueError("No models found")
            elif models == 'default':
                models = auto_model(layout)

        models = [_ensure_model(m) for m in models]

        if self.inputs.selectors:
            # This is almost certainly incorrect
            models = [
                model for model in models
                if all(val in model['input'].get(key, [val])
                       for key, val in self.inputs.selectors.items())
            ]

        self._results['model_spec'] = models

        return runtime
Example No. 6
    def _run_interface(self, runtime):
        import bids
        bids.config.set_options(loop_preproc=True)
        include = self.inputs.include_pattern
        exclude = self.inputs.exclude_pattern
        if not isdefined(include):
            include = None
        if not isdefined(exclude):
            exclude = None

        paths = [(self.inputs.bids_dir, 'bids')]
        if isdefined(self.inputs.preproc_dir):
            paths.append((self.inputs.preproc_dir, ['bids', 'derivatives']))
        layout = bids.BIDSLayout(paths, include=include, exclude=exclude)

        selectors = self.inputs.selectors

        analysis = bids.Analysis(model=self.inputs.model, layout=layout)
        analysis.setup(drop_na=False, **selectors)
        self._load_level1(runtime, analysis)
        self._load_higher_level(runtime, analysis)

        # Debug - remove, eventually
        runtime.analysis = analysis

        return runtime
Example No. 7
    def _run_interface(self, runtime):
        import bids
        from bids.analysis import auto_model
        models = self.inputs.model
        if not isinstance(models, list):
            database_path = self.inputs.database_path
            layout = bids.BIDSLayout.load(database_path=database_path)

            if not isdefined(models):
                # model is not yet standardized, so validate=False
                # Ignore all subject directories and .git/ and .datalad/ directories
                small_layout = bids.BIDSLayout(
                    layout.root, derivatives=[d.root for d in layout.derivatives.values()],
                    validate=False,
                    ignore=[re.compile(r'sub-'),
                            re.compile(r'\.(git|datalad)')])
                # PyBIDS can double up, so find unique models
                models = list(set(small_layout.get(suffix='smdl', return_type='file')))
                if not models:
                    raise ValueError("No models found")
            elif models == 'default':
                models = auto_model(layout)

        models = [_ensure_model(m) for m in models]

        if self.inputs.selectors:
            # This is almost certainly incorrect
            models = [model for model in models
                      if all(val in model['input'].get(key, [val])
                             for key, val in self.inputs.selectors.items())]

        self._results['model_spec'] = models

        return runtime
Example No. 8
def main():

    args = parse_args()
    print("\n### Running fmriprep-slurm\n")
    print(vars(args))

    print("\n# Loading pyBIDS database (it might take few hours for a big dataset)...\n")
    sing_bids_path = os.path.join(
        SINGULARITY_DATA_PATH, os.path.basename(args.bids_path))
    layout = bids.BIDSLayout(
        sing_bids_path,
        reset_database=args.force_reindex,
        ignore=(
            "code",
            "stimuli",
            "sourcedata",
            "models",
            re.compile(r"^\."),
        )
        + load_bidsignore(sing_bids_path),
    )
    job_path = os.path.join(SINGULARITY_OUTPUT_PATH, SLURM_JOB_DIR)
    if not os.path.exists(job_path):
        os.mkdir(job_path)
    
    print("\n# Prefectch templateflow templates ...\n")
    # prefectch templateflow templates
    os.environ["TEMPLATEFLOW_HOME"] = TEMPLATEFLOW_HOME
    tf_api.get(args.output_spaces + ["OASIS30ANTs", "fsLR", "fsaverage"])

    print("\n# Processing slurm files into {}\n".format(
        os.path.join(args.output_path, SLURM_JOB_DIR)))
    for job_file in run_fmriprep(layout, args):
        if args.submit:
            submit_slurm_job(job_file)
Example No. 9
def parse_directory(deriv_dir, work_dir, analysis):
    fl_layout = bids.BIDSLayout((deriv_dir, [
        'bids', 'derivatives',
        pkgr.resource_filename('fitlins', 'data/fitlins.json')
    ]))
    wd_layout = bids.BIDSLayout(str(Path(work_dir) / 'reportlets' / 'fitlins'))
    contrast_svgs = fl_layout.get(extensions='.svg', type='contrasts')

    analyses = []
    for contrast_svg in contrast_svgs:
        ents = fl_layout.parse_file_entities(contrast_svg.filename)
        ents.pop('type')
        ents.setdefault('subject', None)
        correlation_matrix = fl_layout.get(extensions='.svg',
                                           type='corr',
                                           **ents)
        design_matrix = fl_layout.get(extensions='.svg', type='design', **ents)
        job_desc = {
            'ents': {k: v
                     for k, v in ents.items() if v is not None},
            'dataset': analysis.layout.root,
            'model_name': analysis.model['name'],
            'contrasts_svg': contrast_svg.filename,
        }
        if ents.get('subject'):
            job_desc['subject_id'] = ents.get('subject')
        if correlation_matrix:
            job_desc['correlation_matrix_svg'] = correlation_matrix[0].filename
        if design_matrix:
            job_desc['design_matrix_svg'] = design_matrix[0].filename

        snippet = wd_layout.get(extensions='.html', type='snippet', **ents)
        if snippet:
            with open(snippet[0].filename) as fobj:
                job_desc['warning'] = fobj.read()

        contrasts = fl_layout.get(extensions='.png', type='ortho', **ents)
        # TODO: Split contrasts from estimates
        job_desc['contrasts'] = [{
            'image_file':
            c.filename,
            'name':
            fl_layout.parse_file_entities(c.filename)['contrast']
        } for c in contrasts]
        analyses.append(job_desc)

    return analyses
Example No. 10
    def __init__(self, bdir, subjects=None, sessions=None, pipeline='dwi'):
        self.bdir = bdir
        self.layout = bids.BIDSLayout(bdir)
        if subjects is None:
            subjects = self.layout.get_subjects()
        if sessions is None:
            sessions = self.layout.get_sessions()

        # get list of subject / session pairs
        self.pairs = self.get_pairs(subjects=subjects, sessions=sessions, pipeline=pipeline)
Example No. 11
    def query(self):
        """
        Get a queryable object representing the entire BIDS dataset.

        Returns
        -------
        A BIDSLayout object

        """
        return bids.BIDSLayout(self.bids_root)
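A sketch of typical follow-up queries on the returned layout, using the same entity filters seen in the other examples on this page (the root path is hypothetical and stands in for self.bids_root):

layout = bids.BIDSLayout('/data/bids')  # stands in for self.bids_root
subjects = layout.get_subjects()
bold_files = layout.get(subject=subjects[0], suffix='bold',
                        extension='nii.gz', return_type='file')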
Example No. 12
def idconn_workflow(dset_dir, atlas, task, out_dir, space="MNI152NLin2009cAsym", conn=None, bids_db=None, confounds=None):
    print('Getting started!')

    if not confounds:
        confounds = [
            "cosine00", "cosine01", "cosine02",
            "trans_x", "trans_x_derivative1", "trans_x_power2", "trans_x_derivative1_power2",
            "trans_y", "trans_y_derivative1", "trans_y_derivative1_power2", "trans_y_power2",
            "trans_z", "trans_z_derivative1", "trans_z_power2", "trans_z_derivative1_power2",
            "rot_x", "rot_x_derivative1", "rot_x_power2", "rot_x_derivative1_power2",
            "rot_y", "rot_y_derivative1", "rot_y_power2", "rot_y_derivative1_power2",
            "rot_z", "rot_z_derivative1", "rot_z_derivative1_power2", "rot_z_power2",
            "a_comp_cor_00", "a_comp_cor_01", "a_comp_cor_02", "a_comp_cor_03", "a_comp_cor_04", "a_comp_cor_05", "a_comp_cor_06"
        ]

    print(f"Atlas: {atlas}\nConnectivity measure: {conn}")

    assert exists(dset_dir), f"Specified dataset doesn't exist:\n{dset_dir} not found.\n\nPlease check the filepath."
    layout = bids.BIDSLayout(dset_dir, derivatives=True, database_path=bids_db)
    subjects = layout.get(return_type='id', target='subject', suffix='bold')
    print(f"Subjects: {subjects}")
    #runs = layout.get(return_type='id', target='session', suffix='bold')
    preproc_subjects = layout.get(return_type='id', target='subject', task=task, space=space, desc='preproc', suffix='bold')
    if len(subjects) != len(preproc_subjects):
        print(f'{len(subjects)} subjects found in dataset, but only {len(preproc_subjects)} have preprocessed BOLD data. Pipeline is continuing anyway; please double-check the preprocessed data if this doesn\'t seem right.')

    example_events = layout.get(return_type='filename', suffix='events', task=task, subject=preproc_subjects[0])
    events_df = pd.read_csv(example_events[0], header=0, index_col=0, sep='\t')
    conditions = events_df['trial_type'].unique()

    print(f"Computing connectivity matrices using {atlas}")
    for subject in preproc_subjects:
        print(f"Subject {subject}")
        sessions = layout.get(return_type='id', target='session', task=task, subject=subject, suffix='bold')
        print(f"Sessions with task-{task} found for {subject}: {sessions}")
        for session in sessions:
            print(f"Session {session}")
            print(f"here are the inputs: {layout, subject, session, task, atlas, conn, space, confounds}")
            if 'rest' in task:
                try:
                    adj_matrix = build_networks.connectivity(layout, subject, session, task, atlas, conn, space, confounds)
                except Exception as e:
                    print(f'Error building corrmat for sub-{subject}, ses-{session}, task-{task}: {e}')
            if len(conditions) < 1:
                try:
                    adj_matrix = build_networks.connectivity(layout, subject, session, task, atlas, conn, space, confounds)
                except Exception as e:
                    print(f'Error building corrmat for sub-{subject}, ses-{session}, task-{task}: {e}')
            else:
                try:
                    adj_matrix = build_networks.task_connectivity(layout=layout, subject=subject, session=session, task=task, atlas=atlas, confounds=confounds, connectivity_metric=conn)
                except Exception as e:
                    print(f'Error building corrmat for sub-{subject}, ses-{session}, task-{task}: {e}')
Example No. 13
def get_subjects(input_bids_dir,
                 validate=False,
                 save_cache=False,
                 load_cache=False):
    '''Get the subjects IDs from the fmriprep output'''
    database_path = os.path.join(input_bids_dir, ".pybids_cache")
    if load_cache:
        layout = bids.BIDSLayout(input_bids_dir,
                                 validate=validate,
                                 database_path=database_path)
    else:
        layout = bids.BIDSLayout(input_bids_dir, validate=validate)
    layout.add_derivatives(input_bids_dir)
    if save_cache:
        if os.path.exists(database_path):
            shutil.rmtree(database_path)
        layout.save(database_path)

    sub_ids = layout.get_subjects()

    return sub_ids
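A sketch of the cache round trip these flags enable: index and save once, then reload the saved database on later runs (the path is hypothetical):

# First run: build the index and persist it to .pybids_cache
sub_ids = get_subjects('/data/fmriprep_output', save_cache=True)
# Later runs: reload the saved database instead of re-indexing
sub_ids = get_subjects('/data/fmriprep_output', load_cache=True)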
Example No. 14
def test_extension_initial_dot(mock_config):
    ds117 = os.path.join(get_test_data_path(), 'ds000117')

    # Warn if creating a layout without declaring a mode
    bids.config.set_option('extension_initial_dot', None)
    with pytest.warns(FutureWarning, match='To suppress this warning'):
        layout = bids.BIDSLayout(ds117)
    assert layout.get(extension='nii.gz')[0].entities['extension'] == 'nii.gz'

    # Warn if setting the mode to False... this isn't sticking around
    with pytest.warns(FutureWarning, match='will be disabled'):
        bids.config.set_option('extension_initial_dot', False)
    with pytest.warns(None) as record:
        layout = bids.BIDSLayout(ds117)
    assert len(record) == 0
    assert layout.get(extension='nii.gz')[0].entities['extension'] == 'nii.gz'

    # No warnings to move to dot mode
    with pytest.warns(None) as record:
        bids.config.set_option('extension_initial_dot', True)
        layout = bids.BIDSLayout(ds117)
    assert len(record) == 0
    assert layout.get(extension='nii.gz')[0].entities['extension'] == '.nii.gz'
Example No. 15
def test_extension_initial_dot(mock_config):
    ds117 = os.path.join(get_test_data_path(), 'ds000117')

    # Setting False is disabled
    with pytest.raises(ValueError, match='Cannot set'):
        bids.config.set_option('extension_initial_dot', False)
    with pytest.raises(ValueError, match='Cannot set'):
        bids.config.set_option('extension_initial_dot', None)

    # Setting True warns
    with pytest.warns(FutureWarning, match='will be removed'):
        bids.config.set_option('extension_initial_dot', True)

    # No warnings on layout construction
    with pytest.warns(None) as record:
        layout = bids.BIDSLayout(ds117)
    assert len(record) == 0
    assert layout.get(extension='nii.gz')[0].entities['extension'] == '.nii.gz'
Example No. 16
    def _run_interface(self, runtime):
        import bids
        paths = [(self.inputs.bids_dir, 'bids')]
        if isdefined(self.inputs.preproc_dir):
            paths.append((self.inputs.preproc_dir, ['bids', 'derivatives']))
        layout = bids.BIDSLayout(paths)

        bold_files = []
        mask_files = []
        entities = []
        for ents in self.inputs.entities:
            selectors = {**self.inputs.selectors, **ents}
            bold_file = layout.get(extensions=['.nii', '.nii.gz'], **selectors)

            if len(bold_file) == 0:
                raise FileNotFoundError(
                    "Could not find BOLD file in {} with entities {}"
                    "".format(self.inputs.bids_dir, selectors))
            elif len(bold_file) > 1:
                raise ValueError(
                    "Non-unique BOLD file in {} with entities {}.\n"
                    "Matches:\n\t{}"
                    "".format(
                        self.inputs.bids_dir, selectors,
                        "\n\t".join('{} ({})'.format(
                            f.filename, layout.files[f.filename].entities)
                                    for f in bold_file)))

            # Select exactly matching mask file (may be over-cautious)
            bold_ents = layout.parse_file_entities(bold_file[0].filename)
            bold_ents['type'] = 'brainmask'
            mask_file = layout.get(extensions=['.nii', '.nii.gz'], **bold_ents)
            bold_ents.pop('type')

            bold_files.append(bold_file[0].filename)
            mask_files.append(mask_file[0].filename if mask_file else None)
            entities.append(bold_ents)

        self._results['bold_files'] = bold_files
        self._results['mask_files'] = mask_files
        self._results['entities'] = entities

        return runtime
Example No. 17
def write_report(level, report_dicts, run_context, deriv_dir):
    fl_layout = bids.BIDSLayout(
        (deriv_dir, ['bids', 'derivatives',
                     pkgr.resource_filename('fitlins', 'data/fitlins.json')]))

    env = jinja2.Environment(
        loader=jinja2.FileSystemLoader(
            searchpath=pkgr.resource_filename('fitlins', '/')))

    tpl = env.get_template('data/report.tpl')

    for context in report_dicts:
        ents = context['ents'].copy()
        ents['model'] = snake_to_camel(context['model_name'])
        target_file = op.join(deriv_dir, fl_layout.build_path(ents, PATH_PATTERNS))
        html = tpl.render(deroot({'level': level, **context, **run_context},
                                 op.dirname(target_file)))
        with open(target_file, 'w') as fobj:
            fobj.write(html)
Example No. 18
def bids_data(entry):
    import os
    import glob
    import bids
    import json

    bids.config.set_option('extension_initial_dot', True)

    layout = bids.BIDSLayout(entry.inputs,
                             derivatives=False,
                             absolute_paths=True)

    if not os.path.exists(entry.outputs + '/FDT') or not os.path.exists(
            entry.outputs + '/FDT/' + 'dataset_description.json'):
        os.makedirs(entry.outputs, exist_ok=True)
        os.makedirs(entry.outputs + '/FDT', exist_ok=True)

        # make dataset_description file...

        data = {
            'Name':
            'FSL Diffusion Toolbox Minimal Preprocessing',
            "BIDSVersion":
            "1.1.1",
            "PipelineDescription": {
                "Name": "FSL Diffusion Toolbox",
                "Version": "0.0.1",
                "CodeURL": "https://github.com/amyhegarty/FDT"
            },
            "CodeURL":
            "https://github.com/amyhegarty/FDT",
            "HowToAcknowledge":
            "Please cite all relevant works for FSL tools: topup, eddy, dtifit and python tools: pybids ( https://doi.org/10.21105/joss.01294,  https://doi.org/10.21105/joss.01294)"
        }

        with open(entry.outputs + '/FDT/' + 'dataset_description.json',
                  'w') as outfile:
            json.dump(data, outfile, indent=2)

    return layout
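With that dataset_description.json in place, the FDT folder can later be indexed as a derivatives dataset alongside the raw data; a hedged one-liner, assuming the same entry object:

# Hypothetical re-index that picks up the FDT outputs as derivatives
# (BIDSLayout accepts a path, or list of paths, for derivatives=):
layout = bids.BIDSLayout(entry.inputs, derivatives=entry.outputs + '/FDT')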
Example No. 19
def main():

    args = docopt(__doc__)
    fmriprep_dir = args['<fmriprep_dir>']
    output_dir = args['<output_dir>']
    ignore_fields = args['--ignore']
    space = args['--space'] or 'T1w'

    layout = bids.BIDSLayout(fmriprep_dir, derivatives=True, validate=False)

    #Generate participants.tsv template
    participants_tsv(layout, output_dir, ignore_fields)

    #Make anatomical QC pages
    anat_qc = os.path.join(output_dir, 'anat')
    makedir(anat_qc)
    make_anatomical_qc(layout, anat_qc)

    #Make functional QC pages
    func_qc = os.path.join(output_dir, 'func')
    makedir(func_qc)
    make_functional_qc(layout, func_qc, space)
Example No. 20
def main():

    args = parse_args()

    pybids_cache_path = os.path.join(args.bids_path, PYBIDS_CACHE_PATH)

    layout = bids.BIDSLayout(
        args.bids_path,
        database_path=pybids_cache_path,
        reset_database=args.force_reindex,
        ignore=(
            "code",
            "stimuli",
            "sourcedata",
            "models",
            re.compile(r"^\."),
        ) + load_bidsignore(args.bids_path),
    )

    job_path = os.path.join(layout.root, SLURM_JOB_DIR)
    if not os.path.exists(job_path):
        os.mkdir(job_path)
        # add .slurm to .gitignore
        with open(os.path.join(layout.root, ".gitignore"), "a+") as f:
            f.seek(0)
            if not any([SLURM_JOB_DIR in l for l in f.readlines()]):
                f.write(f"{SLURM_JOB_DIR}\n")

    # prefetch TemplateFlow templates
    os.environ["TEMPLATEFLOW_HOME"] = TEMPLATEFLOW_HOME
    import templateflow.api as tf_api

    tf_api.get(OUTPUT_TEMPLATES + ["OASIS30ANTs", "fsLR", "fsaverage"])

    for job_file in run_fmriprep(layout, args, args.preproc):
        if not args.no_submit:
            submit_slurm_job(job_file)
Example No. 21
def is_bids(input_dir):
    """
    Make sure that the input data is BIDs-formatted.
    If it's BIDs-formatted, except for a `dataset_description.json` file, return True.
    
    Returns
    -------
    bool
        True if the input directory is BIDs-formatted
    
    Raises
    ------
    ValueError
        Occurs if the input directory is not formatted properly.
    """
    try:
        layout = bids.BIDSLayout(input_dir)
        return layout.validate
    except ValueError as e:
        p = "'dataset_description.json' is missing from project root. Every valid BIDS dataset must have this file."
        if str(e) != p:
            raise ValueError(e)
        create_datadescript(input_dir)
        return is_bids(input_dir)
Example No. 22
def main():

    args = parse_args()
    logging.basicConfig(level=logging.getLevelName(args.debug_level.upper()))

    pybids_cache_path = os.path.join(args.bids_path, PYBIDS_CACHE_PATH)

    layout = bids.BIDSLayout(
        args.bids_path,
        database_path=pybids_cache_path,
        reset_database=args.force_reindex,
        index_metadata=False,
        validate=False,
    )

    if args.datalad:
        annex_repo = AnnexRepo(args.bids_path)

    subject_list = (
        args.participant_label if args.participant_label else bids.layout.Query.ANY
    )
    session_list = args.session_label if args.session_label else bids.layout.Query.ANY
    filters = dict(
        subject=subject_list,
        session=session_list,
        **args.ref_bids_filters,
        extension=['nii','nii.gz'])
    deface_ref_images = layout.get(**filters)

    if not len(deface_ref_images):
        logging.info(f"no reference image found with condition {filters}")
        return

    new_files, modified_files = [], []

    script_dir = os.path.dirname(__file__)

    mni_path = os.path.abspath(os.path.join(script_dir, MNI_PATH))
    mni_mask_path = os.path.abspath(os.path.join(script_dir, MNI_MASK_PATH))
    # if the MNI template image is not available locally
    if not os.path.exists(os.path.realpath(mni_path)):
        datalad.api.get(mni_path, dataset=datalad.api.Dataset(script_dir + "/../../"))
    tmpl_image = nb.load(mni_path)
    tmpl_image_mask = nb.load(mni_mask_path)
    tmpl_defacemask = generate_deface_ear_mask(tmpl_image)
    brain_xtractor = Extractor()

    for ref_image in deface_ref_images:
        subject = ref_image.entities["subject"]
        session = ref_image.entities["session"]

        datalad.api.get(ref_image.path)
        ref_image_nb = ref_image.get_image()

        matrix_path = ref_image.path.replace(
            "_%s.%s" % (ref_image.entities["suffix"], ref_image.entities["extension"]),
            "_mod-%s_defacemaskreg.mat" % ref_image.entities["suffix"],
        )

        if os.path.exists(matrix_path):
            logging.info("reusing existing registration matrix")
            ref2tpl_affine = AffineMap(np.loadtxt(matrix_path))
        else:
            logging.info(f"running registration of reference serie: {ref_image.path}")
            brain_mask = (brain_xtractor.run(ref_image_nb.get_fdata()) > 0.99).astype(
                np.uint8
            )
            brain_mask[:] = scipy.ndimage.morphology.binary_dilation(
                brain_mask, iterations=4
            )
            brain_mask_nb = nb.Nifti1Image(brain_mask, ref_image_nb.affine)
            ref2tpl_affine = registration(
                tmpl_image, ref_image_nb, tmpl_image_mask, brain_mask_nb
            )
            np.savetxt(matrix_path, ref2tpl_affine.affine)
            new_files.append(matrix_path)

        if args.debug_images:
            output_debug_images(tmpl_image, ref_image, ref2tpl_affine)

        series_to_deface = []
        for filters in args.other_bids_filters:
            series_to_deface.extend(
                layout.get(
                    extension=["nii", "nii.gz"],
                    subject=subject,
                    session=session,
                    **filters,
                )
            )

        # unlock before making any change to avoid unwanted save
        if args.datalad:
            annex_repo.unlock([serie.path for serie in series_to_deface])

        for serie in series_to_deface:
            if args.datalad:
                if (
                    next(annex_repo.get_metadata(serie.path))[1].get(
                        "distribution-restrictions"
                    )
                    is None
                ):
                    logging.info(
                        f"skipping {serie.path}: no distribution-restrictions metadata set."
                    )
                    continue
            logging.info(f"defacing {serie.path}")

            datalad.api.get(serie.path)
            serie_nb = serie.get_image()
            warped_mask = warp_mask(tmpl_defacemask, serie_nb, ref2tpl_affine)
            if args.save_all_masks or serie == ref_image:
                warped_mask_path = serie.path.replace(
                    "_%s" % serie.entities["suffix"],
                    "_mod-%s_defacemask" % serie.entities["suffix"],
                )
                if os.path.exists(warped_mask_path):
                    logging.warning(
                        f"{warped_mask_path} already exists: will not overwrite; clean up before rerunning"
                    )
                else:
                    warped_mask.to_filename(warped_mask_path)
                    new_files.append(warped_mask_path)

            masked_serie = nb.Nifti1Image(
                np.asanyarray(serie_nb.dataobj) * np.asanyarray(warped_mask.dataobj),
                serie_nb.affine,
                serie_nb.header,
            )
            masked_serie.to_filename(serie.path)
            modified_files.append(serie.path)

    if args.datalad and len(modified_files):
        logging.info("saving files and metadata changes in datalad")
        annex_repo.set_metadata(
            modified_files, remove={"distribution-restrictions": "sensitive"}
        )
        datalad.api.save(
            modified_files + new_files,
            message="deface %d series/images and update distribution-restrictions"
            % len(modified_files),
        )
Example No. 23
    def create_layout(self):
        # create bids layout
        self.layout = bids.BIDSLayout(self.base_dir,
                                      derivatives=True,
                                      database_path=self.database_path,
                                      reset_database=True)
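Note that reset_database=True forces a full re-index on every call; once saved, the database can instead be reloaded in a later session, as Example No. 7 does. A one-line sketch with a hypothetical path:

# Reload a previously saved index without re-scanning the dataset:
layout = bids.BIDSLayout.load(database_path='/data/.pybids_cache')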
Example No. 24
def main():

    args = docopt(__doc__)

    fmriprep_dir = args['<fmriprep_dir>']
    output_dir = args['<output_dir>']
    ignore_fields = args['--ignore']

    layout = bids.BIDSLayout(fmriprep_dir,
                             validate=False,
                             index_metadata=False)

    # Generate participants.tsv template
    participants_tsv(layout, output_dir, ignore_fields)

    #Now loop through each participant's scans and start building QC pages
    html_series = []
    subjects = layout.get_subjects()
    no_task = []
    prev_task_htmls = []
    for ind, s in enumerate(subjects):

        broad_name = 'sub-{}_{}.html'.format(s, ind)

        #Get relevant files for subject
        fig_dir = os.path.join(layout.root, 'sub-{}'.format(s), 'figures')
        sub_figs = os.listdir(fig_dir)
        sub_files = layout.get(subject=s, extension='nii.gz')

        #Get broad QC markup
        broad_html = make_broad_html(s, sub_figs, sub_files, output_dir,
                                     fig_dir)

        #Write in link to the previous task html
        if ind > 0:
            broad_html += [add_link(prev_task_htmls[-1][0], 'Previous Page')]

        #Get functional markup (one per taskfile)
        task_files = layout.get(subject=s,
                                extension='nii.gz',
                                suffix='bold',
                                space='T1w')
        task_files = sorted(task_files, key=lambda x: get_task_ordering_key(x))
        task_htmls = [
            make_task_html(s, output_dir, fig_dir, sub_figs, t)
            for t in task_files
        ]

        #Step 1: Write the first task_html name into broad_html
        try:
            broad_html += [add_link(task_htmls[0][0], 'Next Page')]
        except IndexError:
            no_task.append(s)
            continue

        #Step 2: Link broad html to first task_html
        task_htmls[0][1] += [add_link(broad_name, 'Previous Page')]

        #Step 3: Link each task HTML to the previous
        for i in range(1, len(task_htmls)):
            task_htmls[i][1] += [
                add_link(task_htmls[i - 1][0], 'Previous Page')
            ]

        #Step 4: Link each task HTML to the next
        for i in range(0, len(task_htmls) - 1):
            task_htmls[i][1] += [add_link(task_htmls[i + 1][0], 'Next Page')]

        #Step 5: Link broad_html to the next subject
        if ind != len(subjects) - 1:
            broad_html += [
                add_link('sub-{}_{}.html'.format(subjects[ind + 1], ind + 1),
                         'Next Subject')
            ]
            task_htmls[-1][1] += [
                add_link('sub-{}_{}.html'.format(subjects[ind + 1], ind + 1),
                         'Next Page')
            ]

        #Write out files
        with open(os.path.join(output_dir, broad_name), 'w') as f:
            f.writelines(broad_html)

        #Write out task files
        for t in task_htmls:
            with open(os.path.join(output_dir, t[0]), 'w') as f:
                f.writelines(t[1])

        #Store previous task htmls
        prev_task_htmls = task_htmls

    print('Subjects with no task files:')
    print('\n'.join(no_task))
Example No. 25
ANTS_JOBS = 8
PIPELINE_JOBS = 48

import sys, os
import re
import glob
import numpy as np
import pandas as pd
import bids
import transforms3d
import SimpleITK as sitk
from nipype.pipeline import MapNode, Workflow
from nipype.interfaces import ants
from blends import registration

ppmi = bids.BIDSLayout(DATA_PATH, validate=False)


def remove_repeats(lsScans):
    # Remove repeated scans for the same subject
    lsOutScans = []
    lsSubjects = []
    for scan in lsScans:
        if scan.entities['subject'] not in lsSubjects:
            lsSubjects += [scan.entities['subject']]
            lsOutScans += [scan.path]
    return lsOutScans


def remove_rigidbody(strAffinePath):
    # Isolate and remove rigid body components (rotation and translation) from
Example No. 26
    def update_bids_layout(self):
        """
        Re-index the dataset and update the BIDS layout.
        """
        self.BIDSLayout = bids.BIDSLayout(self.bids_dir, derivatives=True)
Example No. 27
    def create_bids_dataframe(self):
        """Generate the dataframe."""

        # Suppress a FutureWarning from pybids about the leading dot being included in 'extension' as of version 0.14.0
        # The config_bids.json file used matches the future behavior
        # TODO: when reaching version 0.14.0, remove the following line
        pybids.config.set_option('extension_initial_dot', True)

        for path_data in self.paths_data:
            path_data = os.path.join(path_data, '')

            # Initialize BIDSLayoutIndexer and BIDSLayout
            # validate=True by default for both indexer and layout, BIDS-validator is not skipped
            # Force index for samples tsv and json files, and for subject subfolders containing microscopy files based on extensions.
            # Force index of subject subfolders containing CT-scan files under "anat" or "ct" folder based on extensions and modality suffix.
            # TODO: remove force indexing of microscopy files after BEP microscopy is merged in BIDS
            # TODO: remove force indexing of CT-scan files after BEP CT-scan is merged in BIDS
            ext_microscopy = ('.png', '.ome.tif', '.ome.tiff', '.ome.tf2',
                              '.ome.tf8', '.ome.btf')
            ext_ct = ('.nii.gz', '.nii')
            suffix_ct = ('ct', 'CT')
            force_index = []
            for root, dirs, files in os.walk(path_data):
                for file in files:
                    # Microscopy
                    if file == "samples.tsv" or file == "samples.json":
                        force_index.append(file)
                    if (file.endswith(ext_microscopy)
                            and os.path.basename(root) == "microscopy" and
                        (root.replace(path_data, '').startswith("sub"))):
                        force_index.append(
                            os.path.join(root.replace(path_data, '')))
                    # CT-scan
                    if (file.endswith(ext_ct)
                            and file.split('.')[0].endswith(suffix_ct)
                            and (os.path.basename(root) == "anat"
                                 or os.path.basename(root) == "ct") and
                        (root.replace(path_data, '').startswith("sub"))):
                        force_index.append(
                            os.path.join(root.replace(path_data, '')))
            indexer = pybids.BIDSLayoutIndexer(force_index=force_index)

            if self.derivatives:
                self.write_derivatives_dataset_description(path_data)

            layout = pybids.BIDSLayout(path_data,
                                       config=self.bids_config,
                                       indexer=indexer,
                                       derivatives=self.derivatives)

            # Transform layout to dataframe with all entities and json metadata
            # As per pybids, derivatives don't include parsed entities, only the "path" column
            df_next = layout.to_df(metadata=True)

            # Add filename column
            df_next.insert(1, 'filename',
                           df_next['path'].apply(os.path.basename))

            # Drop rows with json, tsv and LICENSE files in case no extensions are provided in config file for filtering
            df_next = df_next[~df_next['filename'].str.
                              endswith(tuple(['.json', '.tsv', 'LICENSE']))]

            # Update dataframe with subject files of chosen contrasts and extensions,
            # and with derivative files of chosen target_suffix from loader parameters
            df_next = df_next[(
                ~df_next['path'].str.contains('derivatives')
                & df_next['suffix'].str.contains('|'.join(self.contrast_lst))
                & df_next['extension'].str.contains('|'.join(self.extensions)))
                              | (df_next['path'].str.contains('derivatives')
                                 & df_next['filename'].str.
                                 contains('|'.join(self.target_suffix)))]

            if df_next[~df_next['path'].str.contains('derivatives')].empty:
                # Warning if no subject files are found in path_data
                logger.warning(
                    "No subject files were found in '{}' dataset. Skipping dataset."
                    .format(path_data))

            else:
                # Add tsv files metadata to dataframe
                df_next = self.add_tsv_metadata(df_next, path_data, layout)

                # TODO: check if other files are needed for EEG and DWI

                # Merge dataframes
                self.df = pd.concat([self.df, df_next],
                                    join='outer',
                                    ignore_index=True)

        if self.df.empty:
            # Raise error and exit if no subject files are found in any path data
            raise RuntimeError(
                "No subject files found. Check selection of parameters in config.json"
                " and datasets compliance with BIDS specification.")

        # Drop duplicated rows based on all columns except 'path'
        # Keep first occurrence
        columns = self.df.columns.to_list()
        columns.remove('path')
        self.df = self.df[~(
            self.df.astype(str).duplicated(subset=columns, keep='first'))]

        # If indexing of derivatives is true
        if self.derivatives:

            # Get list of subject files with available derivatives
            has_deriv, deriv = self.get_subjects_with_derivatives()

            # Filter dataframe to keep subjects files with available derivatives only
            if has_deriv:
                self.df = self.df[
                    self.df['filename'].str.contains('|'.join(has_deriv))
                    | self.df['filename'].str.contains('|'.join(deriv))]
            else:
                # Raise error and exit if no derivatives are found for any subject files
                raise RuntimeError("Derivatives not found.")

        # Reset index
        self.df.reset_index(drop=True, inplace=True)

        # Drop columns with all null values
        self.df.dropna(axis=1, inplace=True, how='all')
Example No. 28
import pandas as pd
import json
import bids
import matplotlib.pyplot as plt
import plotje

# Download data from here: Esteban, O. et al. Crowdsourced MRI quality metrics
# and expert quality annotations for training of humans and machines. Sci Data 6, 30 (2019).
# Then run make_distributions.py to summarize the data from this snapshot
summary_path = './data/summary/bold_curated'
dataset = '/home/william/datasets/es-fmri_v2/'
dfd = pd.read_csv(summary_path + qc + '_summary.csv', index_col=[0])

layout = bids.BIDSLayout(dataset)
layout.add_derivatives(dataset + '/derivatives/')
layout = layout.to_df()

keeprow = []
for i, n in layout.iterrows():
    if 'mriqc_output' in n['path'] and n['path'].endswith('.json'):
        keeprow.append(i)

layout = layout.loc[keeprow]
layout_bold = layout[layout['suffix'] == 'bold']

params = [('pre', 'rest', 'preop'), ('es', 'es', 'postop')]

qcmet = {}
qcdesc = {}
for p in params:
    qcmet[p[0]] = {}
Example No. 29
def run_fitlins(argv=None):
    import re
    from nipype import logging as nlogging

    warnings.showwarning = _warn_redirect
    opts = get_parser().parse_args(argv)

    force_index = [
        # If entry looks like `/<pattern>/`, treat `<pattern>` as a regex
        re.compile(ign[1:-1]) if (ign[0], ign[-1]) == ('/', '/') else ign
        # Iterate over empty tuple if undefined
        for ign in opts.force_index or ()
    ]
    ignore = [
        # If entry looks like `/<pattern>/`, treat `<pattern>` as a regex
        re.compile(ign[1:-1]) if (ign[0], ign[-1]) == ('/', '/') else ign
        # Iterate over empty tuple if undefined
        for ign in opts.ignore or ()
    ]

    log_level = 25 + 5 * (opts.quiet - opts.verbose)
    logger.setLevel(log_level)
    nlogging.getLogger('nipype.workflow').setLevel(log_level)
    nlogging.getLogger('nipype.interface').setLevel(log_level)
    nlogging.getLogger('nipype.utils').setLevel(log_level)

    if not opts.space:
        # make it an explicit None
        opts.space = None
    if not opts.desc_label:
        # make it an explicit None
        opts.desc_label = None

    ncpus = opts.n_cpus
    if ncpus < 1:
        ncpus = cpu_count()

    plugin_settings = {
        'plugin': 'MultiProc',
        'plugin_args': {
            'n_procs': ncpus,
            'raise_insufficient': False,
            'maxtasksperchild': 1,
        },
    }

    if opts.mem_gb:
        plugin_settings['plugin_args']['memory_gb'] = opts.mem_gb

    model = default_path(opts.model, opts.bids_dir, 'model-default_smdl.json')
    if opts.model in (None, 'default') and not op.exists(model):
        model = 'default'

    derivatives = True if not opts.derivatives else opts.derivatives
    # Need this when specifying args directly (i.e. neuroscout)
    # god bless neuroscout, but let's make it work for others!
    if isinstance(derivatives, list) and len(derivatives) == 1:
        # WRONG AND EVIL to those who have spaces in their paths... bad bad practice
        # TODO - fix neuroscout
        derivatives = derivatives[0].split(" ")

    if opts.estimator != 'afni':
        if opts.error_ts:
            raise NotImplementedError(
                "Saving the error time series is only implemented for"
                " the afni estimator. If this is a feature you want"
                f" for {opts.estimator} please let us know on github.")

    if opts.derivative_label:
        logger.warning('--derivative-label no longer has any effect; '
                       'set output directory name directly')
    os.makedirs(opts.output_dir, exist_ok=True)
    fub.write_derivative_description(opts.bids_dir, opts.output_dir,
                                     vars(opts))

    work_dir = mkdtemp() if opts.work_dir is None else opts.work_dir

    # Go ahead and initialize the layout database
    if opts.database_path is None:
        database_path = Path(work_dir) / 'dbcache'
        reset_database = True
    else:
        database_path = opts.database_path
        reset_database = False

    indexer = bids.BIDSLayoutIndexer(ignore=ignore, force_index=force_index)
    layout = bids.BIDSLayout(
        opts.bids_dir,
        derivatives=derivatives,
        database_path=database_path,
        reset_database=reset_database,
        indexer=indexer,
    )

    subject_list = None
    if opts.participant_label is not None:
        subject_list = fub.collect_participants(
            layout, participant_label=opts.participant_label)

    # Build main workflow
    logger.log(25, INIT_MSG(version=__version__, subject_list=subject_list))

    # TODO: Fix AUTO_MODEL
    # if model == 'default':
    #     models = auto_model(layout)
    # else:
    #     import json
    #     if op.exists(model):
    #         model_dict = json.loads(Path(model).read_text())
    #     models = [model_dict]

    model_dict = None
    if model == 'default':
        retcode = 1
        raise NotImplementedError(
            "The default model has not been implemented yet.")
    else:
        import json

        if op.exists(model):
            model_dict = json.loads(Path(model).read_text())

    if not model_dict:
        raise ValueError(
            f'model_dict cannot be empty. Invalid model filepath {model}.')

    graph = BIDSStatsModelsGraph(layout, model_dict)

    fitlins_wf = init_fitlins_wf(
        database_path,
        opts.output_dir,
        graph=graph,
        analysis_level=opts.analysis_level,
        model=model,
        space=opts.space,
        desc=opts.desc_label,
        participants=subject_list,
        base_dir=work_dir,
        smoothing=opts.smoothing,
        drop_missing=opts.drop_missing,
        drift_model=opts.drift_model,
        estimator=opts.estimator,
        errorts=opts.error_ts,
    )
    fitlins_wf.config = deepcopy(config.get_fitlins_config()._sections)

    if opts.work_dir:
        # dump crashes in working directory (non /tmp)
        fitlins_wf.config['execution']['crashdump_dir'] = opts.work_dir
    retcode = 0
    if not opts.reports_only:
        try:
            fitlins_wf.run(**plugin_settings)
        except Exception as e:
            logger.critical(f"FitLins failed: {e}")
            raise

    run_context = {
        'version': __version__,
        'command': ' '.join(sys.argv),
        'timestamp': time.strftime('%Y-%m-%d %H:%M:%S %z'),
    }

    selectors = {'desc': opts.desc_label, 'space': opts.space}
    if subject_list is not None:
        selectors['subject'] = subject_list

    graph.load_collections(**selectors)
    report_dict = build_report_dict(opts.output_dir, work_dir, graph)
    write_full_report(report_dict, run_context, opts.output_dir)

    return retcode
Example No. 30
def fw_heudiconv_download():
    """Use fw-heudiconv to download BIDS data."""
    subjects = [subject_container.label]
    sessions = [session_container.label]

    # Use manually specified T1 if it exists
    if manual_t1 is not None:
        anat_input = manual_t1_path
        subject_label = subject_container.label.replace('_',
                                                        'x').replace(' ', 'x')
        session_label = session_container.label.replace('_',
                                                        'x').replace(' ', 'x')
        prefix = 'sub-{}_ses-{}_'.format(subject_label, session_label)
        return True, anat_input, prefix

    # Do the download!
    bids_root.parent.mkdir(parents=True, exist_ok=True)
    downloads = export.gather_bids(fw, project_label, subjects, sessions)
    export.download_bids(fw,
                         downloads,
                         str(bids_dir.resolve()),
                         dry_run=False,
                         folders_to_download=['anat'])

    bids.config.set_option('extension_initial_dot', True)  # suppress warning
    layout = bids.BIDSLayout(bids_root)
    filters = {}

    if bids_filter_path:
        logger.info("Using {} as BIDS filter.".format(bids_filter_path))
        with open(bids_filter_path) as f:
            data = json.load(f)
        try:
            filters = data["t1w"]
        except KeyError as ke:
            print(ke)
            logger.warning("Could not find 't1w' field in json.")
            logger.info("BIDS filter file not formatted correctly.")
            return False
    else:
        if bids_sub:
            filters["subject"] = [bids_sub]
        else:
            filters["subject"] = subjects
        if bids_ses:
            filters["session"] = [bids_ses]
        else:
            filters["session"] = sessions

        if bids_acq:
            filters["acquisition"] = bids_acq

        if bids_run:
            filters["run"] = bids_run

    anat_list = layout.get(return_type='file',
                           suffix='T1w',
                           extension=['.nii', '.nii.gz'],
                           **filters)

    # if there are multiple files or no files, error out
    # otherwise just use the one
    if len(anat_list) > 1:
        logger.warning(
            "Multiple anatomical files found in %s. If you want to process multiple images, use the longitudinal gear.",
            bids_root)
        return False
    elif not anat_list:
        logger.warning("No anatomical files found in %s", bids_root)
        return False
    else:
        anat_input = anat_list[0]

    logger.info("Using {} as input anatomical image.".format(anat_input))

    # Generate prefix from bids layout
    basename = os.path.basename(anat_input).split('.')[0]
    prefix = basename.replace('_T1w', '') + '_'

    return True, anat_input, prefix
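The BIDS filter file read above appears to follow the fMRIPrep-style convention: a JSON object keyed by query name, whose value is a dictionary of pybids entity filters. An illustrative sketch of the shape the code expects (the entity values are assumptions, not from the source):

# Illustrative contents of the file at bids_filter_path; the code above
# takes data["t1w"] and passes it as **filters to layout.get():
example_bids_filter = {
    "t1w": {
        "subject": "01",           # any pybids entity can appear here
        "acquisition": "mprage"    # value is illustrative
    }
}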