Example #1
def collect_data(bids_dir, participant_label, task=None, echo=None,
                 bids_validate=True):
    """
    Uses pybids to retrieve the input data for a given participant
    >>> bids_root, _ = collect_data(str(datadir / 'ds054'), '100185',
    ...                             bids_validate=False)
    >>> bids_root['fmap']  # doctest: +ELLIPSIS
    ['.../ds054/sub-100185/fmap/sub-100185_magnitude1.nii.gz', \
'.../ds054/sub-100185/fmap/sub-100185_magnitude2.nii.gz', \
'.../ds054/sub-100185/fmap/sub-100185_phasediff.nii.gz']
    >>> bids_root['bold']  # doctest: +ELLIPSIS
    ['.../ds054/sub-100185/func/sub-100185_task-machinegame_run-01_bold.nii.gz', \
'.../ds054/sub-100185/func/sub-100185_task-machinegame_run-02_bold.nii.gz', \
'.../ds054/sub-100185/func/sub-100185_task-machinegame_run-03_bold.nii.gz', \
'.../ds054/sub-100185/func/sub-100185_task-machinegame_run-04_bold.nii.gz', \
'.../ds054/sub-100185/func/sub-100185_task-machinegame_run-05_bold.nii.gz', \
'.../ds054/sub-100185/func/sub-100185_task-machinegame_run-06_bold.nii.gz']
    >>> bids_root['sbref']  # doctest: +ELLIPSIS
    ['.../ds054/sub-100185/func/sub-100185_task-machinegame_run-01_sbref.nii.gz', \
'.../ds054/sub-100185/func/sub-100185_task-machinegame_run-02_sbref.nii.gz', \
'.../ds054/sub-100185/func/sub-100185_task-machinegame_run-03_sbref.nii.gz', \
'.../ds054/sub-100185/func/sub-100185_task-machinegame_run-04_sbref.nii.gz', \
'.../ds054/sub-100185/func/sub-100185_task-machinegame_run-05_sbref.nii.gz', \
'.../ds054/sub-100185/func/sub-100185_task-machinegame_run-06_sbref.nii.gz']
    >>> bids_root['t1w']  # doctest: +ELLIPSIS
    ['.../ds054/sub-100185/anat/sub-100185_T1w.nii.gz']
    >>> bids_root['t2w']  # doctest: +ELLIPSIS
    []
    """
    if isinstance(bids_dir, BIDSLayout):
        layout = bids_dir
    else:
        layout = BIDSLayout(str(bids_dir), validate=bids_validate)

    queries = {
        'fmap': {'datatype': 'fmap'},
        'bold': {'datatype': 'func', 'suffix': 'bold'},
        'sbref': {'datatype': 'func', 'suffix': 'sbref'},
        'flair': {'datatype': 'anat', 'suffix': 'FLAIR'},
        't2w': {'datatype': 'anat', 'suffix': 'T2w'},
        't1w': {'datatype': 'anat', 'suffix': 'T1w'},
        'roi': {'datatype': 'anat', 'suffix': 'roi'},
    }

    if task:
        queries['bold']['task'] = task

    if echo:
        queries['bold']['echo'] = echo

    subj_data = {
        dtype: sorted(layout.get(return_type='file', subject=participant_label,
                                 extensions=['nii', 'nii.gz'], **query))
        for dtype, query in queries.items()}

    # Special case: multi-echo BOLD, grouping echos
    if any(['_echo-' in bold for bold in subj_data['bold']]):
        subj_data['bold'] = group_multiecho(subj_data['bold'])

    return subj_data, layout
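
A minimal usage sketch of the function above (hypothetical path and participant, mirroring the doctest; not part of the original module):

# Hypothetical call against a local copy of ds054.
subj_data, layout = collect_data('/data/ds054', '100185', task='machinegame',
                                 bids_validate=False)
print(subj_data['t1w'])        # e.g. ['.../sub-100185/anat/sub-100185_T1w.nii.gz']
print(len(subj_data['bold']))  # number of BOLD runs found for the task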
Example #2
def test_dcm2bids():
    tmpBase = os.path.join(TEST_DATA_DIR, "tmp")
    #bidsDir = TemporaryDirectory(dir=tmpBase)
    bidsDir = TemporaryDirectory()

    tmpSubDir = os.path.join(bidsDir.name, DEFAULT.tmpDirName, "sub-01")
    shutil.copytree(
            os.path.join(TEST_DATA_DIR, "sidecars"),
            tmpSubDir)

    app = Dcm2bids(
            [TEST_DATA_DIR], "01",
            os.path.join(TEST_DATA_DIR, "config_test.json"),
            bidsDir.name
            )
    app.run()
    layout = BIDSLayout(bidsDir.name, validate=False)

    assert layout.get_subjects() == ["01"]
    assert layout.get_sessions() == []
    assert layout.get_tasks() == ["rest"]
    assert layout.get_runs() == [1, 2, 3]

    app = Dcm2bids(
            [TEST_DATA_DIR], "01",
            os.path.join(TEST_DATA_DIR, "config_test.json"),
            bidsDir.name
            )
    app.run()


    fmapFile = os.path.join(
            bidsDir.name, "sub-01", "fmap", "sub-01_echo-492_fmap.json")
    data = load_json(fmapFile)
    fmapMtime = os.stat(fmapFile).st_mtime
    assert data["IntendedFor"] == "dwi/sub-01_dwi.nii.gz"

    data = load_json(os.path.join(
        bidsDir.name, "sub-01", "localizer", "sub-01_run-01_localizer.json"))
    assert data["ProcedureStepDescription"] == "Modify by dcm2bids"

    #rerun
    shutil.rmtree(tmpSubDir)
    shutil.copytree(
            os.path.join(TEST_DATA_DIR, "sidecars"),
            tmpSubDir)

    app = Dcm2bids(
            [TEST_DATA_DIR], "01",
            os.path.join(TEST_DATA_DIR, "config_test.json"),
            bidsDir.name
            )
    app.run()

    fmapMtimeRerun = os.stat(fmapFile).st_mtime
    assert fmapMtime == fmapMtimeRerun

    bidsDir.cleanup()
Example #3
import os
import glob
import numpy as np
import pandas as pd
import matplotlib.pyplot as plt
import seaborn as sns
from nltools.data import Brain_Data, Adjacency
from nltools.mask import expand_mask, roi_to_brain
from nltools.stats import fdr, threshold, fisher_r_to_z, one_sample_permutation
from sklearn.metrics import pairwise_distances
from nilearn.plotting import plot_glass_brain, plot_stat_map, view_img_on_surf, view_img
from bids import BIDSLayout, BIDSValidator

data_dir = '../data/localizer'
layout = BIDSLayout(data_dir, derivatives=True)

# ### Single Subject Pattern Similarity
# Recall that in the Single Subject Model Lab we ran single-subject models for 10 different regressors for the Pinel Localizer task. In this tutorial, we will use those results to learn how to conduct RSA-style analyses.
#
# First, let's get a list of all of the subject IDs and load the beta values from each condition for a single subject into a `Brain_Data` object.


sub = 'S01'

file_list = glob.glob(
    os.path.join(data_dir, 'derivatives', 'fmriprep', f'sub-{sub}', 'func',
                 '*denoised*.nii.gz'))
file_list = [x for x in file_list if 'betas' not in x]
file_list.sort()
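
The snippet stops before the step the comments describe. A short continuation sketch, assuming nltools' Brain_Data accepts a list of NIfTI paths and that condition labels can be parsed from the filenames (the parsing rule below is illustrative):

# Load all denoised single-condition beta maps for this subject into one object.
beta = Brain_Data(file_list)

# Illustrative condition labels derived from the filenames.
conditions = [os.path.basename(f).split('_')[-2] for f in file_list]

# Representational similarity: 1 - correlation distance between condition maps.
similarity = 1 - pairwise_distances(beta.data, metric='correlation')
rsa = Adjacency(similarity, matrix_type='similarity', labels=conditions)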
Example #4
File: run.py  Project: kjamison/smriprep
def build_workflow(opts, retval):
    """
    Create the Nipype Workflow that supports the whole execution graph, given the inputs.

    All the checks and the construction of the workflow are done
    inside this function that has pickleable inputs and output
    dictionary (``retval``) to allow isolation using a
    ``multiprocessing.Process`` that allows smriprep to enforce
    a hard-limited memory-scope.

    """
    from shutil import copyfile
    from os import cpu_count
    import uuid
    from time import strftime
    from subprocess import check_call, CalledProcessError, TimeoutExpired
    from pkg_resources import resource_filename as pkgrf

    import json
    from bids import BIDSLayout
    from nipype import logging, config as ncfg
    from niworkflows.utils.bids import collect_participants
    from ..__about__ import __version__
    from ..workflows.base import init_smriprep_wf

    logger = logging.getLogger('nipype.workflow')

    INIT_MSG = """
    Running sMRIPrep version {version}:
      * BIDS dataset path: {bids_dir}.
      * Participant list: {subject_list}.
      * Run identifier: {uuid}.

    {spaces}
    """.format

    # Set up some instrumental utilities
    run_uuid = '%s_%s' % (strftime('%Y%m%d-%H%M%S'), uuid.uuid4())

    # First check that bids_dir looks like a BIDS folder
    bids_dir = opts.bids_dir.resolve()
    layout = BIDSLayout(str(bids_dir), validate=False)
    subject_list = collect_participants(
        layout, participant_label=opts.participant_label)

    bids_filters = json.loads(
        opts.bids_filter_file.read_text()) if opts.bids_filter_file else None

    # Load base plugin_settings from file if --use-plugin
    if opts.use_plugin is not None:
        from yaml import safe_load as loadyml
        with open(opts.use_plugin) as f:
            plugin_settings = loadyml(f)
        plugin_settings.setdefault('plugin_args', {})
    else:
        # Defaults
        plugin_settings = {
            'plugin': 'MultiProc',
            'plugin_args': {
                'raise_insufficient': False,
                'maxtasksperchild': 1,
            }
        }

    # Resource management options
    # Note that we're making strong assumptions about valid plugin args
    # This may need to be revisited if people try to use batch plugins
    nprocs = plugin_settings['plugin_args'].get('n_procs')
    # Permit overriding plugin config with specific CLI options
    if nprocs is None or opts.nprocs is not None:
        nprocs = opts.nprocs
        if nprocs is None or nprocs < 1:
            nprocs = cpu_count()
        plugin_settings['plugin_args']['n_procs'] = nprocs

    if opts.mem_gb:
        plugin_settings['plugin_args']['memory_gb'] = opts.mem_gb

    omp_nthreads = opts.omp_nthreads
    if omp_nthreads == 0:
        omp_nthreads = min(nprocs - 1 if nprocs > 1 else cpu_count(), 8)

    if 1 < nprocs < omp_nthreads:
        logger.warning(
            'Per-process threads (--omp-nthreads=%d) exceed total '
            'available CPUs (--nprocs/--ncpus=%d)', omp_nthreads, nprocs)

    # Set up directories
    output_dir = opts.output_dir.resolve()
    log_dir = output_dir / 'smriprep' / 'logs'
    work_dir = opts.work_dir.resolve()

    # Check and create output and working directories
    log_dir.mkdir(parents=True, exist_ok=True)
    work_dir.mkdir(parents=True, exist_ok=True)

    # Nipype config (logs and execution)
    ncfg.update_config({
        'logging': {
            'log_directory': str(log_dir),
            'log_to_file': True
        },
        'execution': {
            'crashdump_dir': str(log_dir),
            'crashfile_format': 'txt',
            'get_linked_libs': False,
            'stop_on_first_crash': opts.stop_on_first_crash,
        },
        'monitoring': {
            'enabled': opts.resource_monitor,
            'sample_frequency': '0.5',
            'summary_append': True,
        }
    })

    if opts.resource_monitor:
        ncfg.enable_resource_monitor()

    retval['return_code'] = 0
    retval['plugin_settings'] = plugin_settings
    retval['bids_dir'] = str(bids_dir)
    retval['output_dir'] = str(output_dir)
    retval['work_dir'] = str(work_dir)
    retval['subject_list'] = subject_list
    retval['run_uuid'] = run_uuid
    retval['workflow'] = None

    # Called with reports only
    if opts.reports_only:
        from niworkflows.reports import generate_reports

        logger.log(25, 'Running --reports-only on participants %s',
                   ', '.join(subject_list))
        if opts.run_uuid is not None:
            run_uuid = opts.run_uuid
        retval['return_code'] = generate_reports(subject_list,
                                                 str(output_dir),
                                                 run_uuid,
                                                 packagename="smriprep")
        return retval

    logger.log(
        25,
        INIT_MSG(version=__version__,
                 bids_dir=bids_dir,
                 subject_list=subject_list,
                 uuid=run_uuid,
                 spaces=opts.output_spaces))

    # Build main workflow
    retval['workflow'] = init_smriprep_wf(
        debug=opts.sloppy,
        fast_track=opts.fast_track,
        freesurfer=opts.run_reconall,
        fs_subjects_dir=opts.fs_subjects_dir,
        hires=opts.hires,
        layout=layout,
        longitudinal=opts.longitudinal,
        low_mem=opts.low_mem,
        omp_nthreads=omp_nthreads,
        output_dir=str(output_dir),
        run_uuid=run_uuid,
        skull_strip_fixed_seed=opts.skull_strip_fixed_seed,
        skull_strip_mode=opts.skull_strip_mode,
        skull_strip_template=opts.skull_strip_template[0],
        spaces=opts.output_spaces,
        subject_list=subject_list,
        work_dir=str(work_dir),
        bids_filters=bids_filters,
    )
    retval['return_code'] = 0

    boilerplate = retval['workflow'].visit_desc()
    (log_dir / 'CITATION.md').write_text(boilerplate)
    logger.log(
        25, 'Works derived from this sMRIPrep execution should '
        'include the following boilerplate:\n\n%s', boilerplate)

    # Generate HTML file resolving citations
    cmd = [
        'pandoc', '-s', '--bibliography',
        pkgrf('smriprep',
              'data/boilerplate.bib'), '--filter', 'pandoc-citeproc',
        '--metadata', 'pagetitle="sMRIPrep citation boilerplate"',
        str(log_dir / 'CITATION.md'), '-o',
        str(log_dir / 'CITATION.html')
    ]
    try:
        check_call(cmd, timeout=10)
    except (FileNotFoundError, CalledProcessError, TimeoutExpired):
        logger.warning('Could not generate CITATION.html file:\n%s',
                       ' '.join(cmd))

    # Generate LaTex file resolving citations
    cmd = [
        'pandoc', '-s', '--bibliography',
        pkgrf('smriprep', 'data/boilerplate.bib'), '--natbib',
        str(log_dir / 'CITATION.md'), '-o',
        str(log_dir / 'CITATION.tex')
    ]
    try:
        check_call(cmd, timeout=10)
    except (FileNotFoundError, CalledProcessError, TimeoutExpired):
        logger.warning('Could not generate CITATION.tex file:\n%s',
                       ' '.join(cmd))
    else:
        copyfile(pkgrf('smriprep', 'data/boilerplate.bib'),
                 str(log_dir / 'CITATION.bib'))
    return retval
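
The docstring above notes that build_workflow has pickleable inputs and outputs so it can run inside a multiprocessing.Process for hard memory isolation. A minimal sketch of that calling pattern (hypothetical driver code, not taken from smriprep):

import multiprocessing as mp

def _build(opts, retval):
    # Child process: fill the pickleable dict and exit, releasing the memory
    # consumed while indexing the dataset and building the workflow graph.
    build_workflow(opts, retval)

with mp.Manager() as mgr:
    retval = mgr.dict()
    proc = mp.Process(target=_build, args=(opts, retval))
    proc.start()
    proc.join()
    # Read the results while the manager context is still open.
    workflow = retval.get('workflow')
    plugin_settings = retval.get('plugin_settings')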
Example #5
        logtext(LOGFILE, ' '.join(dcm2bids_command))
        logtext(LOGFILE, str(subprocess.check_output(dcm2bids_command)))
     
        #delete temporary folder
        tmpBidsDir=os.path.join(niftidir,'tmp_dcm2bids')
        if cleanup:
            try:
                logtext(LOGFILE,'Cleaning up %s directory.' % tmpBidsDir)
                rmtree(tmpBidsDir)
            except OSError:
                logtext(LOGFILE, 'problem deleting tmp_dcm2bids directory due to OS error. Please delete manually')


        # perform deface
        createDatasetDescription(niftidir, "PROJECTNAME")
        layout = BIDSLayout(niftidir)
        T1w=layout.get(subject=subject, suffix='T1w', extension='nii.gz')
        for t1w in T1w:
            t1wpath=t1w.path
            deface_command = "pydeface --force {}".format(t1wpath).split()
            logtext(LOGFILE,"Executing command: " + " ".join(deface_command))
            logtext(LOGFILE,subprocess.check_output(deface_command))

        logtext(LOGFILE, "Get project BIDS bidsaction map")
        if os.path.exists(bidsactionfile):
            with open(bidsactionfile) as f:
                action = json.load(f)

            try:
                copyitems = action['copy']
            except KeyError:
Example #6
def collect_data(
    bids_dir,
    participant_label,
    task=None,
    echo=None,
    bids_validate=True,
    bids_filters=None,
):
    """
    Uses pybids to retrieve the input data for a given participant

    Examples
    --------
    >>> bids_root, _ = collect_data(str(datadir / 'ds054'), '100185',
    ...                             bids_validate=False)
    >>> bids_root['fmap']  # doctest: +ELLIPSIS
    ['.../ds054/sub-100185/fmap/sub-100185_magnitude1.nii.gz', \
'.../ds054/sub-100185/fmap/sub-100185_magnitude2.nii.gz', \
'.../ds054/sub-100185/fmap/sub-100185_phasediff.nii.gz']
    >>> bids_root['bold']  # doctest: +ELLIPSIS
    ['.../ds054/sub-100185/func/sub-100185_task-machinegame_run-01_bold.nii.gz', \
'.../ds054/sub-100185/func/sub-100185_task-machinegame_run-02_bold.nii.gz', \
'.../ds054/sub-100185/func/sub-100185_task-machinegame_run-03_bold.nii.gz', \
'.../ds054/sub-100185/func/sub-100185_task-machinegame_run-04_bold.nii.gz', \
'.../ds054/sub-100185/func/sub-100185_task-machinegame_run-05_bold.nii.gz', \
'.../ds054/sub-100185/func/sub-100185_task-machinegame_run-06_bold.nii.gz']
    >>> bids_root['sbref']  # doctest: +ELLIPSIS
    ['.../ds054/sub-100185/func/sub-100185_task-machinegame_run-01_sbref.nii.gz', \
'.../ds054/sub-100185/func/sub-100185_task-machinegame_run-02_sbref.nii.gz', \
'.../ds054/sub-100185/func/sub-100185_task-machinegame_run-03_sbref.nii.gz', \
'.../ds054/sub-100185/func/sub-100185_task-machinegame_run-04_sbref.nii.gz', \
'.../ds054/sub-100185/func/sub-100185_task-machinegame_run-05_sbref.nii.gz', \
'.../ds054/sub-100185/func/sub-100185_task-machinegame_run-06_sbref.nii.gz']
    >>> bids_root['t1w']  # doctest: +ELLIPSIS
    ['.../ds054/sub-100185/anat/sub-100185_T1w.nii.gz']
    >>> bids_root['t2w']  # doctest: +ELLIPSIS
    []
    >>> bids_root, _ = collect_data(str(datadir / 'ds051'), '01',
    ...                             bids_validate=False, bids_filters={'t1w':{'run': 1}})
    >>> bids_root['t1w']  # doctest: +ELLIPSIS
    ['.../ds051/sub-01/anat/sub-01_run-01_T1w.nii.gz']

    """
    if isinstance(bids_dir, BIDSLayout):
        layout = bids_dir
    else:
        layout = BIDSLayout(str(bids_dir), validate=bids_validate)

    queries = {
        "fmap": {
            "datatype": "fmap"
        },
        "bold": {
            "datatype": "func",
            "suffix": "bold"
        },
        "sbref": {
            "datatype": "func",
            "suffix": "sbref"
        },
        "flair": {
            "datatype": "anat",
            "suffix": "FLAIR"
        },
        "t2w": {
            "datatype": "anat",
            "suffix": "T2w"
        },
        "t1w": {
            "datatype": "anat",
            "suffix": "T1w"
        },
        "roi": {
            "datatype": "anat",
            "suffix": "roi"
        },
    }
    bids_filters = bids_filters or {}
    for acq, entities in bids_filters.items():
        queries[acq].update(entities)

    if task:
        queries["bold"]["task"] = task

    if echo:
        queries["bold"]["echo"] = echo

    subj_data = {
        dtype: sorted(
            layout.get(
                return_type="file",
                subject=participant_label,
                extension=["nii", "nii.gz"],
                **query,
            ))
        for dtype, query in queries.items()
    }

    # Special case: multi-echo BOLD, grouping echos
    if any(["_echo-" in bold for bold in subj_data["bold"]]):
        subj_data["bold"] = group_multiecho(subj_data["bold"])

    return subj_data, layout
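
Because each entry of bids_filters is merged into the matching query with dict.update, any BIDS entity can be pinned per datatype. A hypothetical call combining the run filter from the doctest with an acquisition filter on BOLD (entity values are illustrative):

filters = {'t1w': {'run': 1}, 'bold': {'acquisition': 'highres'}}
subj_data, layout = collect_data('/data/bids', '01',
                                 bids_validate=False, bids_filters=filters)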
Example #7
def run_conversion(raw_dir,
                   output_base_dir,
                   analysis_level,
                   info_out_dir,
                   participant_label,
                   session_label,
                   public_output,
                   use_new_ids,
                   ds_version,
                   info_list,
                   dataset_description,
                   new_id_lut_file=None,
                   bvecs_from_scanner_file=None,
                   tp6_raw_lut=None,
                   dry_run=False,
                   demo_file=None,
                   session_duration_min=120):
    # privacy settings
    private_str = "_PRIVATE" if not (public_output and use_new_ids) else ""
    output_dir = Path(
        output_base_dir) / f"LHAB_{ds_version}{private_str}" / "sourcedata"
    metainfo_dir = Path(
        output_base_dir) / f"LHAB_{ds_version}{private_str}" / "metainfo"
    metainfo_dir.mkdir(exist_ok=True, parents=True)

    output_dir.mkdir(parents=True, exist_ok=True)
    info_out_dir = Path(info_out_dir) / "PRIVATE"
    info_out_dir.mkdir(parents=True, exist_ok=True)

    if analysis_level == "participant":
        for old_subject_id in participant_label:
            submit_single_subject(
                old_subject_id,
                session_label,
                raw_dir,
                output_dir,
                info_list,
                info_out_dir,
                bvecs_from_scanner_file=bvecs_from_scanner_file,
                public_output=public_output,
                use_new_ids=use_new_ids,
                new_id_lut_file=new_id_lut_file,
                tp6_raw_lut=tp6_raw_lut,
                dry_run=dry_run,
                session_duration_min=session_duration_min)
        print("\n\n\n\nDONE.\nConverted %d subjects." % len(participant_label))
        print(participant_label)

    elif analysis_level == "group":
        ds_desc_file = output_dir / "dataset_description.json"
        if ds_desc_file.is_file():
            ds_desc_file.unlink()
        dataset_description["DataSetVersion"] = ds_version
        add_info_to_json(ds_desc_file, dataset_description, create_new=True)

        # Demos
        print("Exporting demos...")
        pwd = getpass.getpass("Enter the Password for dob file:")
        calc_demos(output_dir,
                   info_out_dir,
                   demo_file,
                   pwd,
                   new_id_lut_file=new_id_lut_file)

        # check for duplicates
        mappings = concat_tsvs(info_out_dir / "parrec_mapping_PRIVATE")
        dups = mappings[mappings.duplicated(subset="from")]
        assert len(dups) == 0, f"duplicates found:\n{dups}"

        # concat notconverted files
        unconv_df = concat_tsvs(info_out_dir / "unconverted_files")
        unconv_df.to_csv(info_out_dir / "unconverted_files.tsv",
                         sep="\t",
                         index=False)

        print("X" * 20 + "\nRuning BIDS validator")
        os.system(f"bids-validator {str(output_dir)}")

        print("\n Get BIDS layout")
        layout = BIDSLayout(output_dir)
        layout.to_df().to_csv(metainfo_dir / "layout.csv", index=False)

    else:
        raise RuntimeError(f"Unknown analysis level: {analysis_level}")
Example #8
def bidsmri2project(directory, args):
    #Parse dataset_description.json file in BIDS directory
    if (os.path.isdir(os.path.join(directory))):
        try:
            with open(os.path.join(directory,
                                   'dataset_description.json')) as data_file:
                dataset = json.load(data_file)
        except OSError:
            logging.critical(
                "Cannot find dataset_description.json file which is required in the BIDS spec"
            )
            exit("-1")
    else:
        logging.critical("Error: BIDS directory %s does not exist!" %
                         os.path.join(directory))
        exit("-1")

    #create project / nidm-exp doc
    project = Project()

    #add various attributes if they exist in BIDS dataset
    for key in dataset:
        #if key from dataset_description file is mapped to term in BIDS_Constants.py then add to NIDM object
        if key in BIDS_Constants.dataset_description:
            if type(dataset[key]) is list:
                project.add_attributes({
                    BIDS_Constants.dataset_description[key]:
                    "".join(dataset[key])
                })
            else:
                project.add_attributes(
                    {BIDS_Constants.dataset_description[key]: dataset[key]})
        #add absolute location of BIDS directory on disk for later finding of files which are stored relatively in NIDM document
        project.add_attributes({Constants.PROV['Location']: directory})

    #get BIDS layout
    bids_layout = BIDSLayout(directory)

    #create empty dictionary for sessions where key is subject id; used later to link scans to the same session as demographics
    session = {}
    participant = {}
    #Parse participants.tsv file in BIDS directory and create study and acquisition objects
    if os.path.isfile(os.path.join(directory, 'participants.tsv')):
        with open(os.path.join(directory, 'participants.tsv')) as csvfile:
            participants_data = csv.DictReader(csvfile, delimiter='\t')

            #logic to map variables to terms.#########################################################################################################

            #first iterate over variables in dataframe and check which ones are already mapped as BIDS constants and which are not.  For those that are not
            #we want to use the variable-term mapping functions to help the user do the mapping
            #iterate over columns
            mapping_list = []
            column_to_terms = {}
            for field in participants_data.fieldnames:

                #column is not in BIDS_Constants
                if not (field in BIDS_Constants.participants):
                    #add column to list for column_to_terms mapping
                    mapping_list.append(field)

            #do variable-term mappings
            if ((args.json_map != False) or (args.key != None)):

                #if user didn't supply a json mapping file but we're doing some variable-term mapping create an empty one for column_to_terms to use
                if args.json_map == False:
                    #defaults to participants.json because here we're mapping the participants.tsv file variables to terms
                    # if participants.json file doesn't exist then run without json mapping file
                    if not os.path.isfile(
                            os.path.join(directory, 'participants.json')):
                        #maps variables in CSV file to terms
                        temp = DataFrame(columns=mapping_list)

                        column_to_terms, cde = map_variables_to_terms(
                            directory=directory,
                            assessment_name='participants.tsv',
                            df=temp,
                            apikey=args.key,
                            output_file=os.path.join(directory,
                                                     'participants.json'))
                    else:
                        #maps variables in CSV file to terms
                        temp = DataFrame(columns=mapping_list)
                        column_to_terms, cde = map_variables_to_terms(
                            directory=directory,
                            assessment_name='participants.tsv',
                            df=temp,
                            apikey=args.key,
                            output_file=os.path.join(directory,
                                                     'participants.json'),
                            json_file=os.path.join(directory,
                                                   'participants.json'))

                else:
                    #maps variables in CSV file to terms
                    temp = DataFrame(columns=mapping_list)
                    column_to_terms, cde = map_variables_to_terms(
                        directory=directory,
                        assessment_name='participants.tsv',
                        df=temp,
                        apikey=args.key,
                        output_file=os.path.join(directory,
                                                 'participants.json'),
                        json_file=args.json_map)

            for row in participants_data:
                #create session object for subject to be used for participant metadata and image data
                #parse subject id from "sub-XXXX" string
                temp = row['participant_id'].split("-")
                #for ambiguity in BIDS datasets: sometimes participant_id is sub-XXXX and other times it's just XXXX
                if len(temp) > 1:
                    subjid = temp[1]
                else:
                    subjid = temp[0]
                logging.info(subjid)
                session[subjid] = Session(project)

                #add acquisition object
                acq = AssessmentAcquisition(session=session[subjid])

                acq_entity = AssessmentObject(acquisition=acq)
                participant[subjid] = {}
                participant[subjid]['person'] = acq.add_person(
                    attributes=({
                        Constants.NIDM_SUBJECTID: row['participant_id']
                    }))

                #add qualified association of participant with acquisition activity
                acq.add_qualified_association(
                    person=participant[subjid]['person'],
                    role=Constants.NIDM_PARTICIPANT)
                print(acq)

                for key, value in row.items():
                    if not value:
                        continue
                    #for variables in participants.tsv file who have term mappings in BIDS_Constants.py use those, add to json_map so we don't have to map these if user
                    #supplied arguments to map variables
                    if key in BIDS_Constants.participants:

                        #if this was the participant_id, we already handled it above creating agent / qualified association
                        if not (BIDS_Constants.participants[key]
                                == Constants.NIDM_SUBJECTID):
                            acq_entity.add_attributes(
                                {BIDS_Constants.participants[key]: value})

                    #else if user added -mapvars flag to command line then we'll use the variable-> term mapping procedures to help user map variables to terms (also used
                    # in CSV2NIDM.py)
                    else:

                        # WIP: trying to add new support for CDEs...
                        add_attributes_with_cde(prov_object=acq_entity,
                                                cde=cde,
                                                row_variable=key,
                                                value=value)
                        # if key in column_to_terms:
                        #    acq_entity.add_attributes({QualifiedName(provNamespace(Core.safe_string(None,string=str(key)), column_to_terms[key]["url"]), ""):value})
                        #else:

                        #    acq_entity.add_attributes({Constants.BIDS[key.replace(" ", "_")]:value})

    #create acquisition objects for each scan for each subject

    #loop through all subjects in dataset
    for subject_id in bids_layout.get_subjects():
        logging.info("Converting subject: %s" % subject_id)
        #skip .git directories...added to support datalad datasets
        if subject_id.startswith("."):
            continue

        #check if there's a session number.  If so, store it in the session activity
        session_dirs = bids_layout.get(target='session',
                                       subject=subject_id,
                                       return_type='dir')
        #if session_dirs has entries then get any metadata about session and store in session activity

        #bids_layout.get(subject=subject_id,type='session',extensions='.tsv')
        #bids_layout.get(subject=subject_id,type='scans',extensions='.tsv')
        #bids_layout.get(extensions='.tsv',return_type='obj')

        #check whether sessions have been created (i.e. was there a participants.tsv file?  If not, create here
        if not (subject_id in session):
            session[subject_id] = Session(project)

        for file_tpl in bids_layout.get(subject=subject_id,
                                        extensions=['.nii', '.nii.gz']):
            #create an acquisition activity
            acq = MRAcquisition(session[subject_id])

            #check whether participant (i.e. agent) for this subject already exists (i.e. if participants.tsv file exists) else create one
            if not (subject_id in participant):
                participant[subject_id] = {}
                participant[subject_id]['person'] = acq.add_person(
                    attributes=({
                        Constants.NIDM_SUBJECTID: subject_id
                    }))

            #add qualified association with person
            acq.add_qualified_association(
                person=participant[subject_id]['person'],
                role=Constants.NIDM_PARTICIPANT)

            if file_tpl.entities['datatype'] == 'anat':
                #do something with anatomicals
                acq_obj = MRObject(acq)
                #add image contrast type
                if file_tpl.entities['suffix'] in BIDS_Constants.scans:
                    acq_obj.add_attributes({
                        Constants.NIDM_IMAGE_CONTRAST_TYPE:
                        BIDS_Constants.scans[file_tpl.entities['suffix']]
                    })
                else:
                    logging.info(
                        "WARNING: No matching image contrast type found in BIDS_Constants.py for %s"
                        % file_tpl.entities['suffix'])

                #add image usage type
                if file_tpl.entities['datatype'] in BIDS_Constants.scans:
                    acq_obj.add_attributes({
                        Constants.NIDM_IMAGE_USAGE_TYPE:
                        BIDS_Constants.scans[file_tpl.entities['datatype']]
                    })
                else:
                    logging.info(
                        "WARNING: No matching image usage type found in BIDS_Constants.py for %s"
                        % file_tpl.entities['datatype'])
                #add file link
                #make relative link to
                acq_obj.add_attributes({
                    Constants.NIDM_FILENAME:
                    getRelPathToBIDS(join(file_tpl.dirname, file_tpl.filename),
                                     directory)
                })
                #WIP: add absolute location of BIDS directory on disk for later finding of files
                acq_obj.add_attributes({Constants.PROV['Location']: directory})

                #add sha512 sum
                if isfile(join(directory, file_tpl.dirname,
                               file_tpl.filename)):
                    acq_obj.add_attributes({
                        Constants.CRYPTO_SHA512:
                        getsha512(
                            join(directory, file_tpl.dirname,
                                 file_tpl.filename))
                    })
                else:
                    logging.info(
                        "WARNINGL file %s doesn't exist! No SHA512 sum stored in NIDM files..."
                        % join(directory, file_tpl.dirname, file_tpl.filename))
                #get associated JSON file if exists
                #There is T1w.json file with information
                json_data = (bids_layout.get(
                    suffix=file_tpl.entities['suffix'],
                    subject=subject_id))[0].metadata
                if len(json_data.info) > 0:
                    for key in json_data.info.keys():
                        if key in BIDS_Constants.json_keys:
                            if type(json_data.info[key]) is list:
                                acq_obj.add_attributes({
                                    BIDS_Constants.json_keys[key.replace(
                                        " ", "_")]:
                                    ''.join(
                                        str(e) for e in json_data.info[key])
                                })
                            else:
                                acq_obj.add_attributes({
                                    BIDS_Constants.json_keys[key.replace(
                                        " ", "_")]:
                                    json_data.info[key]
                                })

                #Parse T1w.json file in BIDS directory to add the attributes contained inside
                if (os.path.isdir(os.path.join(directory))):
                    try:
                        with open(os.path.join(directory,
                                               'T1w.json')) as data_file:
                            dataset = json.load(data_file)
                    except OSError:
                        logging.critical(
                            "Cannot find T1w.json file which is required in the BIDS spec"
                        )
                        exit("-1")
                else:
                    logging.critical(
                        "Error: BIDS directory %s does not exist!" %
                        os.path.join(directory))
                    exit("-1")

                #add various attributes if they exist in BIDS dataset
                for key in dataset:
                    #if key from T1w.json file is mapped to term in BIDS_Constants.py then add to NIDM object
                    if key in BIDS_Constants.json_keys:
                        if type(dataset[key]) is list:
                            acq_obj.add_attributes({
                                BIDS_Constants.json_keys[key]:
                                "".join(dataset[key])
                            })
                        else:
                            acq_obj.add_attributes(
                                {BIDS_Constants.json_keys[key]: dataset[key]})

            elif file_tpl.entities['datatype'] == 'func':
                #do something with functionals
                acq_obj = MRObject(acq)
                #add image contrast type
                if file_tpl.entities['suffix'] in BIDS_Constants.scans:
                    acq_obj.add_attributes({
                        Constants.NIDM_IMAGE_CONTRAST_TYPE:
                        BIDS_Constants.scans[file_tpl.entities['suffix']]
                    })
                else:
                    logging.info(
                        "WARNING: No matching image contrast type found in BIDS_Constants.py for %s"
                        % file_tpl.entities['suffix'])

                #add image usage type
                if file_tpl.entities['datatype'] in BIDS_Constants.scans:
                    acq_obj.add_attributes({
                        Constants.NIDM_IMAGE_USAGE_TYPE:
                        BIDS_Constants.scans[file_tpl.entities['datatype']]
                    })
                else:
                    logging.info(
                        "WARNING: No matching image usage type found in BIDS_Constants.py for %s"
                        % file_tpl.entities['datatype'])
                #make relative link to
                acq_obj.add_attributes({
                    Constants.NIDM_FILENAME:
                    getRelPathToBIDS(join(file_tpl.dirname, file_tpl.filename),
                                     directory)
                })
                #WIP: add absolute location of BIDS directory on disk for later finding of files
                acq_obj.add_attributes({Constants.PROV['Location']: directory})

                #add sha512 sum
                if isfile(join(directory, file_tpl.dirname,
                               file_tpl.filename)):
                    acq_obj.add_attributes({
                        Constants.CRYPTO_SHA512:
                        getsha512(
                            join(directory, file_tpl.dirname,
                                 file_tpl.filename))
                    })
                else:
                    logging.info(
                        "WARNINGL file %s doesn't exist! No SHA512 sum stored in NIDM files..."
                        % join(directory, file_tpl.dirname, file_tpl.filename))

                if 'run' in file_tpl.entities:
                    acq_obj.add_attributes({
                        BIDS_Constants.json_keys["run"]:
                        file_tpl.entities['run']
                    })

                #get associated JSON file if exists
                json_data = (bids_layout.get(
                    suffix=file_tpl.entities['suffix'],
                    subject=subject_id))[0].metadata

                if len(json_data.info) > 0:
                    for key in json_data.info.keys():
                        if key in BIDS_Constants.json_keys:
                            if type(json_data.info[key]) is list:
                                acq_obj.add_attributes({
                                    BIDS_Constants.json_keys[key.replace(
                                        " ", "_")]:
                                    ''.join(
                                        str(e) for e in json_data.info[key])
                                })
                            else:
                                acq_obj.add_attributes({
                                    BIDS_Constants.json_keys[key.replace(
                                        " ", "_")]:
                                    json_data.info[key]
                                })
                #get associated events TSV file
                if 'run' in file_tpl.entities:
                    events_file = bids_layout.get(
                        subject=subject_id,
                        extensions=['.tsv'],
                        modality=file_tpl.entities['datatype'],
                        task=file_tpl.entities['task'],
                        run=file_tpl.entities['run'])
                else:
                    events_file = bids_layout.get(
                        subject=subject_id,
                        extensions=['.tsv'],
                        modality=file_tpl.entities['datatype'],
                        task=file_tpl.entities['task'])
                #if there is an events file then this is task-based so create an acquisition object for the task file and link
                if events_file:
                    #for now create acquisition object and link it to the associated scan
                    events_obj = AcquisitionObject(acq)
                    #add prov type, task name as prov:label, and link to filename of events file

                    events_obj.add_attributes({
                        PROV_TYPE:
                        Constants.NIDM_MRI_BOLD_EVENTS,
                        BIDS_Constants.json_keys["TaskName"]:
                        json_data["TaskName"],
                        Constants.NIDM_FILENAME:
                        getRelPathToBIDS(events_file[0].filename, directory)
                    })
                    #link it to appropriate MR acquisition entity
                    events_obj.wasAttributedTo(acq_obj)

                #Parse task-rest_bold.json file in BIDS directory to add the attributes contained inside
                if (os.path.isdir(os.path.join(directory))):
                    try:
                        with open(
                                os.path.join(
                                    directory,
                                    'task-rest_bold.json')) as data_file:
                            dataset = json.load(data_file)
                    except OSError:
                        logging.critical(
                            "Cannot find task-rest_bold.json file which is required in the BIDS spec"
                        )
                        exit("-1")
                else:
                    logging.critical(
                        "Error: BIDS directory %s does not exist!" %
                        os.path.join(directory))
                    exit("-1")

                #add various attributes if they exist in BIDS dataset
                for key in dataset:
                    #if key from task-rest_bold.json file is mapped to term in BIDS_Constants.py then add to NIDM object
                    if key in BIDS_Constants.json_keys:
                        if type(dataset[key]) is list:
                            acq_obj.add_attributes({
                                BIDS_Constants.json_keys[key]:
                                ",".join(map(str, dataset[key]))
                            })
                        else:
                            acq_obj.add_attributes(
                                {BIDS_Constants.json_keys[key]: dataset[key]})

            elif file_tpl.entities['datatype'] == 'dwi':
                #do stuff with with dwi scans...
                acq_obj = MRObject(acq)
                #add image contrast type
                if file_tpl.entities['suffix'] in BIDS_Constants.scans:
                    acq_obj.add_attributes({
                        Constants.NIDM_IMAGE_CONTRAST_TYPE:
                        BIDS_Constants.scans[file_tpl.entities['suffix']]
                    })
                else:
                    logging.info(
                        "WARNING: No matching image contrast type found in BIDS_Constants.py for %s"
                        % file_tpl.entities['suffix'])

                #add image usage type
                if file_tpl.entities['datatype'] in BIDS_Constants.scans:
                    acq_obj.add_attributes({
                        Constants.NIDM_IMAGE_USAGE_TYPE:
                        BIDS_Constants.scans["dti"]
                    })
                else:
                    logging.info(
                        "WARNING: No matching image usage type found in BIDS_Constants.py for %s"
                        % file_tpl.entities['datatype'])
                #make relative link to
                acq_obj.add_attributes({
                    Constants.NIDM_FILENAME:
                    getRelPathToBIDS(join(file_tpl.dirname, file_tpl.filename),
                                     directory)
                })
                #add sha512 sum
                if isfile(join(directory, file_tpl.dirname,
                               file_tpl.filename)):
                    acq_obj.add_attributes({
                        Constants.CRYPTO_SHA512:
                        getsha512(
                            join(directory, file_tpl.dirname,
                                 file_tpl.filename))
                    })
                else:
                    logging.info(
                        "WARNINGL file %s doesn't exist! No SHA512 sum stored in NIDM files..."
                        % join(directory, file_tpl.dirname, file_tpl.filename))

                if 'run' in file_tpl.entities:
                    acq_obj.add_attributes({
                        BIDS_Constants.json_keys["run"]:
                        file_tpl.entities['run']
                    })

                #get associated JSON file if exists
                json_data = (bids_layout.get(
                    suffix=file_tpl.entities['suffix'],
                    subject=subject_id))[0].metadata

                if len(json_data.info) > 0:
                    for key in json_data.info.keys():
                        if key in BIDS_Constants.json_keys:
                            if type(json_data.info[key]) is list:
                                acq_obj.add_attributes({
                                    BIDS_Constants.json_keys[key.replace(
                                        " ", "_")]:
                                    ''.join(
                                        str(e) for e in json_data.info[key])
                                })
                            else:
                                acq_obj.add_attributes({
                                    BIDS_Constants.json_keys[key.replace(
                                        " ", "_")]:
                                    json_data.info[key]
                                })
                #for bval and bvec files, what to do with those?

                #for now, create new generic acquisition objects, link the files, and associate with the one for the DWI scan?
                acq_obj_bval = AcquisitionObject(acq)
                acq_obj_bval.add_attributes(
                    {PROV_TYPE: BIDS_Constants.scans["bval"]})
                #add file link to bval files
                acq_obj_bval.add_attributes({
                    Constants.NIDM_FILENAME:
                    getRelPathToBIDS(
                        join(file_tpl.dirname,
                             bids_layout.get_bval(file_tpl.filename)),
                        directory)
                })
                #WIP: add absolute location of BIDS directory on disk for later finding of files
                acq_obj_bval.add_attributes(
                    {Constants.PROV['Location']: directory})

                #add sha512 sum
                if isfile(join(directory, file_tpl.dirname,
                               file_tpl.filename)):
                    acq_obj_bval.add_attributes({
                        Constants.CRYPTO_SHA512:
                        getsha512(
                            join(directory, file_tpl.dirname,
                                 file_tpl.filename))
                    })
                else:
                    logging.info(
                        "WARNINGL file %s doesn't exist! No SHA512 sum stored in NIDM files..."
                        % join(directory, file_tpl.dirname, file_tpl.filename))
                acq_obj_bvec = AcquisitionObject(acq)
                acq_obj_bvec.add_attributes(
                    {PROV_TYPE: BIDS_Constants.scans["bvec"]})
                #add file link to bvec files
                acq_obj_bvec.add_attributes({
                    Constants.NIDM_FILENAME:
                    getRelPathToBIDS(
                        join(file_tpl.dirname,
                             bids_layout.get_bvec(file_tpl.filename)),
                        directory)
                })
                #WIP: add absolute location of BIDS directory on disk for later finding of files
                acq_obj_bvec.add_attributes(
                    {Constants.PROV['Location']: directory})

                if isfile(join(directory, file_tpl.dirname,
                               file_tpl.filename)):
                    #add sha512 sum
                    acq_obj_bvec.add_attributes({
                        Constants.CRYPTO_SHA512:
                        getsha512(
                            join(directory, file_tpl.dirname,
                                 file_tpl.filename))
                    })
                else:
                    logging.info(
                        "WARNINGL file %s doesn't exist! No SHA512 sum stored in NIDM files..."
                        % join(directory, file_tpl.dirname, file_tpl.filename))

                #TODO: link bval and bvec acquisition object entities together, or record their association with the DWI scan...

        #Added temporarily to support phenotype files
        #for each *.tsv / *.json file pair in the phenotypes directory
        #WIP: ADD VARIABLE -> TERM MAPPING HERE
        for tsv_file in glob.glob(os.path.join(directory, "phenotype",
                                               "*.tsv")):
            #for now, open the TSV file, extract the row for this subject, store it in an acquisition object and link to
            #the associated JSON data dictionary file
            with open(tsv_file) as phenofile:
                pheno_data = csv.DictReader(phenofile, delimiter='\t')
                for row in pheno_data:
                    subjid = row['participant_id'].split("-")
                    if not subjid[1] == subject_id:
                        continue
                    else:
                        #add acquisition object
                        acq = AssessmentAcquisition(session=session[subjid[1]])
                        #add qualified association with person
                        acq.add_qualified_association(
                            person=participant[subject_id]['person'],
                            role=Constants.NIDM_PARTICIPANT)

                        acq_entity = AssessmentObject(acquisition=acq)

                        for key, value in row.items():
                            if not value:
                                continue
                            #we're using participant_id in NIDM in agent so don't add to assessment as a triple.
                            #BIDS phenotype files seem to have an index column with no column header variable name so skip those
                            if ((not key == "participant_id") and (key != "")):
                                #for now we're using a placeholder namespace for BIDS and simply the variable names as the concept IDs..
                                acq_entity.add_attributes(
                                    {Constants.BIDS[key]: value})

                        #link TSV file
                        acq_entity.add_attributes({
                            Constants.NIDM_FILENAME:
                            getRelPathToBIDS(tsv_file, directory)
                        })
                        #WIP: add absolute location of BIDS directory on disk for later finding of files
                        acq_entity.add_attributes(
                            {Constants.PROV['Location']: directory})

                        #link associated JSON file if it exists
                        data_dict = os.path.join(
                            directory, "phenotype",
                            os.path.splitext(os.path.basename(tsv_file))[0] +
                            ".json")
                        if os.path.isfile(data_dict):
                            acq_entity.add_attributes({
                                Constants.BIDS["data_dictionary"]:
                                getRelPathToBIDS(data_dict, directory)
                            })

    return project, cde
Example #9
def _load_bids(ctx, bids_dir):
    layout = BIDSLayout(bids_dir, absolute_paths=True)
    other_entities = ["datatype", "extension", "suffix"]

    # load

    anatfiles = layout.get(return_type="filename",
                           datatype="anat",
                           suffix="T1w")
    anat_pattern_set = _get_pattern_set(
        layout,
        anatfiles,
        ["subject"],
        other_entities,
        [],
    )
    _load_part(ctx, BIDSAnatTagsSchema(), anat_pattern_set)

    funcfiles = layout.get(return_type="filename",
                           datatype="func",
                           suffix="bold")
    func_pattern_set = _get_pattern_set(
        layout,
        funcfiles,
        ["subject", "session", "run", "task"],
        other_entities,
        ["RepetitionTime", "PhaseEncodingDirection", "EffectiveEchoSpacing"],
    )
    _load_part(ctx, BIDSBoldTagsSchema(), func_pattern_set)

    eventfiles = layout.get(return_type="filename",
                            datatype="func",
                            suffix="events",
                            extension="tsv")
    events_pattern_set = _get_pattern_set(
        layout,
        eventfiles,
        ["subject", "session", "run", "task"],
        other_entities,
        [],
    )
    _load_part(ctx, BIDSEventsTagsSchema(), events_pattern_set)

    fmap_pattern_set = _get_fmap_pattern_set(
        layout,
        funcfiles,
        ["subject", "session", "run", "task"],
        other_entities,
        ["PhaseEncodingDirection", "EchoTimeDifference", "EchoTime"],
    )
    _load_part(ctx, BIDSFmapTagsSchema(), fmap_pattern_set)

    participantsfilepath = op.join(bids_dir, "participants.tsv")
    if op.isfile(participantsfilepath):
        ctx.spreadsheet_file = participantsfilepath

    # validate

    funcfiles = ctx.database.get(
        datatype="func",
        suffix="bold")  # only validate files that made it to the database
    for funcfile in funcfiles:
        bidsfmapset = set()
        fmapslist = layout.get_fieldmap(funcfile, return_list=True)
        for fmaps in fmapslist:
            for fmapfile in fmaps.values():
                if not op.isfile(fmapfile):
                    continue
                bidsfmapset.add(fmapfile)
        fmapfiles = ctx.database.get_associations(funcfile, datatype="fmap")
        if fmapfiles is not None:
            specfmapset = set(fmapfiles)
            assert bidsfmapset == specfmapset, "Inconsistent FieldMap specification"
Example #10
def main(bids_dir):
    """ Extract CMP3 connectome in a bids dataset and create PDF report"""

    # Read BIDS dataset
    try:
        bids_layout = BIDSLayout(bids_dir)
        print("BIDS: %s" % bids_layout)

        subjects = []
        for subj in bids_layout.get_subjects():
            subjects.append('sub-' + str(subj))

        print("Available subjects : ")
        print(subjects)

    except Exception:
        print(
            "BIDS ERROR: Invalid BIDS dataset. Please see documentation for more details."
        )
        sys.exit(1)

    c = canvas.Canvas(os.path.join(bids_dir, 'derivatives', __cmp_directory__,
                                   'report.pdf'),
                      pagesize=A4)
    width, height = A4

    print("Page size : %s x %s" % (width, height))

    startY = 841.89 - 50

    c.drawString(245, startY, 'Report')
    c.drawString(10, startY - 20, 'BIDS : %s ' % bids_dir)

    offset = 0
    for subj in bids_layout.get_subjects():
        print("Processing %s..." % subj)

        sessions = bids_layout.get(target='session',
                                   return_type='id',
                                   subject=subj)
        if len(sessions) > 0:
            print("Warning: multiple sessions")
            for ses in sessions:
                gpickle_fn = os.path.join(
                    bids_dir, 'derivatives', __cmp_directory__,
                    'sub-' + str(subj), 'ses-' + str(ses), 'dwi',
                    'sub-%s_ses-%s_label-L2008_res-scale1_conndata-snetwork_connectivity.gpickle'
                    % (str(subj), str(ses)))
                if os.path.isfile(gpickle_fn):
                    # c.drawString(10,20+offset,'Subject: %s / Session: %s '%(str(subj),str(sess)))
                    G = nx.read_gpickle(gpickle_fn)
                    con_metric = 'number_of_fibers'
                    con = nx.to_numpy_matrix(G,
                                             weight=con_metric,
                                             dtype=np.float64)

                    fig = figure(figsize=(8, 8))
                    suptitle('Subject: %s / Session: %s ' %
                             (str(subj), str(ses)),
                             fontsize=11)
                    title('Connectivity metric: %s' % con_metric, fontsize=10)
                    # copy the default cmap (0,0,0.5156)
                    my_cmap = copy.copy(cm.get_cmap('inferno'))
                    my_cmap.set_bad((0, 0, 0))
                    imshow(con,
                           interpolation='nearest',
                           norm=colors.LogNorm(),
                           cmap=my_cmap)
                    colorbar()

                    imgdata = io.BytesIO()  # PNG data is binary, so use BytesIO
                    fig.savefig(imgdata, format='png')
                    imgdata.seek(0)  # rewind the data

                    Image = ImageReader(imgdata)
                    posY = startY - 20 - 4.5 * inch - offset
                    c.drawImage(Image, 10, posY, 4 * inch, 4 * inch)

                    offset += 4.5 * inch
                    if posY - offset < 0:
                        c.showPage()
                        offset = 0

        else:
            print("No session")
            gpickle_fn = os.path.join(
                bids_dir, 'derivatives', __cmp_directory__, 'sub-' + str(subj),
                'connectivity',
                'sub-%s_label-L2008_res-scale1_conndata-snetwork_connectivity.gpickle'
                % (str(subj)))
            if os.path.isfile(gpickle_fn):
                # c.drawString(10,20+offset,'Subject : %s '%str(subj))
                G = nx.read_gpickle(gpickle_fn)
                con_metric = 'number_of_fibers'
                con = nx.to_numpy_matrix(G,
                                         weight=con_metric,
                                         dtype=np.float64)

                fig = figure(figsize=(8, 8))
                suptitle('Subject: %s ' % (str(subj)), fontsize=11)
                title('Connectivity metric: %s' % con_metric, fontsize=10)
                # copy the default cmap (0,0,0.5156)
                my_cmap = copy.copy(cm.get_cmap('inferno'))
                my_cmap.set_bad((0, 0, 0))
                imshow(con,
                       interpolation='nearest',
                       norm=colors.LogNorm(),
                       cmap=my_cmap)
                colorbar()

                imgdata = io.BytesIO()  # PNG data is binary, so use BytesIO
                fig.savefig(imgdata, format='png')
                imgdata.seek(0)  # rewind the data

                Image = ImageReader(imgdata)
                posY = startY - 20 - 4.5 * inch - offset
                c.drawImage(Image, 10, posY, 4 * inch, 4 * inch)

                offset += 4.5 * inch
                if posY - offset < 0:
                    c.showPage()
                    offset = 0

    c.save()
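The report above streams each matplotlib figure to ReportLab through an in-memory PNG buffer; because PNG data is binary, the buffer must be io.BytesIO. A standalone sketch of that hand-off (the output filename is a placeholder):

import io
import matplotlib.pyplot as plt
from reportlab.lib.pagesizes import A4
from reportlab.lib.units import inch
from reportlab.lib.utils import ImageReader
from reportlab.pdfgen import canvas

# Minimal sketch: render a matplotlib figure into a PDF page via an in-memory PNG.
fig, ax = plt.subplots(figsize=(4, 4))
ax.plot([0, 1, 2], [0, 1, 4])

buffer = io.BytesIO()           # binary buffer, not StringIO
fig.savefig(buffer, format="png")
buffer.seek(0)                  # rewind before handing it to ReportLab

c = canvas.Canvas("sketch_report.pdf", pagesize=A4)
c.drawImage(ImageReader(buffer), 10, 400, 4 * inch, 4 * inch)
c.save()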
Example #11
    def _resolve_bids(self, fileobj: File) -> list[File]:

        # load using pybids
        validate = False  # save time
        layout = BIDSLayout(
            root=fileobj.path,
            reset_database=True,  # force reindex in case files have changed
            absolute_paths=True,
            validate=validate,
            indexer=BIDSLayoutIndexer(
                validate=validate,
                index_metadata=False,  # save time
            ),
        )

        # load override metadata
        basemetadata = dict()
        if hasattr(fileobj, "metadata"):
            metadata = getattr(fileobj, "metadata", None)
            if isinstance(metadata, dict):
                basemetadata.update(metadata)

        resolved_files: list[File] = []
        for obj in layout.get_files().values():
            file: File | None = to_fileobj(obj, basemetadata)

            if file is None:
                continue

            self.fileobj_by_filepaths[file.path] = file
            self.specfileobj_by_filepaths[file.path] = file
            resolved_files.append(file)

        intended_for: dict[str, frozenset[tuple[str, str]]] = dict()
        for file in resolved_files:
            if file.datatype != "fmap":
                continue

            metadata = SidecarMetadataLoader.load(file.path)
            if metadata is None:
                continue

            intended_for_paths = metadata.get("intended_for")
            if intended_for_paths is None:
                continue

            linked_fmap_tags = frozenset(file.tags.items())
            for intended_for_path in intended_for_paths:
                intended_for[intended_for_path] = linked_fmap_tags

        informed_by: dict[frozenset[tuple[str, str]],
                          list[frozenset[tuple[str, str]]]] = defaultdict(list)
        for file in resolved_files:
            file_tags = frozenset(file.tags.items())

            for file_path, linked_fmap_tags in intended_for.items():
                if file.path.endswith(file_path):  # slow performance
                    informed_by[file_tags].append(linked_fmap_tags)

        mappings: set[tuple[tuple[str, str], tuple[str, str]]] = set()
        for func_tags, linked_fmap_tags_list in informed_by.items():
            for linked_fmap_tags in linked_fmap_tags_list:
                for func_tag, linked_fmap_tag in product(
                        func_tags, linked_fmap_tags):
                    if func_tag[0] == "sub" or linked_fmap_tag[0] == "sub":
                        continue
                    if func_tag[0] == linked_fmap_tag[0]:
                        # only map between different entities
                        continue
                    mappings.add((func_tag, linked_fmap_tag))

        intended_for_rules = defaultdict(list)
        for functag, fmaptag in mappings:
            entity, val = functag
            funcstr = f"{entity}.{val}"

            entity, val = fmaptag
            fmapstr = f"{entity}.{val}"

            intended_for_rules[fmapstr].append(funcstr)

        if len(intended_for) > 0:
            logger.info(
                "Inferred mapping between func and fmap files to be %s",
                pformat(intended_for_rules),
            )
            for file in resolved_files:
                if file.datatype != "fmap":
                    continue
                file.intended_for = intended_for_rules

        return resolved_files
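The rules built above map a fieldmap tag string of the form entity.value to the functional tag strings it was inferred to inform. A hypothetical illustration of that structure, and of how a consumer could test a functional file's tags against it (all tag values below are made up):

# Hypothetical rule set in the format produced above; the values are made up.
intended_for_rules = {
    "dir.AP": ["task.rest", "run.1"],
    "dir.PA": ["task.rest", "run.2"],
}


def fmap_informs(func_tags: dict, fmap_tag: str) -> bool:
    """Return True if any rule for this fieldmap tag matches the functional tags."""
    for rule in intended_for_rules.get(fmap_tag, []):
        entity, value = rule.split(".", 1)
        if func_tags.get(entity) == value:
            return True
    return False


print(fmap_informs({"task": "rest", "run": "1"}, "dir.AP"))  # True under these rules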
Example #12
File: settings.py, Project: timonmerk/icn
f_ranges = [[4, 8], [8, 12], [13, 20], [20, 35], [13, 35], [60, 80], [90, 200],
            [60, 200]]
z_score_running_interval = 10000  # used for "online" z-scoring to setup running interval in which data is z-scored
clip_low = -3  # data is clipped after t-f transformation
clip_high = 3
int_distance_ecog = 20  # distance in which channels are interpolated to a given grid point
int_distance_stn = 10

#  Filter parameters
line_noise = 60
ripple_db = 60.0

#  rolling variance
var_rolling_window = 5  # ms given the sample rate

num_patients = len(BIDSLayout(BIDS_path).get_subjects())


class Settings:
    @staticmethod
    def get_DBS_patients(subject_path):
        """

        :param subject_path:
        :return: list with DBS folders in the given subject_path
        """
        list_DBS_folder = [i for i in os.listdir(subject_path) \
                           if i.startswith('DBS') and \
                           len([file for file in os.listdir(os.path.join(subject_path, i)) \
                                if file.startswith('stream_')]) != 0]
        return list_DBS_folder
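A hypothetical call of the helper above, assuming a subject folder that contains DBS* subfolders with stream_* recordings inside:

# Hypothetical usage; the path and folder names below are placeholders.
# Expected layout: /data/subjects/sub-000/DBS01/stream_001.mat, ...
subject_path = "/data/subjects/sub-000"
print(Settings.get_DBS_patients(subject_path))  # e.g. ['DBS01', 'DBS02']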
Example #13
parser.add_argument('analysis_level', help='Level of the analysis that will be performed. '
                    'Multiple participant level analyses can be run independently '
                    '(in parallel) using the same output_dir.',
                    choices=['participant', 'group'])
parser.add_argument("model",help='json description of model')
parser.add_argument('--participant_label', help='The label(s) of the participant(s) that should be analyzed. The label '
                   'corresponds to sub-<participant_label> from the BIDS spec '
                   '(so it does not include "sub-"). If this parameter is not '
                   'provided all subjects should be analyzed. Multiple '
                   'participants can be specified with a space separated list.',
                   nargs="+")
#parser.add_argument("method",help='estimation method')
args = parser.parse_args()


layout = BIDSLayout(args.bids_dir, validate=False)
derivatives = args.output_dir
#layout = BIDSLayout('data', validate=False)

# load model
with open(args.model) as f:
    model_json = json.load(f)

estimation_method = model_json['HDDMmodel']['estimation']['method']



df = []
for fn in layout.get(suffix='events', return_type='file'):
    df.append(pd.read_table(fn))
    df[-1]['subj_idx'] = layout.get_file(fn).subject
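A minimal continuation, assuming the loop above completed: the per-run event tables collected in df are typically concatenated into one table before estimation.

# Sketch only: combine the per-run event tables collected above.
all_events = pd.concat(df, ignore_index=True)
print(all_events.groupby('subj_idx').size())  # number of trials per subject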
Example #14
File: bids.py, Project: cmaumet/fitlins
def collect_participants(bids_dir, participant_label=None, strict=False):
    """
    List the participants under the BIDS root and check that participants
    designated with the participant_label argument exist in that folder.

    Returns the list of participants to be finally processed.

    Requesting all subjects in a BIDS directory root:

    >>> collect_participants('ds114')
    ['01', '02', '03', '04', '05', '06', '07', '08', '09', '10']

    Requesting two subjects, given their IDs:

    >>> collect_participants('ds114', participant_label=['02', '04'])
    ['02', '04']

    Requesting two subjects, given their IDs (works with 'sub-' prefixes):

    >>> collect_participants('ds114', participant_label=['sub-02', 'sub-04'])
    ['02', '04']

    Requesting two subjects, but one does not exist:

    >>> collect_participants('ds114', participant_label=['02', '14'])
    ['02']

    >>> collect_participants('ds114', participant_label=['02', '14'],
    ...                      strict=True)  # doctest: +IGNORE_EXCEPTION_DETAIL
    Traceback (most recent call last):
    fmriprep.utils.bids.BIDSError:
    ...


    """
    layout = BIDSLayout(bids_dir)
    all_participants = layout.get_subjects()

    # Error: bids_dir does not contain subjects
    if not all_participants:
        raise BIDSError(
            'Could not find participants. Please make sure the BIDS data '
            'structure is present and correct. Datasets can be validated online '
            'using the BIDS Validator (http://incf.github.io/bids-validator/).\n'
            'If you are using Docker for Mac or Docker for Windows, you '
            'may need to adjust your "File sharing" preferences.', bids_dir)

    # No --participant-label was set, return all
    if not participant_label:
        return all_participants

    # Drop sub- prefixes
    participant_label = [
        sub[4:] if sub.startswith('sub-') else sub for sub in participant_label
    ]

    found_label = layout.get_subjects(subject=participant_label)

    if not found_label:
        raise BIDSError(
            'Could not find participants [{}]'.format(
                ', '.join(participant_label)), bids_dir)

    # Warn if some IDs were not found
    notfound_label = sorted(set(participant_label) - set(found_label))
    if notfound_label:
        exc = BIDSError(
            'Some participants were not found: {}'.format(
                ', '.join(notfound_label)), bids_dir)
        if strict:
            raise exc
        warnings.warn(exc.msg, BIDSWarning)

    return found_label
Example #15
import mne
import pandas as pd
import numpy as np
import os
from os.path import join as opj
import matplotlib.pyplot as plt
from bids import BIDSLayout
from mne.time_frequency import read_tfrs
import ptitprince as pt
import seaborn as sns
from mne.viz import plot_topomap

###############################
# Parameters
##############################
layout = BIDSLayout('/data/source')
part = ['sub-' + s for s in layout.get_subject()]

# Silence the pandas chained-assignment warning
pd.options.mode.chained_assignment = None  # default='warn'

# Outpath for analysis
outpath = '/data/derivatives/statistics/tfr_modelfree_anova'
# Outpath for figures
outfigpath = '/data/derivatives/figures/tfr_modelfree_anova'

if not os.path.exists(outpath):
    os.mkdir(outpath)
if not os.path.exists(outfigpath):
    os.mkdir(outfigpath)
Example #16
def collect_data(bids_dir, participant_label, task=None, echo=None,
                 bids_validate=True, bids_filters=None):
    """
    Uses pybids to retrieve the input data for a given participant
    >>> bids_root, _ = collect_data(str(datadir / 'ds054'), '100185',
    ...                             bids_validate=False)
    >>> bids_root['fmap']  # doctest: +ELLIPSIS
    ['.../ds054/sub-100185/fmap/sub-100185_magnitude1.nii.gz', \
'.../ds054/sub-100185/fmap/sub-100185_magnitude2.nii.gz', \
'.../ds054/sub-100185/fmap/sub-100185_phasediff.nii.gz']
    >>> bids_root['bold']  # doctest: +ELLIPSIS
    ['.../ds054/sub-100185/func/sub-100185_task-machinegame_run-01_bold.nii.gz', \
'.../ds054/sub-100185/func/sub-100185_task-machinegame_run-02_bold.nii.gz', \
'.../ds054/sub-100185/func/sub-100185_task-machinegame_run-03_bold.nii.gz', \
'.../ds054/sub-100185/func/sub-100185_task-machinegame_run-04_bold.nii.gz', \
'.../ds054/sub-100185/func/sub-100185_task-machinegame_run-05_bold.nii.gz', \
'.../ds054/sub-100185/func/sub-100185_task-machinegame_run-06_bold.nii.gz']
    >>> bids_root['sbref']  # doctest: +ELLIPSIS
    ['.../ds054/sub-100185/func/sub-100185_task-machinegame_run-01_sbref.nii.gz', \
'.../ds054/sub-100185/func/sub-100185_task-machinegame_run-02_sbref.nii.gz', \
'.../ds054/sub-100185/func/sub-100185_task-machinegame_run-03_sbref.nii.gz', \
'.../ds054/sub-100185/func/sub-100185_task-machinegame_run-04_sbref.nii.gz', \
'.../ds054/sub-100185/func/sub-100185_task-machinegame_run-05_sbref.nii.gz', \
'.../ds054/sub-100185/func/sub-100185_task-machinegame_run-06_sbref.nii.gz']
    >>> bids_root['t1w']  # doctest: +ELLIPSIS
    ['.../ds054/sub-100185/anat/sub-100185_T1w.nii.gz']
    >>> bids_root['t2w']  # doctest: +ELLIPSIS
    []
    >>> bids_root, _ = collect_data(str(datadir / 'ds051'), '01',
    ...                             bids_validate=False, bids_filters={'t1w':{'run': 1}})
    >>> bids_root['t1w']  # doctest: +ELLIPSIS
    ['.../ds051/sub-01/anat/sub-01_run-01_T1w.nii.gz']
    """
    if isinstance(bids_dir, BIDSLayout):
        layout = bids_dir
    else:
        layout = BIDSLayout(str(bids_dir), validate=bids_validate)

    queries = {
        'fmap': {'datatype': 'fmap'},
        'bold': {'datatype': 'func', 'suffix': 'bold'},
        'sbref': {'datatype': 'func', 'suffix': 'sbref'},
        'flair': {'datatype': 'anat', 'suffix': 'FLAIR'},
        't2w': {'datatype': 'anat', 'suffix': 'T2w'},
        't1w': {'datatype': 'anat', 'suffix': 'T1w'},
        'roi': {'datatype': 'anat', 'suffix': 'roi'},
    }
    bids_filters = bids_filters or {}
    for acq, entities in bids_filters.items():
        queries[acq].update(entities)

    if task:
        queries['bold']['task'] = task

    if echo:
        queries['bold']['echo'] = echo

    subj_data = {
        dtype: sorted(layout.get(return_type='file', subject=participant_label,
                                 extension=['nii', 'nii.gz'], **query))
        for dtype, query in queries.items()}

    # Special case: multi-echo BOLD, grouping echos
    if any(['_echo-' in bold for bold in subj_data['bold']]):
        subj_data['bold'] = group_multiecho(subj_data['bold'])

    return subj_data, layout
Example #17
def main(subject, sourcedata, derivatives):
    source_layout = BIDSLayout(sourcedata, validate=False, derivatives=False)
    fmriprep_layout = BIDSLayout(op.join(derivatives, 'fmriprep'),
                                 validate=False)

    bold = fmriprep_layout.get(
        subject=subject,
        suffix='bold',
        description='preproc',
        extension='nii.gz',
    )
    bold = sorted([e for e in bold if 'MNI' in e.filename],
                  key=lambda x: x.run)

    reg = re.compile('.*_space-(?P<space>.+)_desc.*')

    fmriprep_layout_df = fmriprep_layout.to_df()
    fmriprep_layout_df = fmriprep_layout_df[~fmriprep_layout_df.subject.isnull(
    )]
    fmriprep_layout_df['subject'] = fmriprep_layout_df.subject.astype(int)
    fmriprep_layout_df = fmriprep_layout_df[np.in1d(
        fmriprep_layout_df.suffix, ['bold', 'regressors', 'mask'])]
    fmriprep_layout_df = fmriprep_layout_df[np.in1d(
        fmriprep_layout_df.extension, ['nii.gz', 'tsv'])]
    fmriprep_layout_df['space'] = fmriprep_layout_df.path.apply(
        lambda path: reg.match(path).group(1) if reg.match(path) else None)
    fmriprep_layout_df = fmriprep_layout_df.set_index(
        ['subject', 'run', 'suffix', 'space'])

    events_df = source_layout.to_df()
    events_df = events_df[events_df.suffix == 'events']
    events_df['subject'] = events_df['subject'].astype(int)
    events_df = events_df.set_index(['subject', 'run'])

    tr = source_layout.get_tr(bold[0].path)

    for b in bold:
        run = b.entities['run']
        print(run)

        confounds_ = fmriprep_layout_df.loc[(subject, run, 'regressors'),
                                            'path'].iloc[0]
        confounds_ = pd.read_csv(confounds_, sep='\t')
        confounds_ = confounds_[to_include].fillna(method='bfill')

        events_ = events_df.loc[(subject, run), 'path']
        events_ = pd.read_csv(events_, sep='\t')
        events_['trial_type'] = events_['trial_type'].apply(
            lambda x: 'stim2' if x.startswith('stim2') else x)

        model = FirstLevelModel(tr,
                                drift_model=None,
                                n_jobs=5,
                                smoothing_fwhm=4.0)
        pca = PCA(n_components=7)

        confounds_ -= confounds_.mean(0)
        confounds_ /= confounds_.std(0)
        confounds_pca = pca.fit_transform(confounds_[to_include])

        events_['onset'] += tr

        model.fit(b.path, events_, confounds_pca)

        base_dir = op.join(derivatives, 'glm_stim1', f'sub-{subject}', 'func')

        if not op.exists(base_dir):
            os.makedirs(base_dir)

        # PE
        ims = []
        for stim in 5, 7, 10, 14, 20, 28:
            im = model.compute_contrast(f'stim1-{stim}',
                                        output_type='effect_size')
            ims.append(im)
        ims = image.concat_imgs(ims)
        ims.to_filename(
            op.join(base_dir,
                    f'sub-{subject}_run-{run}_desc-stims1_pe.nii.gz'))

        # zmap
        ims = []
        for stim in 5, 7, 10, 14, 20, 28:
            im = model.compute_contrast(f'stim1-{stim}', output_type='z_score')
            ims.append(im)
        ims = image.concat_imgs(ims)
        ims.to_filename(
            op.join(base_dir,
                    f'sub-{subject}_run-{run}_desc-stims1_zmap.nii.gz'))
Example #18
def main(subject, sourcedata, derivatives, smoothed, n_jobs=5):

    os.environ['SUBJECTS_DIR'] = op.join(derivatives, 'freesurfer')

    source_layout = BIDSLayout(sourcedata, validate=False, derivatives=False)

    fmriprep_layout = BIDSLayout(op.join(derivatives, 'fmriprep'),
                                 validate=False)

    if smoothed:
        bold_layout = BIDSLayout(op.join(derivatives, 'smoothed'),
                                 validate=False)
        bold = bold_layout.get(subject=subject, extension='func.gii')
    else:
        bold = fmriprep_layout.get(subject=subject, extension='func.gii')

        bold = sorted([e for e in bold if 'fsaverage6' in e.filename],
                      key=lambda x: x.run)

    fmriprep_layout_df = fmriprep_layout.to_df()
    fmriprep_layout_df = fmriprep_layout_df[~fmriprep_layout_df.subject.isnull(
    )]
    fmriprep_layout_df['subject'] = fmriprep_layout_df.subject.astype(int)
    fmriprep_layout_df = fmriprep_layout_df[np.in1d(fmriprep_layout_df.suffix,
                                                    ['regressors'])]
    fmriprep_layout_df = fmriprep_layout_df[np.in1d(
        fmriprep_layout_df.extension, ['tsv'])]
    fmriprep_layout_df = fmriprep_layout_df.set_index(['subject', 'run'])

    events_df = source_layout.to_df()
    events_df = events_df[events_df.suffix == 'events']
    events_df['subject'] = events_df['subject'].astype(int)
    events_df = events_df.set_index(['subject', 'run'])

    tr = source_layout.get_tr(bold[0].path)

    if smoothed:
        base_dir = op.join(derivatives, 'glm_stim1_surf_smoothed',
                           f'sub-{subject}', 'func')
    else:
        base_dir = op.join(derivatives, 'glm_stim1_surf', f'sub-{subject}',
                           'func')

    if not op.exists(base_dir):
        os.makedirs(base_dir)

    for b in bold:
        run = b.entities['run']
        hemi = b.entities['suffix']
        #     print(run)

        confounds_ = fmriprep_layout_df.loc[(subject, run), 'path'].iloc[0]
        confounds_ = pd.read_csv(confounds_, sep='\t')
        confounds_ = confounds_[to_include].fillna(method='bfill')

        pca = PCA(n_components=7)
        confounds_ -= confounds_.mean(0)
        confounds_ /= confounds_.std(0)
        confounds_pca = pca.fit_transform(confounds_[to_include])

        events_ = events_df.loc[(subject, run), 'path']
        events_ = pd.read_csv(events_, sep='\t')
        events_['trial_type'] = events_['trial_type'].apply(
            lambda x: 'stim2' if x.startswith('stim2') else x)

        frametimes = np.arange(0, tr * len(confounds_), tr)

        X = make_first_level_design_matrix(
            frametimes,
            events_,
            add_regs=confounds_pca,
            add_reg_names=[f'confound_pca.{i}' for i in range(1, 8)])

        Y = surface.load_surf_data(b.path).T
        Y = (Y / Y.mean(0) * 100)
        Y -= Y.mean(0)

        fit = run_glm(Y, X, noise_model='ols', n_jobs=n_jobs)
        r = fit[1][0.0]
        betas = pd.DataFrame(r.theta, index=X.columns)

        stim1 = []

        for stim in 5, 7, 10, 14, 20, 28:
            stim1.append(betas.loc[f'stim1-{stim}'])

        result = pd.concat(stim1, axis=1).T
        print(result.shape)

        pes = nb.gifti.GiftiImage(header=nb.load(b.path).header,
                                  darrays=[
                                      nb.gifti.GiftiDataArray(row)
                                      for ix, row in result.iterrows()
                                  ])

        fn_template = op.join(
            base_dir,
            'sub-{subject}_run-{run}_space-{space}_desc-stims1_hemi-{hemi}.pe.gii'
        )
        space = 'fsaverage6'

        pes.to_filename(fn_template.format(**locals()))

        transformer = SurfaceTransform(source_subject='fsaverage6',
                                       target_subject='fsaverage',
                                       hemi={
                                           'L': 'lh',
                                           'R': 'rh'
                                       }[hemi])

        transformer.inputs.source_file = pes.get_filename()
        space = 'fsaverage'
        transformer.inputs.out_file = fn_template.format(**locals())
        # Disable on MAC OS X (SIP problem)
        transformer.run()
Example #19
def bidsmri2project(directory, args):

    # initialize empty cde graph...it may get replaced if we're doing variable to term mapping or not
    cde=Graph()

    # Parse dataset_description.json file in BIDS directory
    if (os.path.isdir(os.path.join(directory))):
        try:
            with open(os.path.join(directory,'dataset_description.json')) as data_file:
                dataset = json.load(data_file)
        except OSError:
            logging.critical("Cannot find dataset_description.json file which is required in the BIDS spec")
            exit("-1")
    else:
        logging.critical("Error: BIDS directory %s does not exist!" %os.path.join(directory))
        exit("-1")

    # create project / nidm-exp doc
    project = Project()

    # if there are git annex sources then add them
    num_sources=addGitAnnexSources(obj=project.get_uuid(),bids_root=directory)
    # else just add the local path to the dataset
    if num_sources == 0:
        project.add_attributes({Constants.PROV['Location']:"file:/" + directory})


    # add various attributes if they exist in BIDS dataset
    for key in dataset:
        # if key from dataset_description file is mapped to term in BIDS_Constants.py then add to NIDM object
        if key in BIDS_Constants.dataset_description:
            if type(dataset[key]) is list:
                project.add_attributes({BIDS_Constants.dataset_description[key]:"".join(dataset[key])})
            else:
                project.add_attributes({BIDS_Constants.dataset_description[key]:dataset[key]})




    # get BIDS layout
    bids_layout = BIDSLayout(directory)


    # create an empty dictionary for sessions keyed by subject id; used later to link scans to the same session as the demographics
    session={}
    participant={}
    # Parse participants.tsv file in BIDS directory and create study and acquisition objects
    if os.path.isfile(os.path.join(directory,'participants.tsv')):
        with open(os.path.join(directory,'participants.tsv')) as csvfile:
            participants_data = csv.DictReader(csvfile, delimiter='\t')

            # logic to map variables to terms.
            # first iterate over variables in dataframe and check which ones are already mapped as BIDS constants and which are not.  For those that are not
            # we want to use the variable-term mapping functions to help the user do the mapping
            # iterate over columns
            mapping_list=[]
            column_to_terms={}
            for field in participants_data.fieldnames:

                # column is not in BIDS_Constants
                if not (field in BIDS_Constants.participants):
                    # add column to list for column_to_terms mapping
                    mapping_list.append(field)



            #if user didn't supply a json mapping file but we're doing some variable-term mapping create an empty one for column_to_terms to use
            if args.json_map == False:
                #defaults to participants.json because here we're mapping the participants.tsv file variables to terms
                # if participants.json file doesn't exist then run without json mapping file
                if not os.path.isfile(os.path.join(directory,'participants.json')):
                    #maps variables in CSV file to terms
                    temp=DataFrame(columns=mapping_list)
                    if args.no_concepts:
                        column_to_terms,cde = map_variables_to_terms(directory=directory,assessment_name='participants.tsv',
                            df=temp,output_file=os.path.join(directory,'participants.json'),bids=True,associate_concepts=False)
                    else:
                        column_to_terms,cde = map_variables_to_terms(directory=directory,assessment_name='participants.tsv',
                            df=temp,output_file=os.path.join(directory,'participants.json'),bids=True)
                else:
                    #maps variables in CSV file to terms
                    temp=DataFrame(columns=mapping_list)
                    if args.no_concepts:
                        column_to_terms,cde = map_variables_to_terms(directory=directory, assessment_name='participants.tsv', df=temp,
                            output_file=os.path.join(directory,'participants.json'),json_file=os.path.join(directory,'participants.json'),bids=True,associate_concepts=False)
                    else:
                        column_to_terms,cde = map_variables_to_terms(directory=directory, assessment_name='participants.tsv', df=temp,
                            output_file=os.path.join(directory,'participants.json'),json_file=os.path.join(directory,'participants.json'),bids=True)
            else:
                #maps variables in CSV file to terms
                temp=DataFrame(columns=mapping_list)
                if args.no_concepts:
                    column_to_terms, cde = map_variables_to_terms(directory=directory, assessment_name='participants.tsv', df=temp,
                        output_file=os.path.join(directory,'participants.json'),json_file=args.json_map,bids=True,associate_concepts=False)
                else:
                    column_to_terms, cde = map_variables_to_terms(directory=directory, assessment_name='participants.tsv', df=temp,
                        output_file=os.path.join(directory,'participants.json'),json_file=args.json_map,bids=True)


            for row in participants_data:
                #create session object for subject to be used for participant metadata and image data
                #parse subject id from "sub-XXXX" string
                temp = row['participant_id'].split("-")
                # for ambiguity in BIDS datasets: sometimes participant_id is sub-XXXX and other times it's just XXXX
                if len(temp) > 1:
                    subjid = temp[1]
                else:
                    subjid = temp[0]
                logging.info(subjid)
                session[subjid] = Session(project)

                #add acquisition object
                acq = AssessmentAcquisition(session=session[subjid])

                acq_entity = AssessmentObject(acquisition=acq)
                participant[subjid] = {}
                participant[subjid]['person'] = acq.add_person(attributes=({Constants.NIDM_SUBJECTID:row['participant_id']}))

                # add nfo:filename entry to assessment entity to reflect provenance of where this data came from
                acq_entity.add_attributes({Constants.NIDM_FILENAME:getRelPathToBIDS(os.path.join(directory,'participants.tsv'),directory)})
                #acq_entity.add_attributes({Constants.NIDM_FILENAME:os.path.join(directory,'participants.tsv')})

                #add qualified association of participant with acquisition activity
                acq.add_qualified_association(person=participant[subjid]['person'],role=Constants.NIDM_PARTICIPANT)
                # print(acq)

                # if there are git annex sources for participants.tsv file then add them
                num_sources=addGitAnnexSources(obj=acq_entity.get_uuid(),bids_root=directory)
                # else just add the local path to the dataset
                if num_sources == 0:
                    acq_entity.add_attributes({Constants.PROV['Location']:"file:/" + os.path.join(directory,'participants.tsv')})

                 # if there's a JSON sidecar file then create an entity and associate it with all the assessment entities
                if os.path.isfile(os.path.join(directory,'participants.json')):
                    json_sidecar = AssessmentObject(acquisition=acq)
                    json_sidecar.add_attributes({PROV_TYPE:QualifiedName(Namespace("bids",Constants.BIDS),"sidecar_file"), Constants.NIDM_FILENAME:
                        getRelPathToBIDS(os.path.join(directory,'participants.json'),directory)})

                    # add Git Annex Sources
                    # if there are git annex sources for participants.tsv file then add them
                    num_sources=addGitAnnexSources(obj=json_sidecar.get_uuid(),filepath=os.path.join(directory,'participants.json'),bids_root=directory)
                    # else just add the local path to the dataset
                    if num_sources == 0:
                        json_sidecar.add_attributes({Constants.PROV['Location']:"file:/" + os.path.join(directory,'participants.json')})


                # check if json_sidecar entity exists and if so associate assessment entity with it
                if 'json_sidecar' in  locals():
                    #connect json_entity with acq_entity
                    acq_entity.add_attributes({Constants.PROV["wasInfluencedBy"]:json_sidecar})

                for key,value in row.items():
                    if not value:
                        continue
                    #for variables in participants.tsv file who have term mappings in BIDS_Constants.py use those, add to json_map so we don't have to map these if user
                    #supplied arguments to map variables
                    if key in BIDS_Constants.participants:
                        # WIP
                        # Here we are adding to CDE graph data elements for BIDS Constants that remain fixed for each BIDS-compliant dataset

                        if not (BIDS_Constants.participants[key] == Constants.NIDM_SUBJECTID):


                            # create a namespace with the URL for fixed BIDS_Constants term
                            # item_ns = Namespace(str(Constants.BIDS.namespace.uri))
                            # add prefix to namespace which is the BIDS fixed variable name
                            # cde.bind(prefix="bids", namespace=item_ns)
                            # ID for BIDS variables is always the same bids:[bids variable]
                            cde_id = Constants.BIDS[key]
                            # add the data element to the CDE graph
                            cde.add((cde_id,RDF.type, Constants.NIDM['DataElement']))
                            cde.add((cde_id,RDF.type, Constants.PROV['Entity']))
                            # add some basic information about this data element
                            cde.add((cde_id,Constants.RDFS['label'],Literal(BIDS_Constants.participants[key].localpart)))
                            cde.add((cde_id,Constants.NIDM['isAbout'],URIRef(BIDS_Constants.participants[key].uri)))
                            cde.add((cde_id,Constants.NIDM['source_variable'],Literal(key)))
                            cde.add((cde_id,Constants.NIDM['description'],Literal("participant/subject identifier")))
                            cde.add((cde_id,Constants.RDFS['comment'],Literal("BIDS participants_id variable fixed in specification")))
                            cde.add((cde_id,Constants.RDFS['valueType'],URIRef(Constants.XSD["string"])))

                            acq_entity.add_attributes({cde_id:Literal(value)})

                        # if this was the participant_id, we already handled it above creating agent / qualified association
                        # if not (BIDS_Constants.participants[key] == Constants.NIDM_SUBJECTID):
                        #    acq_entity.add_attributes({BIDS_Constants.participants[key]:value})


                    # else if user added -mapvars flag to command line then we'll use the variable-> term mapping procedures to help user map variables to terms (also used
                    # in CSV2NIDM.py)
                    else:

                        # WIP: trying to add new support for CDEs...
                        add_attributes_with_cde(prov_object=acq_entity,cde=cde,row_variable=key,value=value)
                        # if key in column_to_terms:
                        #    acq_entity.add_attributes({QualifiedName(provNamespace(Core.safe_string(None,string=str(key)), column_to_terms[key]["url"]), ""):value})
                        # else:

                        #    acq_entity.add_attributes({Constants.BIDS[key.replace(" ", "_")]:value})


    # create acquisition objects for each scan for each subject

    # loop through all subjects in dataset
    for subject_id in bids_layout.get_subjects():
        logging.info("Converting subject: %s" %subject_id)
        # skip .git directories...added to support datalad datasets
        if subject_id.startswith("."):
            continue

        # check if there are session numbers.  If so, store them in the session activity and create new
        # sessions for these imaging acquisitions.  Because we don't know which imaging session the root
        # participants.tsv file data may be associated with, we simply link the imaging acquisitions to different
        # sessions (i.e. the participants.tsv file goes into an AssessmentAcquisition linked to a unique
        # session and the imaging acquisitions go into MRAcquisitions, each with a unique session)
        imaging_sessions = bids_layout.get_sessions(subject=subject_id)
        # if session_dirs has entries then get any metadata about session and store in session activity

        # bids_layout.get(subject=subject_id,type='session',extensions='.tsv')
        # bids_layout.get(subject=subject_id,type='scans',extensions='.tsv')
        # bids_layout.get(extensions='.tsv',return_type='obj')

        # loop through each session if there is a sessions directory
        if len(imaging_sessions) > 0:
            for img_session in imaging_sessions:
                # create a new session
                ses = Session(project)
                # add session number as metadata
                ses.add_attributes({Constants.BIDS['session_number']:img_session})
                addimagingsessions(bids_layout=bids_layout,subject_id=subject_id,session=ses,participant=participant, directory=directory,img_session=img_session)
        # else we have no ses-* directories in the BIDS layout
        else:
            addimagingsessions(bids_layout=bids_layout,subject_id=subject_id,session=Session(project),participant=participant, directory=directory)



        # Added temporarily to support phenotype files
        # for each *.tsv / *.json file pair in the phenotypes directory
        # WIP: ADD VARIABLE -> TERM MAPPING HERE
        for tsv_file in glob.glob(os.path.join(directory,"phenotype","*.tsv")):
            # for now, open the TSV file, extract the row for this subject, store it in an acquisition object and link to
            # the associated JSON data dictionary file
            with open(tsv_file) as phenofile:
                pheno_data = csv.DictReader(phenofile, delimiter='\t')
                for row in pheno_data:
                    subjid = row['participant_id'].split("-")
                    if not subjid[1] == subject_id:
                        continue
                    else:
                        # add acquisition object
                        acq = AssessmentAcquisition(session=session[subjid[1]])
                        # add qualified association with person
                        acq.add_qualified_association(person=participant[subject_id]['person'],role=Constants.NIDM_PARTICIPANT)

                        acq_entity = AssessmentObject(acquisition=acq)



                        for key,value in row.items():
                            if not value:
                                continue
                            # we're using participant_id in NIDM in agent so don't add to assessment as a triple.
                            # BIDS phenotype files seem to have an index column with no column header variable name so skip those
                            if ((not key == "participant_id") and (key != "")):
                                # for now we're using a placeholder namespace for BIDS and simply the variable names as the concept IDs..
                                acq_entity.add_attributes({Constants.BIDS[key]:value})

                        # link TSV file
                        acq_entity.add_attributes({Constants.NIDM_FILENAME:getRelPathToBIDS(tsv_file,directory)})
                        #acq_entity.add_attributes({Constants.NIDM_FILENAME:tsv_file})

                        # if there are git annex sources for participants.tsv file then add them
                        num_sources=addGitAnnexSources(obj=acq_entity.get_uuid(),bids_root=directory)
                        # else just add the local path to the dataset
                        if num_sources == 0:
                            acq_entity.add_attributes({Constants.PROV['Location']:"file:/" + tsv_file})


                        # link associated JSON file if it exists
                        data_dict = os.path.join(directory,"phenotype",os.path.splitext(os.path.basename(tsv_file))[0]+ ".json")
                        if os.path.isfile(data_dict):
                            # if file exists, create a new entity and associate it with the appropriate activity  and a used relationship
                            # with the TSV-related entity
                            json_entity = AssessmentObject(acquisition=acq)
                            json_entity.add_attributes({PROV_TYPE:Constants.BIDS["sidecar_file"], Constants.NIDM_FILENAME:
                                getRelPathToBIDS(data_dict,directory)})

                            # add Git Annex Sources
                            # if there are git annex sources for participants.tsv file then add them
                            num_sources=addGitAnnexSources(obj=json_entity.get_uuid(),filepath=data_dict,bids_root=directory)
                            # else just add the local path to the dataset
                            if num_sources == 0:
                                json_entity.add_attributes({Constants.PROV['Location']:"file:/" + data_dict})

                            #connect json_entity with acq_entity
                            acq_entity.add_attributes({Constants.PROV["wasInfluencedBy"]:json_entity.get_uuid()})


    return project, cde
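The converter above reads participants.tsv and, when present, a participants.json data dictionary. A hypothetical minimal pair following the BIDS convention (paths and values are made up):

import csv
import json
import os

# Hypothetical minimal participants.tsv / participants.json pair; values are made up.
bids_root = "/data/bids"
os.makedirs(bids_root, exist_ok=True)

with open(os.path.join(bids_root, "participants.tsv"), "w", newline="") as f:
    writer = csv.writer(f, delimiter="\t")
    writer.writerow(["participant_id", "age", "sex"])
    writer.writerow(["sub-01", "34", "F"])

sidecar = {
    "age": {"Description": "age of the participant", "Units": "years"},
    "sex": {"Description": "sex of the participant", "Levels": {"M": "male", "F": "female"}},
}
with open(os.path.join(bids_root, "participants.json"), "w") as f:
    json.dump(sidecar, f, indent=2)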
Example #20
try:
    fmriprepdir = sys.argv[2]
except IndexError:
    print("No fmriprep directory specified.")
    sys.exit(1)

try:
    outputdir = sys.argv[3]
except IndexError:
    print("No output directory specified.")
    sys.exit(1)


# get the layout object of the BIDS directory
layout = BIDSLayout(bidsdir)

lba_mask = datadir + "masks/ba_left.nii.gz"
rba_mask = datadir + "masks/ba_right.nii.gz"
lstg_mask = datadir + "masks/stg_left.nii.gz"
rstg_mask = datadir + "masks/stg_right.nii.gz"
lmtg_mask = datadir + "masks/mtg_left.nii.gz"
rmtg_mask = datadir + "masks/mtg_right.nii.gz"
litg_mask = datadir + "masks/itg_left.nii.gz"
ritg_mask = datadir + "masks/itg_right.nii.gz"
lsfg_mask = datadir + "masks/sfg_left.nii.gz"
rsfg_mask = datadir + "masks/sfg_right.nii.gz"
lwa_mask = datadir + "masks/stg_post_left.nii.gz"
rwa_mask = datadir + "masks/stg_post_right.nii.gz"
lhem_mask = datadir + "masks/hem_left.nii.gz"
rhem_mask = datadir + "masks/hem_right.nii.gz"
Example #21
    def _run_interface(self, runtime):

        import json
        from bids import BIDSLayout

        def validate_derivatives(bids_dir, derivatives):
            """ Validate derivatives argument provided by the user.

            Args:
                bids_dir: str
                    Path to the BIDS root directory.
                derivatives: str or list(str)
                    Derivatives to use for denoising.

            Returns:
                derivatives_: list
                    Validated derivatives list.
                scope: list
                    Scope keywords to use in pybids queries.
            """

            if isinstance(derivatives, str):
                derivatives_ = [derivatives]
            else:
                derivatives_ = derivatives

            # Create full paths to derivatives folders
            derivatives_ = [
                os.path.join(bids_dir, 'derivatives', d) for d in derivatives_
            ]

            # Establish right scope keyword for arbitrary packages
            scope = []
            for derivative_path in derivatives_:
                dataset_desc_path = os.path.join(derivative_path,
                                                 'dataset_description.json')
                try:
                    with open(dataset_desc_path, 'r') as f:
                        dataset_desc = json.load(f)
                    scope.append(dataset_desc['PipelineDescription']['Name'])
                except FileNotFoundError as e:
                    raise Exception(f"{derivative_path} should contain" +
                                    " dataset_description.json file") from e
                except KeyError as e:
                    raise Exception(
                        f"Key 'PipelineDescription.Name' is " +
                        "required in {dataset_desc_path} file") from e

            return derivatives_, scope

        def validate_option(layout, option, kind='task'):
            """ Validate BIDS query filters provided by the user.

            Args:
                layout: bids.layout.layout.BIDSLayout
                    Lightweight class representing BIDS project file tree.
                option: list
                    Filter arguments provided by the user.
                kind: string
                    Type of query. Available options are 'task', 'session' and
                    'subject'.

            Returns:
                option_: list
                    Validated filter values.
            """
            # Grab all possible filter values
            if kind == 'task':
                option_all = layout.get_tasks()
            elif kind == 'session':
                option_all = layout.get_sessions()
            elif kind == 'subject':
                option_all = layout.get_subjects()

            option_ = option
            for option_item in option_:
                if option_item not in option_all:
                    raise ValueError(f'{kind} {option_item} is not found')

            return option_

        # Validate derivatives argument
        derivatives, scope = validate_derivatives(
            bids_dir=self.inputs.bids_dir, derivatives=self.inputs.derivatives)

        layout = BIDSLayout(root=self.inputs.bids_dir,
                            validate=True,
                            derivatives=derivatives)

        # Validate optional arguments
        filter_base = {}
        if isdefined(self.inputs.task):
            task = validate_option(layout, self.inputs.task, kind='task')
            filter_base['task'] = task
        else:
            task = layout.get_tasks()
        if isdefined(self.inputs.session):
            session = validate_option(layout,
                                      self.inputs.session,
                                      kind='session')
            filter_base['session'] = session
        if isdefined(self.inputs.subject):
            subject = validate_option(layout,
                                      self.inputs.subject,
                                      kind='subject')
            filter_base['subject'] = subject

        # Define query filters
        keys_entities = ['task', 'session', 'subject', 'datatype']

        filter_fmri = {
            'extension': ['nii', 'nii.gz'],
            'suffix': 'bold',
            'desc': 'preproc'
        }
        filter_fmri_aroma = {
            'extension': ['nii', 'nii.gz'],
            'suffix': 'bold',
            'desc': 'smoothAROMAnonaggr',
        }
        filter_conf = {
            'extension': 'tsv',
            'suffix': 'regressors',
            'desc': 'confounds',
        }  # for later
        filter_conf_json = {
            'extension': 'json',
            'suffix': 'regressors',
            'desc': 'confounds',
        }

        filter_fmri.update(filter_base)

        ########################################################################
        ### SOLUTION FOR LATER #################################################
        ########################################################################
        # filter_fmri_aroma.update(filter_base)
        # filter_conf.update(filter_base)
        # filter_conf_json.update(filter_base)

        # # Grab all requested files
        # fmri_prep = layout.get(scope=scope, **filter_fmri)
        # if self.inputs.ica_aroma:
        #     fmri_prep_aroma = layout.get(scope=scope, **filter_fmri_aroma)
        # conf_raw = layout.get(scope=scope, **filter_conf)
        # conf_json = layout.get(scope=scope, **filter_conf_json)
        ########################################################################
        ########################################################################
        ########################################################################

        fmri_prep, fmri_prep_aroma, conf_raw, conf_json, entities = (
            [] for _ in range(5))

        for fmri_file in layout.get(scope=scope, **filter_fmri):

            entity_bold = fmri_file.get_entities()

            # Look for corresponding confounds file
            filter_entities = {
                key: value
                for key, value in entity_bold.items() if key in keys_entities
            }

            # Constraining search
            filter_conf.update(filter_entities)
            filter_conf_json.update(filter_entities)

            conf_file = layout.get(scope=scope, **filter_conf)
            conf_json_file = layout.get(scope=scope, **filter_conf_json)

            if not conf_file:
                raise FileNotFoundError(
                    f"Regressor file not found for file {fmri_file.path}")
            else:
                # Add entity only if both files are available
                if len(conf_file) > 1:
                    print(
                        f"Warning: Multiple regressors found for file {fmri_file.path}.\n"
                        f"Selecting {conf_file[0].path}"
                    )  # TODO: find proper warning (logging?)

                conf_file = conf_file[0]

            if not conf_json_file:
                raise FileNotFoundError(
                    f"Regressor file not found for file {fmri_file.path}")
            else:
                # Add entity only if both files are available
                if len(conf_json_file) > 1:
                    print(
                        f"Warning: Multiple .json regressors found for file {fmri_file.path}.\n"
                        f"Selecting {conf_json_file[0].path}")

                conf_json_file = conf_json_file[0]

            if self.inputs.ica_aroma:
                filter_fmri_aroma.update(
                    filter_entities)  # Add specific fields to constrain search
                fmri_aroma_file = layout.get(scope=scope, **filter_fmri_aroma)

                if not fmri_aroma_file:
                    raise FileNotFoundError(
                        f"ICA-Aroma file not found for file {fmri_file.path}")

                else:
                    # Add entity only if both files are available
                    if len(fmri_aroma_file) > 1:
                        print(
                            f"Warning: Multiple ICA-Aroma files found for file {fmri_file.path}.\n"
                            f"Selecting {fmri_aroma_file[0].path}")
                    # TODO: find proper warning (logging?)

                    fmri_aroma_file = fmri_aroma_file[0]
                    fmri_prep_aroma.append(fmri_aroma_file.path)

            fmri_prep.append(fmri_file.path)
            conf_raw.append(conf_file.path)
            conf_json.append(conf_json_file.path)
            entities.append(filter_entities)

        # Extract TRs
        tr_dict = {}

        for t in task:
            filter_fmri_tr = filter_fmri.copy()
            filter_fmri_tr['task'] = t

            example_file = layout.get(**filter_fmri_tr)[0]
            tr = layout.get_metadata(example_file.path)['RepetitionTime']
            tr_dict[t] = tr

        self._results['fmri_prep'] = fmri_prep
        self._results['fmri_prep_aroma'] = fmri_prep_aroma
        self._results['conf_raw'] = conf_raw
        self._results['conf_json'] = conf_json
        self._results['entities'] = entities
        self._results['tr_dict'] = tr_dict

        return runtime
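validate_derivatives above resolves the pybids scope from the PipelineDescription.Name field of each derivatives folder's dataset_description.json. A sketch of a minimal file that would satisfy that check, with placeholder paths and names:

import json
import os

# Minimal sketch of the dataset_description.json expected inside each derivatives
# folder; the directory and pipeline name below are placeholders.
desc = {
    "Name": "fMRIPrep outputs",
    "BIDSVersion": "1.1.1",
    "PipelineDescription": {"Name": "fMRIPrep"},  # becomes the pybids scope
}
deriv_dir = "/data/bids/derivatives/fmriprep"
os.makedirs(deriv_dir, exist_ok=True)
with open(os.path.join(deriv_dir, "dataset_description.json"), "w") as f:
    json.dump(desc, f, indent=2)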
Example #22
def collect_participants(bids_dir,
                         participant_label=None,
                         strict=False,
                         bids_validate=True):
    """
    List the participants under the BIDS root and check that participants
    designated with the participant_label argument exist in that folder.
    Returns the list of participants to be finally processed.

    Examples
    --------
    Requesting all subjects in a BIDS directory root:

    >>> collect_participants(str(datadir / 'ds114'), bids_validate=False)
    ['01', '02', '03', '04', '05', '06', '07', '08', '09', '10']

    Requesting two subjects, given their IDs:

    >>> collect_participants(str(datadir / 'ds114'), participant_label=['02', '04'],
    ...                      bids_validate=False)
    ['02', '04']

    Requesting two subjects, given their IDs (works with 'sub-' prefixes):

    >>> collect_participants(str(datadir / 'ds114'), participant_label=['sub-02', 'sub-04'],
    ...                      bids_validate=False)
    ['02', '04']

    Requesting two subjects, but one does not exist:

    >>> collect_participants(str(datadir / 'ds114'), participant_label=['02', '14'],
    ...                      bids_validate=False)
    ['02']
    >>> collect_participants(
    ...     str(datadir / 'ds114'), participant_label=['02', '14'],
    ...     strict=True, bids_validate=False)  # doctest: +IGNORE_EXCEPTION_DETAIL
    Traceback (most recent call last):
    BIDSError:
    ...

    """

    if isinstance(bids_dir, BIDSLayout):
        layout = bids_dir
    else:
        layout = BIDSLayout(str(bids_dir), validate=bids_validate)

    all_participants = set(layout.get_subjects())

    # Error: bids_dir does not contain subjects
    if not all_participants:
        raise BIDSError(
            "Could not find participants. Please make sure the BIDS data "
            "structure is present and correct. Datasets can be validated online "
            "using the BIDS Validator (http://bids-standard.github.io/bids-validator/).\n"
            "If you are using Docker for Mac or Docker for Windows, you "
            'may need to adjust your "File sharing" preferences.',
            bids_dir,
        )

    # No --participant-label was set, return all
    if not participant_label:
        return sorted(all_participants)

    if isinstance(participant_label, str):
        participant_label = [participant_label]

    # Drop sub- prefixes
    participant_label = [
        sub[4:] if sub.startswith("sub-") else sub for sub in participant_label
    ]
    # Remove duplicates
    participant_label = sorted(set(participant_label))
    # Remove labels not found
    found_label = sorted(set(participant_label) & all_participants)
    if not found_label:
        raise BIDSError(
            "Could not find participants [{}]".format(
                ", ".join(participant_label)),
            bids_dir,
        )

    # Warn if some IDs were not found
    notfound_label = sorted(set(participant_label) - all_participants)
    if notfound_label:
        exc = BIDSError(
            "Some participants were not found: {}".format(
                ", ".join(notfound_label)),
            bids_dir,
        )
        if strict:
            raise exc
        warnings.warn(exc.msg, BIDSWarning)

    return found_label
Example #23
File: mp2rage.py, Project: nbeliy/pymp2rage
    def from_bids(cls,
                  source_dir,
                  subject=None,
                  session=None,
                  acquisition=None,
                  run=None,
                  inversion_efficiency=0.96):
        """ Creates a MEMP2RAGE-object from a properly organized BIDS-folder.

        The folder should be organized similar to this example:

        sub-01/anat/:
        # The first inversion time volumes
         * sub-01_inv-1_part-mag_MPRAGE.nii
         * sub-01_inv-1_part-phase_MPRAGE.nii

        # The four echoes of the second inversion (magnitude)
         * sub-01_inv-2_part-mag_echo-1_MPRAGE.nii
         * sub-01_inv-2_part-mag_echo-2_MPRAGE.nii
         * sub-01_inv-2_part-mag_echo-3_MPRAGE.nii
         * sub-01_inv-2_part-mag_echo-4_MPRAGE.nii

        # The four echoes of the second inversion (phase)
         * sub-01_inv-2_part-phase_echo-1_MPRAGE.nii
         * sub-01_inv-2_part-phase_echo-2_MPRAGE.nii
         * sub-01_inv-2_part-phase_echo-3_MPRAGE.nii
         * sub-01_inv-2_part-phase_echo-4_MPRAGE.nii

        # The json describing the parameters of the first inversion pulse
         * sub-01_inv-1_MPRAGE.json

        # The json describing the parameters of the second inversion pulse
         * sub-01_inv-2_echo-1_MPRAGE.json
         * sub-01_inv-2_echo-2_MPRAGE.json
         * sub-01_inv-2_echo-3_MPRAGE.json
         * sub-01_inv-2_echo-4_MPRAGE.json

         The JSON-files should contain all the necessary MP2RAGE sequence parameters
         and should look something like this:

         sub-01/anat/sub-01_inv-1_MPRAGE.json:
             {
                "InversionTime":0.67,
                "FlipAngle":7,
                "RepetitionTimeExcitation":0.0062,
                "RepetitionTimePreparation":6.723,
                "NumberShots":150,
                "FieldStrength": 7
             }

         sub-01/anat/sub-01_inv-2_echo-1_MPRAGE.json:
             {
                "InversionTime":3.855,
                "FlipAngle":6,
                "RepetitionTimeExcitation":0.0320,
                "RepetitionTimePreparation":6.723,
                "NumberShots":150,
                "EchoTime": 6.0
                "FieldStrength": 7
             }

         sub-01/anat/sub-01_inv-2_echo-2_MPRAGE.json:
             {
                "InversionTime":3.855,
                "FlipAngle":6,
                "RepetitionTimeExcitation":0.0320,
                "RepetitionTimePreparation":6.723,
                "NumberShots":150,
                "EchoTime": 14.5
                "FieldStrength": 7
             }

         sub-01/anat/sub-01_inv-2_echo-3_MPRAGE.json:
             {
                "InversionTime":3.855,
                "FlipAngle":6,
                "RepetitionTimeExcitation":0.0320,
                "RepetitionTimePreparation":6.723,
                "NumberShots":150,
                "EchoTime": 23
                "FieldStrength": 7
             }

         sub-01/anat/sub-01_inv-2_echo-4_MPRAGE.json:
             {
                "InversionTime":3.855,
                "FlipAngle":6,
                "RepetitionTimeExcitation":0.0320,
                "RepetitionTimePreparation":6.723,
                "NumberShots":150,
                "EchoTime": 31.5
                "FieldStrength": 7
             }

        A MEMP2RAGE-object can now be created from the BIDS folder as follows:

        Example:
            >>> import pymp2rage
            >>> mp2rage = pymp2rage.MEMP2RAGE.from_bids('/data/sourcedata/', '01')

        Args:
            source_dir (BIDS dir): directory containing all necessary files
            subject (str): subject identifier
            session (str): session identifier (optional)
            acquisition (str): acquisition label (optional)
            run (int): run index (optional)
            inversion_efficiency (float): efficiency of the adiabatic inversion
                pulse (default: 0.96)
        """

        __dir__ = os.path.abspath(os.path.dirname(__file__))
        layout = BIDSLayout(source_dir,
                            validate=False,
                            config=op.join(__dir__, 'bids', 'bep001.json'))

        df = layout.to_df()

        subject = str(subject) if subject is not None else subject
        session = str(session) if session is not None else session
        run = int(run) if run is not None else run

        for var_str, var in zip(['subject', 'session', 'run', 'acquisition'],
                                [subject, session, run, acquisition]):
            if var is not None:
                df = df[df[var_str] == var]
        df = df[np.in1d(df.extension, ['nii', 'nii.gz'])]

        for key in ['echo', 'inv', 'fa']:
            if key in df.columns:
                df[key] = df[key].astype(float)

        df = df.set_index(['suffix', 'inv', 'echo', 'part'])
        df = df.loc[['MP2RAGE', 'TB1map']]

        for ix, row in df.iterrows():

            for key, value in layout.get_metadata(row.path).items():
                if key in [
                        'EchoTime', 'InversionTime',
                        'RepetitionTimePreparation',
                        'RepetitionTimeExcitation', 'NumberShots',
                        'FieldStrength', 'FlipAngle'
                ]:
                    df.loc[ix, key] = value

        if 'TB1map' in df.index:

            if len(df.loc['TB1map']) == 1:
                print('using {} as B1map'.format(
                    str(df.loc['TB1map'].iloc[0]['path'])))
                b1map = df.loc['TB1map'].iloc[0]['path']
            else:
                print('FOUND MORE THAN ONE B1-MAP! Will not use B1-correction')
                b1map = None
        else:
            b1map = None

        inv1 = df.loc[('MP2RAGE', 1, slice(None), 'mag'), 'path'].iloc[0]
        inv1ph = df.loc[('MP2RAGE', 1, slice(None), 'phase'), 'path'].iloc[0]
        inv2 = df.loc[('MP2RAGE', 2, slice(None), 'mag'), 'path'].tolist()
        inv2ph = df.loc[('MP2RAGE', 2, slice(None), 'phase'), 'path'].tolist()

        echo_times = df.loc[('MP2RAGE', 2, slice(None), 'mag'),
                            'EchoTime'].values
        MPRAGE_tr = df.loc[('MP2RAGE', 1, slice(None), 'mag'),
                           'RepetitionTimePreparation'].values[0]
        invtimesAB = (df.loc[('MP2RAGE', 1, slice(None), 'mag'),
                             'InversionTime'].values[0],
                      df.loc[('MP2RAGE', 2, slice(None), 'mag'),
                             'InversionTime'].values[0])
        nZslices = df.loc[('MP2RAGE', 1, slice(None), 'mag'),
                          'NumberShots'].values[0]
        FLASH_tr = (df.loc[('MP2RAGE', 1, slice(None), 'mag'),
                           'RepetitionTimeExcitation'].values[0],
                    df.loc[('MP2RAGE', 2, slice(None), 'mag'),
                           'RepetitionTimeExcitation'].values[0])
        B0 = df.loc[('MP2RAGE', 1, slice(None), 'mag'),
                    'FieldStrength'].values[0]
        flipangleABdegree = (df.loc[('MP2RAGE', 1, slice(None), 'mag'),
                                    'FlipAngle'].values[0],
                             df.loc[('MP2RAGE', 2, slice(None), 'mag'),
                                    'FlipAngle'].values[0])

        mp2rageme = cls(echo_times=echo_times,
                        MPRAGE_tr=MPRAGE_tr,
                        invtimesAB=invtimesAB,
                        flipangleABdegree=flipangleABdegree,
                        nZslices=nZslices,
                        FLASH_tr=FLASH_tr,
                        inversion_efficiency=inversion_efficiency,
                        B0=B0,
                        inv1=inv1,
                        inv1ph=inv1ph,
                        inv2=inv2,
                        inv2ph=inv2ph)

        return mp2rageme
Example #24
0
File: run.py Project: hspopal/fmriprep
def build_workflow(opts, retval):
    """
    Create the Nipype Workflow that supports the whole execution
    graph, given the inputs.

    All the checks and the construction of the workflow are done
    inside this function that has pickleable inputs and output
    dictionary (``retval``) to allow isolation using a
    ``multiprocessing.Process`` that allows fmriprep to enforce
    a hard-limited memory-scope.

    """
    from bids import BIDSLayout

    from nipype import logging as nlogging, config as ncfg
    from niworkflows.utils.bids import collect_participants
    from niworkflows.reports import generate_reports
    from ..__about__ import __version__
    from ..workflows.base import init_fmriprep_wf

    build_log = nlogging.getLogger('nipype.workflow')

    INIT_MSG = """
    Running fMRIPREP version {version}:
      * BIDS dataset path: {bids_dir}.
      * Participant list: {subject_list}.
      * Run identifier: {uuid}.
    """.format

    bids_dir = opts.bids_dir.resolve()
    output_dir = opts.output_dir.resolve()
    work_dir = opts.work_dir.resolve()

    retval['return_code'] = 1
    retval['workflow'] = None
    retval['bids_dir'] = str(bids_dir)
    retval['output_dir'] = str(output_dir)
    retval['work_dir'] = str(work_dir)

    if output_dir == bids_dir:
        build_log.error(
            'The selected output folder is the same as the input BIDS folder. '
            'Please modify the output path (suggestion: %s).', bids_dir /
            'derivatives' / ('fmriprep-%s' % __version__.split('+')[0]))
        retval['return_code'] = 1
        return retval

    if bids_dir in work_dir.parents:
        build_log.error(
            'The selected working directory is a subdirectory of the input BIDS folder. '
            'Please modify the output path.')
        retval['return_code'] = 1
        return retval

    output_spaces = parse_spaces(opts)

    # Set up some instrumental utilities
    run_uuid = '%s_%s' % (strftime('%Y%m%d-%H%M%S'), uuid.uuid4())
    retval['run_uuid'] = run_uuid

    # First check that bids_dir looks like a BIDS folder
    layout = BIDSLayout(str(bids_dir),
                        validate=False,
                        ignore=("code", "stimuli", "sourcedata", "models",
                                "derivatives", re.compile(r'^\.')))
    subject_list = collect_participants(
        layout, participant_label=opts.participant_label)
    retval['subject_list'] = subject_list

    # Load base plugin_settings from file if --use-plugin
    if opts.use_plugin is not None:
        from yaml import load as loadyml
        with open(opts.use_plugin) as f:
            plugin_settings = loadyml(f)
        plugin_settings.setdefault('plugin_args', {})
    else:
        # Defaults
        plugin_settings = {
            'plugin': 'MultiProc',
            'plugin_args': {
                'raise_insufficient': False,
                'maxtasksperchild': 1,
            }
        }

    # Resource management options
    # Note that we're making strong assumptions about valid plugin args
    # This may need to be revisited if people try to use batch plugins
    nthreads = plugin_settings['plugin_args'].get('n_procs')
    # Permit overriding plugin config with specific CLI options
    if nthreads is None or opts.nthreads is not None:
        nthreads = opts.nthreads
        if nthreads is None or nthreads < 1:
            nthreads = cpu_count()
        plugin_settings['plugin_args']['n_procs'] = nthreads

    if opts.mem_mb:
        plugin_settings['plugin_args']['memory_gb'] = opts.mem_mb / 1024

    omp_nthreads = opts.omp_nthreads
    if omp_nthreads == 0:
        omp_nthreads = min(nthreads - 1 if nthreads > 1 else cpu_count(), 8)

    if 1 < nthreads < omp_nthreads:
        build_log.warning(
            'Per-process threads (--omp-nthreads=%d) exceed total '
            'threads (--nthreads/--n_cpus=%d)', omp_nthreads, nthreads)
    retval['plugin_settings'] = plugin_settings

    # Set up directories
    log_dir = output_dir / 'fmriprep' / 'logs'
    # Check and create output and working directories
    output_dir.mkdir(exist_ok=True, parents=True)
    log_dir.mkdir(exist_ok=True, parents=True)
    work_dir.mkdir(exist_ok=True, parents=True)

    # Nipype config (logs and execution)
    ncfg.update_config({
        'logging': {
            'log_directory': str(log_dir),
            'log_to_file': True
        },
        'execution': {
            'crashdump_dir': str(log_dir),
            'crashfile_format': 'txt',
            'get_linked_libs': False,
            'stop_on_first_crash': opts.stop_on_first_crash,
        },
        'monitoring': {
            'enabled': opts.resource_monitor,
            'sample_frequency': '0.5',
            'summary_append': True,
        }
    })

    if opts.resource_monitor:
        ncfg.enable_resource_monitor()

    # Called with reports only
    if opts.reports_only:
        build_log.log(25, 'Running --reports-only on participants %s',
                      ', '.join(subject_list))
        if opts.run_uuid is not None:
            run_uuid = opts.run_uuid
            retval['run_uuid'] = run_uuid
        retval['return_code'] = generate_reports(subject_list,
                                                 output_dir,
                                                 work_dir,
                                                 run_uuid,
                                                 packagename='fmriprep')
        return retval

    # Build main workflow
    build_log.log(
        25,
        INIT_MSG(version=__version__,
                 bids_dir=bids_dir,
                 subject_list=subject_list,
                 uuid=run_uuid))

    retval['workflow'] = init_fmriprep_wf(
        anat_only=opts.anat_only,
        aroma_melodic_dim=opts.aroma_melodic_dimensionality,
        bold2t1w_dof=opts.bold2t1w_dof,
        cifti_output=opts.cifti_output,
        debug=opts.sloppy,
        dummy_scans=opts.dummy_scans,
        echo_idx=opts.echo_idx,
        err_on_aroma_warn=opts.error_on_aroma_warnings,
        fmap_bspline=opts.fmap_bspline,
        fmap_demean=opts.fmap_no_demean,
        force_syn=opts.force_syn,
        freesurfer=opts.run_reconall,
        hires=opts.hires,
        ignore=opts.ignore,
        layout=layout,
        longitudinal=opts.longitudinal,
        low_mem=opts.low_mem,
        medial_surface_nan=opts.medial_surface_nan,
        omp_nthreads=omp_nthreads,
        output_dir=str(output_dir),
        output_spaces=output_spaces,
        run_uuid=run_uuid,
        regressors_all_comps=opts.return_all_components,
        regressors_fd_th=opts.fd_spike_threshold,
        regressors_dvars_th=opts.dvars_spike_threshold,
        skull_strip_fixed_seed=opts.skull_strip_fixed_seed,
        skull_strip_template=opts.skull_strip_template,
        subject_list=subject_list,
        t2s_coreg=opts.t2s_coreg,
        task_id=opts.task_id,
        use_aroma=opts.use_aroma,
        use_bbr=opts.use_bbr,
        use_syn=opts.use_syn_sdc,
        work_dir=str(work_dir),
    )
    retval['return_code'] = 0

    logs_path = Path(output_dir) / 'fmriprep' / 'logs'
    boilerplate = retval['workflow'].visit_desc()

    if boilerplate:
        citation_files = {
            ext: logs_path / ('CITATION.%s' % ext)
            for ext in ('bib', 'tex', 'md', 'html')
        }
        # To please git-annex users and also to guarantee consistency
        # among different renderings of the same file, first remove any
        # existing one
        for citation_file in citation_files.values():
            try:
                citation_file.unlink()
            except FileNotFoundError:
                pass

        citation_files['md'].write_text(boilerplate)
        build_log.log(
            25, 'Works derived from this fMRIPrep execution should '
            'include the following boilerplate:\n\n%s', boilerplate)
    return retval
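
As the docstring notes, build_workflow keeps its inputs and its output dictionary (retval) pickleable so that it can run inside a multiprocessing.Process and have its memory released once the child process exits. A minimal sketch of that calling pattern is shown below; the driver function name and the error handling are illustrative assumptions, not code from fmriprep:

import multiprocessing as mp

def build_workflow_in_subprocess(opts):
    """Run build_workflow in a child process and collect its results."""
    with mp.Manager() as mgr:
        retval = mgr.dict()  # pickleable dict shared with the child
        proc = mp.Process(target=build_workflow, args=(opts, retval))
        proc.start()
        proc.join()  # the child's memory is released once it exits
        retval = dict(retval)  # copy results back into the parent

    if retval.get('return_code', 1) != 0 or retval.get('workflow') is None:
        raise RuntimeError('Building the fMRIPrep workflow failed')
    return retval['workflow'], retval['plugin_settings']
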
Example #25
0
def main(**args):

    outfiles = ['fitts', 'errts', 'stats', 'betas']

    path = args['path']
    pipeline = args['pipeline']

    command = '3dDeconvolve -input {files} -jobs {n_jobs} -polort {polort} -float {confounds} {events_string} '+ \
              ' -mask {mask} -allzero_OK -fout -tout -x1D {design_matrix_txt} -xjpeg {design_matrix_jpg} -xsave '+\
              '-fitts {fitts} -errts {errts} -bucket {stats} -cbucket {betas} -rout -gltsym "SYM: RESP+L -RESP+R" -glt_label 1 RespLvsRespR'

    extra_event = process_extraevent_arg(args['extra_event'])

    derivatives = os.path.join(path, "derivatives", pipeline)
    print("mkdir -p {}".format(derivatives))
    os.system("mkdir -p {}".format(derivatives))

    derivatives_pattern = os.path.join(derivatives, 'sub-{subject}',
                                       "ses-{session}")
    pattern = os.path.join(
        derivatives_pattern, "{datatype}",
        "sub-{subject}[_ses-{session}][_space-{space}][_desc-{desc}]_{suffix}.{extension}"
    )

    layout = BIDSLayout(path, derivatives=True)

    subjects = layout.get_subjects()
    subjects.remove('lormat')
    sessions = layout.get_sessions()

    # TODO: Check if there are sessions
    for session in sessions:
        for subj in subjects:

            deriv_dir = derivatives_pattern.format(session=session,
                                                   subject=subj)
            print("mkdir -p {}".format(deriv_dir))
            os.system("mkdir -p {}".format(deriv_dir))

            # Create func
            func_dir = os.path.join(derivatives_pattern,
                                    "{datatype}").format(session=session,
                                                         subject=subj,
                                                         datatype='func')

            print("mkdir -p {}".format(func_dir))
            os.system("mkdir -p {}".format(func_dir))

            # Main command
            files = layout.get(subject=subj,
                               session=session,
                               task=session,
                               desc='afniproc',
                               extension='nii.gz')
            entities = files[0].get_entities()
            files = " ".join(f.path for f in files)

            args['files'] = files

            confounds = ''
            for desc in ['bpass', 'demean']:
                ort_files = layout.get(subject=subj,
                                       session=session,
                                       desc=desc)
                confounds += '-ortvec {} {} '.format(ort_files[0].path, desc)

            args['confounds'] = confounds

            # Stimuli
            stims = bids2afni_events(subj,
                                     session,
                                     layout,
                                     pattern,
                                     extra_event=extra_event)

            write_afni(stims)
            args['events_string'] = stims_times(stims)

            # Mask
            mask = layout.get(subject=subj,
                              session=session,
                              suffix='mask',
                              extension='nii.gz')
            args['mask'] = mask[0].path

            # Buckets
            for desc in outfiles:
                entities['desc'] = pipeline
                entities['suffix'] = desc
                args[desc] = layout.build_path(entities,
                                               pattern,
                                               validate=False)

            for extension in ['jpg', 'txt']:
                entities['suffix'] = 'dmatrix'
                entities['extension'] = extension
                args['design_matrix_' + extension] = layout.build_path(
                    entities, pattern, validate=False)

            print(command.format(**args))
            os.system(command.format(**args))
Example #26
0
def layout(tmp_bids):
    orig_dir = join(get_test_data_path(), '7t_trt')
    # return BIDSLayout(data_dir, absolute_paths=False)
    new_dir = join(str(tmp_bids), 'bids')
    os.symlink(orig_dir, new_dir)
    return BIDSLayout(new_dir)
Example #27
0
def run_deeid():

    args = get_parser().parse_args()
    subjects_to_analyze = []

    # special variable set in the container
    if os.getenv('IS_DOCKER'):
        exec_env = 'singularity'
        cgroup = Path('/proc/1/cgroup')
        if cgroup.exists() and 'docker' in cgroup.read_text():
            exec_env = 'docker'
    else:
        exec_env = 'local'

    if args.brainextraction is None:
        raise Exception(
            "For post-defacing quality control it is required to run a form of "
            "brain extraction on the non-de-identified data. Please indicate "
            "either bet (--brainextraction bet) or nobrainer "
            "(--brainextraction nobrainer)."
        )

    if args.skip_bids_validation:
        print("Input data will not be checked for BIDS compliance.")
    else:
        print("Making sure the input data is BIDS compliant "
              "(warnings can be ignored in most cases).")
        validate_input_dir(exec_env, args.bids_dir, args.participant_label)

    layout = BIDSLayout(args.bids_dir)

    if args.analysis_level == "participant":
        if args.participant_label:
            subjects_to_analyze = args.participant_label
        else:
            print("No participant label indicated. Please do so.")
    else:
        subjects_to_analyze = layout.get(return_type='id', target='subject')

    list_part_prob = []
    for part in subjects_to_analyze:
        if part not in layout.get_subjects():
            list_part_prob.append(part)
    if len(list_part_prob) >= 1:
        raise Exception(
            "The participant(s) you indicated are not present in the BIDS "
            "dataset, please check again. This refers to: "
            "{}".format(list_part_prob))

    sessions_to_analyze = layout.get(return_type='id', target='session')

    if not sessions_to_analyze:
        print('Processing data from one session.')
    else:
        print('Processing data from %s sessions:' %
              str(len(sessions_to_analyze)))
        print(sessions_to_analyze)

    list_check_meta = args.check_meta

    list_field_del = args.del_meta

    for subject_label in subjects_to_analyze:
        if not sessions_to_analyze:
            list_t1w = layout.get(subject=subject_label,
                                  extension='nii.gz',
                                  suffix='T1w',
                                  return_type='filename')
        else:
            list_t1w = layout.get(subject=subject_label,
                                  extension='nii.gz',
                                  suffix='T1w',
                                  return_type='filename',
                                  session=sessions_to_analyze)
        for T1_file in list_t1w:
            check_outpath(args.bids_dir, subject_label)
            if args.brainextraction == 'bet':
                if args.bet_frac is None:
                    raise Exception(
                        "If you want to use BET for pre-defacing brain extraction,"
                        "please provide a Frac value. For example: --bet_frac 0.5"
                    )
                else:
                    run_brain_extraction_bet(T1_file, args.bet_frac[0],
                                             subject_label, args.bids_dir)
            elif args.brainextraction == 'nobrainer':
                run_brain_extraction_nb(T1_file, subject_label, args.bids_dir)

            check_meta_data(args.bids_dir, subject_label, list_check_meta)
            source_t1w = copy_no_deid(args.bids_dir, subject_label, T1_file)

            if args.del_meta:
                del_meta_data(args.bids_dir, subject_label, list_field_del)
            if args.deid == "pydeface":
                run_pydeface(source_t1w, T1_file)
            elif args.deid == "mri_deface":
                run_mri_deface(source_t1w, T1_file)
            elif args.deid == "quickshear":
                run_quickshear(source_t1w, T1_file)
            elif args.deid == "mridefacer":
                run_mridefacer(source_t1w, T1_file)
            elif args.deid == "deepdefacer":
                run_deepdefacer(source_t1w, subject_label, args.bids_dir)

        if args.deface_t2w:
            if not sessions_to_analyze:
                list_t2w = layout.get(subject=subject_label,
                                      extension='nii.gz',
                                      suffix='T2w',
                                      return_type='filename')
            else:
                list_t2w = layout.get(subject=subject_label,
                                      extension='nii.gz',
                                      suffix='T2w',
                                      return_type='filename',
                                      session=sessions_to_analyze)
            if not list_t2w:
                raise Exception(
                    "You indicated that a T2w image should be defaced as well. "
                    "However, no T2w image exists for subject %s. "
                    "Please check again." % subject_label)

            for T2_file in list_t2w:
                if args.brainextraction == 'bet':
                    run_brain_extraction_bet(T2_file, args.bet_frac[0],
                                             subject_label, args.bids_dir)
                elif args.brainextraction == 'nobrainer':
                    run_brain_extraction_nb(T2_file, subject_label,
                                            args.bids_dir)

                source_t2w = copy_no_deid(args.bids_dir, subject_label,
                                          T2_file)
                run_t2w_deface(source_t2w, T1_file, T2_file)

        rename_non_deid(args.bids_dir, subject_label)

        if not sessions_to_analyze and args.deface_t2w is False:
            create_graphics(args.bids_dir,
                            subject_label,
                            session=None,
                            t2w=None)
        elif sessions_to_analyze and args.deface_t2w is False:
            for session in sessions_to_analyze:
                create_graphics(args.bids_dir,
                                subject_label,
                                session=session,
                                t2w=None)
        elif not sessions_to_analyze and args.deface_t2w:
            create_graphics(args.bids_dir,
                            subject_label,
                            session=None,
                            t2w=True)
        elif sessions_to_analyze and args.deface_t2w:
            for session in sessions_to_analyze:
                create_graphics(args.bids_dir,
                                subject_label,
                                session=session,
                                t2w=True)

        if not sessions_to_analyze:
            clean_up_files(args.bids_dir, subject_label)
        else:
            for session in sessions_to_analyze:
                clean_up_files(args.bids_dir, subject_label, session=session)
Example #28
0
File: run.py Project: cmaumet/fitlins
def run_fitlins(argv=None):
    warnings.showwarning = _warn_redirect
    opts = get_parser().parse_args(argv)
    if opts.debug:
        logger.setLevel(logging.DEBUG)

    subject_list = None
    if opts.participant_label is not None:
        subject_list = bids.collect_participants(
            opts.bids_dir, participant_label=opts.participant_label)

    ncpus = opts.n_cpus
    if ncpus < 1:
        ncpus = cpu_count()

    plugin_settings = {
        'plugin': 'MultiProc',
        'plugin_args': {
            'n_procs': ncpus,
            'raise_insufficient': False,
            'maxtasksperchild': 1,
        }
    }

    # Build main workflow
    logger.log(25, INIT_MSG(
        version=__version__,
        subject_list=subject_list)
    )

    model = default_path(opts.model, opts.bids_dir, 'model.json')
    if opts.model in (None, 'default') and not op.exists(model):
        model = 'default'

    preproc_dir = default_path(opts.preproc_dir,
                               op.join(opts.bids_dir, 'derivatives'),
                               'fmriprep')
    if not op.exists(preproc_dir):
        preproc_dir = default_path(opts.preproc_dir, opts.output_dir, 'fmriprep')
        if not op.exists(preproc_dir):
            raise RuntimeError("Preprocessed data could not be found")

    pipeline_name = 'fitlins'
    if opts.derivative_label:
        pipeline_name += '_' + opts.derivative_label
    deriv_dir = op.join(opts.output_dir, pipeline_name)
    os.makedirs(deriv_dir, exist_ok=True)

    bids.write_derivative_description(opts.bids_dir, deriv_dir)

    work_dir = mkdtemp() if opts.work_dir is None else opts.work_dir

    fitlins_wf = init_fitlins_wf(
        opts.bids_dir, preproc_dir, deriv_dir, opts.space, model=model,
        participants=subject_list, base_dir=work_dir,
        include_pattern=opts.include, exclude_pattern=opts.exclude
        )

    retcode = 0
    try:
        fitlins_wf.run(**plugin_settings)
    except Exception:
        retcode = 1

    layout = BIDSLayout(opts.bids_dir)
    models = auto_model(layout) if model == 'default' else [model]

    run_context = {'version': __version__,
                   'command': ' '.join(sys.argv),
                   'timestamp': time.strftime('%Y-%m-%d %H:%M:%S %z'),
                   }

    for model in models:
        analysis = Analysis(layout, model=model)
        report_dicts = parse_directory(deriv_dir, work_dir, analysis)
        write_report('unknown', report_dicts, run_context, deriv_dir)

    return retcode
Example #29
0
def bids_mode(input_dir: str, output_dir: str, suffix: str, exclude: str,
              include: str, flat: bool, orientation: str, scale: int,
              pad_crop: list):
    """
        Runs decompose on all images in a bids dataset
    """
    # make the output directory path
    os.makedirs(output_dir, exist_ok=True)

    # Create the layout object
    print('Loading BIDS Directory...')
    layout = BIDSLayout(input_dir)
    print(layout)

    # Get T1w images only
    files = layout.get(suffix=suffix, extension='nii.gz')

    # Get list of subjects to exclude if set
    exclude_list = []  # make exclude list
    if exclude:
        # read in exclude list
        with open(exclude, 'r') as f:
            for line in f:
                exclude_list.append(line.rstrip())

    # Get list of subjects to include if set
    include_list = []
    if include:
        # read in include list
        with open(include, 'r') as f:
            for line in f:
                include_list.append(line.rstrip())

    # loop over each file
    for f in files:
        print('Processing {}...'.format(f.filename))

        # get subject
        subject = f.entities['subject']

        # Check if subject in include list (only if not empty)
        if include_list:
            if subject not in include_list:
                print('sub-{} is not in include list. Skipping...'.format(
                    subject))
                continue

        # Check if subject in exclude list
        if subject in exclude_list:
            print('sub-{} is in exclude list. Skipping...'.format(subject))
            # skip if in exclude list
            continue

        # create output file prefix and directory
        if flat:  # places images directly in output dir
            name = os.path.join(output_dir, get_prefix(f.filename))
        else:  # creates subfolder
            name = os.path.join(output_dir, get_prefix(f.filename),
                                get_prefix(f.filename))
        file_dir = os.path.dirname(name)
        os.makedirs(file_dir, exist_ok=True)

        # decompose image
        decompose(f.get_image(), name, orientation, scale, pad_crop)
Example #30
0
def collect_participants(bids_dir, participant_label=None, strict=False,
                         bids_validate=True):
    """
    List the participants under the BIDS root and checks that participants
    designated with the participant_label argument exist in that folder.
    Returns the list of participants to be finally processed.
    Requesting all subjects in a BIDS directory root:
    >>> collect_participants(str(datadir / 'ds114'), bids_validate=False)
    ['01', '02', '03', '04', '05', '06', '07', '08', '09', '10']

    Requesting two subjects, given their IDs:
    >>> collect_participants(str(datadir / 'ds114'), participant_label=['02', '04'],
    ...                      bids_validate=False)
    ['02', '04']

    Requesting two subjects, given their IDs (works with 'sub-' prefixes):
    >>> collect_participants(str(datadir / 'ds114'), participant_label=['sub-02', 'sub-04'],
    ...                      bids_validate=False)
    ['02', '04']

    Requesting two subjects, but one does not exist:
    >>> collect_participants(str(datadir / 'ds114'), participant_label=['02', '14'],
    ...                      bids_validate=False)
    ['02']
    >>> collect_participants(
    ...     str(datadir / 'ds114'), participant_label=['02', '14'],
    ...     strict=True, bids_validate=False)  # doctest: +IGNORE_EXCEPTION_DETAIL
    Traceback (most recent call last):
    fmriprep.utils.bids.BIDSError:
    ...
    """

    if isinstance(bids_dir, BIDSLayout):
        layout = bids_dir
    else:
        layout = BIDSLayout(str(bids_dir), validate=bids_validate)

    all_participants = set(layout.get_subjects())

    # Error: bids_dir does not contain subjects
    if not all_participants:
        raise BIDSError(
            'Could not find participants. Please make sure the BIDS data '
            'structure is present and correct. Datasets can be validated online '
            'using the BIDS Validator (http://bids-standard.github.io/bids-validator/).\n'
            'If you are using Docker for Mac or Docker for Windows, you '
            'may need to adjust your "File sharing" preferences.', bids_dir)

    # No --participant-label was set, return all
    if not participant_label:
        return sorted(all_participants)

    if isinstance(participant_label, str):
        participant_label = [participant_label]

    # Drop sub- prefixes
    participant_label = [sub[4:] if sub.startswith('sub-') else sub for sub in participant_label]
    # Remove duplicates
    participant_label = sorted(set(participant_label))
    # Remove labels not found
    found_label = sorted(set(participant_label) & all_participants)
    if not found_label:
        raise BIDSError('Could not find participants [{}]'.format(
            ', '.join(participant_label)), bids_dir)

    # Warn if some IDs were not found
    notfound_label = sorted(set(participant_label) - all_participants)
    if notfound_label:
        exc = BIDSError('Some participants were not found: {}'.format(
            ', '.join(notfound_label)), bids_dir)
        if strict:
            raise exc
        warnings.warn(exc.msg, BIDSWarning)

    return found_label
Example #31
0
def main(argv=sys.argv):
    parser = generate_parser()
    args = parser.parse_args()

    # Set environment variables for FSL dir based on CLI
    os.environ['FSL_DIR'] = args.fsl_dir
    os.environ['FSLDIR'] = args.fsl_dir
    # for this script's usage of FSL_DIR...
    fsl_dir = args.fsl_dir + '/bin'

    # Load the bids layout
    layout = BIDSLayout(args.bids_dir)
    subsess = read_bids_layout(layout,
                               subject_list=args.subject_list,
                               collect_on_subject=args.collect)

    for subject, sessions in subsess:

        # Check if fieldmaps are concatenated
        if layout.get(subject=subject,
                      session=sessions,
                      datatype='fmap',
                      extension='.nii.gz',
                      acquisition='func',
                      direction='both'):
            print(
                "Func fieldmaps are concatenated. Running seperate_concatenated_fm"
            )
            seperate_concatenated_fm(layout, subject, sessions, fsl_dir)
            # recreate layout with the additional SEFMS
            layout = BIDSLayout(args.bids_dir)

        fmap = layout.get(subject=subject,
                          session=sessions,
                          datatype='fmap',
                          extension='.nii.gz',
                          acquisition='func')
        # Check if there are func fieldmaps and return a list of each SEFM pos/neg pair
        if fmap:
            print("Running SEFM select")
            base_temp_dir = fmap[0].dirname
            best_pos, best_neg = sefm_select(layout, subject, sessions,
                                             base_temp_dir, fsl_dir,
                                             args.mre_dir, args.debug)
            for sefm in [os.path.join(x.dirname, x.filename) for x in fmap]:
                sefm_json = sefm.replace('.nii.gz', '.json')
                sefm_metadata = layout.get_metadata(sefm)

                if 'Philips' in sefm_metadata['Manufacturer']:
                    insert_edit_json(sefm_json, 'EffectiveEchoSpacing',
                                     0.00062771)
                if 'GE' in sefm_metadata['Manufacturer']:
                    insert_edit_json(sefm_json, 'EffectiveEchoSpacing',
                                     0.000536)
                if 'Siemens' in sefm_metadata['Manufacturer']:
                    insert_edit_json(sefm_json, 'EffectiveEchoSpacing',
                                     0.000510012)

        # Check if there are dwi fieldmaps and insert IntendedFor field accordingly
        if layout.get(subject=subject,
                      session=sessions,
                      datatype='fmap',
                      extension='.nii.gz',
                      acquisition='dwi'):
            print("Editing DWI jsons")
            edit_dwi_jsons(layout, subject, sessions)

        # Additional edits to the anat json sidecar
        anat = layout.get(subject=subject,
                          session=sessions,
                          datatype='anat',
                          extension='.nii.gz')
        if anat:
            for TX in [os.path.join(x.dirname, x.filename) for x in anat]:
                TX_json = TX.replace('.nii.gz', '.json')
                TX_metadata = layout.get_metadata(TX)
                #if 'T1' in TX_metadata['SeriesDescription']:

                if 'Philips' in TX_metadata['Manufacturer']:
                    insert_edit_json(TX_json, 'DwellTime', 0.00062771)
                if 'GE' in TX_metadata['Manufacturer']:
                    insert_edit_json(TX_json, 'DwellTime', 0.000536)
                if 'Siemens' in TX_metadata['Manufacturer']:
                    insert_edit_json(TX_json, 'DwellTime', 0.000510012)

        # add EffectiveEchoSpacing if it doesn't already exist

        # PE direction vs axis
        func = layout.get(subject=subject,
                          session=sessions,
                          datatype='func',
                          extension='.nii.gz')
        if func:
            for task in [os.path.join(x.dirname, x.filename) for x in func]:
                task_json = task.replace('.nii.gz', '.json')
                task_metadata = layout.get_metadata(task)
                if 'Philips' in task_metadata['Manufacturer']:
                    insert_edit_json(task_json, 'EffectiveEchoSpacing',
                                     0.00062771)
                if 'GE' in task_metadata['Manufacturer']:
                    if 'DV26' in task_metadata['SoftwareVersions']:
                        insert_edit_json(task_json, 'EffectiveEchoSpacing',
                                         0.000556)
                if 'Siemens' in task_metadata['Manufacturer']:
                    insert_edit_json(task_json, 'EffectiveEchoSpacing',
                                     0.000510012)
                if "PhaseEncodingAxis" in task_metadata:
                    insert_edit_json(task_json, 'PhaseEncodingDirection',
                                     task_metadata['PhaseEncodingAxis'])
                elif "PhaseEncodingDirection" in task_metadata:
                    insert_edit_json(
                        task_json, 'PhaseEncodingAxis',
                        task_metadata['PhaseEncodingDirection'].strip('-'))
Example #32
0
def init_nibetaseries_participant_wf(
    estimator,
    atlas_img,
    atlas_lut,
    bids_dir,
    derivatives_pipeline_dir,
    exclude_description_label,
    fir_delays,
    hrf_model,
    high_pass,
    output_dir,
    run_label,
    selected_confounds,
    session_label,
    smoothing_kernel,
    space_label,
    subject_list,
    task_label,
    description_label,
    work_dir,
):
    """
    This workflow organizes the execution of NiBetaSeries, with a sub-workflow for
    each subject.

    Parameters
    ----------

        atlas_img : str
            Path to input atlas nifti
        atlas_lut : str
            Path to input atlas lookup table (tsv)
        bids_dir : str
            Root directory of BIDS dataset
        derivatives_pipeline_dir : str
            Root directory of the derivatives pipeline
        exclude_description_label : str or None
            Exclude bold series containing this description label
        fir_delays : list or None
            FIR delays (in scans)
        hrf_model : str
            The model that represents the shape of the hemodynamic response function
        high_pass : float
            High pass filter to apply to bold (in Hertz).
            Reminder - frequencies _higher_ than this number are kept.
        output_dir : str
            Directory where derivatives are saved
        run_label : str or None
            Include bold series containing this run label
        selected_confounds : list
            List of confounds to be included in regression
        session_label : str or None
            Include bold series containing this session label
        smoothing_kernel : float or None
            The smoothing kernel to be applied to the bold series before beta estimation
        space_label : str or None
            Include bold series containing this space label
        subject_list : list
            List of subject labels
        task_label : str or None
            Include bold series containing this task label
        description_label : str or None
            Include bold series containing this description label
        work_dir : str
            Directory in which to store workflow execution state and temporary files
    """
    # setup workflow
    nibetaseries_participant_wf = pe.Workflow(
        name='nibetaseries_participant_wf')
    nibetaseries_participant_wf.base_dir = os.path.join(
        work_dir, 'NiBetaSeries_work')
    os.makedirs(nibetaseries_participant_wf.base_dir, exist_ok=True)

    # reading in derivatives and bids inputs as queryable database like objects
    layout = BIDSLayout(bids_dir, derivatives=derivatives_pipeline_dir)

    for subject_label in subject_list:

        # collect the necessary inputs for both collect data
        subject_data = collect_data(layout,
                                    subject_label,
                                    task=task_label,
                                    run=run_label,
                                    ses=session_label,
                                    space=space_label,
                                    description=description_label)
        # collect files to be associated with each preproc
        brainmask_list = [d['brainmask'] for d in subject_data]
        confound_tsv_list = [d['confounds'] for d in subject_data]
        events_tsv_list = [d['events'] for d in subject_data]
        preproc_img_list = [d['preproc'] for d in subject_data]
        bold_metadata_list = [d['metadata'] for d in subject_data]

        single_subject_wf = init_single_subject_wf(
            estimator=estimator,
            atlas_img=atlas_img,
            atlas_lut=atlas_lut,
            bold_metadata_list=bold_metadata_list,
            brainmask_list=brainmask_list,
            confound_tsv_list=confound_tsv_list,
            events_tsv_list=events_tsv_list,
            fir_delays=fir_delays,
            hrf_model=hrf_model,
            high_pass=high_pass,
            name='single_subject' + subject_label + '_wf',
            output_dir=output_dir,
            preproc_img_list=preproc_img_list,
            selected_confounds=selected_confounds,
            smoothing_kernel=smoothing_kernel,
        )

        single_subject_wf.config['execution']['crashdump_dir'] = (os.path.join(
            output_dir, "sub-" + subject_label, 'log'))

        for node in single_subject_wf._get_all_nodes():
            node.config = deepcopy(single_subject_wf.config)

        nibetaseries_participant_wf.add_nodes([single_subject_wf])

    return nibetaseries_participant_wf
Example #33
0
import os
from nltools.data import Brain_Data, Design_Matrix, Adjacency
from nltools.mask import expand_mask, roi_to_brain
from nltools.stats import zscore, fdr, one_sample_permutation
from nltools.file_reader import onsets_to_dm
from nltools.plotting import component_viewer
from scipy.stats import binom, ttest_1samp
from sklearn.metrics import pairwise_distances
from copy import deepcopy
import networkx as nx
from nilearn.plotting import plot_stat_map, view_img_on_surf
from bids import BIDSLayout, BIDSValidator
import nibabel as nib

base_dir = '/Users/lukechang/Dropbox/Dartbrains'
data_dir = os.path.join(base_dir, 'data', 'Localizer')
layout = BIDSLayout(data_dir, derivatives=True)

Now let's load an example participant's preprocessed functional data.

sub = 'S01'
fwhm=6

data = Brain_Data(layout.get(subject=sub, task='localizer', scope='derivatives', suffix='bold', extension='nii.gz', return_type='file')[0])
smoothed = data.smooth(fwhm=fwhm)

Next we need to pick an ROI. Pretty much any type of ROI will work. 

In this example, we will use a whole-brain parcellation derived from patterns of coactivation across more than 10,000 published studies in Neurosynth (see this paper for more [details](http://cosanlab.com/static/papers/delaVega_2016_JNeuro.pdf)). The parcellation divides the brain into 50 functionally similar ROIs.

mask = Brain_Data('https://neurovault.org/media/images/8423/k50_2mm.nii.gz')
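
With the parcellation loaded, each of the 50 parcels can be separated into its own binary mask using the expand_mask helper imported above. A small sketch (the ROI index and the use of extract_roi here are illustrative choices, not prescribed by this tutorial):

mask_x = expand_mask(mask)  # one binary Brain_Data mask per parcel

roi = mask_x[0]  # pick a single ROI; index 0 is arbitrary
roi_mean_ts = smoothed.extract_roi(mask=roi)  # mean signal in the ROI, one value per TR
print(roi_mean_ts.shape)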