def main():
    print('Running ' + sys.argv[1])
    if not os.path.isdir(MEM_DIR):
        os.mkdir(MEM_DIR)
    mem = Memory(base_dir=MEM_DIR)
    layout = BIDSLayout(BIDS_DIR)
    if num_runs > 1:
        func_files = [[
            layout.get(type='bold',
                       task=task,
                       run=i + 1,
                       subject=subj,
                       extensions='nii.gz')[0] for i in range(num_runs)
        ] for subj in SUBJECTS]
    else:
        func_files = [
            layout.get(type='bold',
                       task=task,
                       subject=subj,
                       extensions='nii.gz') for subj in SUBJECTS
        ]
    events = get_events(func_files)
    confounds = get_confounds(func_files)
    info = get_info(events, confounds)
    specify_model_results = specify_model(layout, func_files, info)
    level1design_results = lv1_design(mem, layout, func_files,
                                      specify_model_results)
    modelgen_results = feat_model(mem, level1design_results)
    mask_results = masking(mem, func_files)
    film_gls(mem, mask_results, modelgen_results)
Example #2
def test_get_fieldmap2():
    data_dir = join(dirname(__file__), 'data', '7t_trt')
    layout = BIDSLayout(data_dir)
    target = 'sub-03/ses-2/func/sub-03_ses-2_task-' \
             'rest_acq-fullbrain_run-2_bold.nii.gz'
    result = layout.get_fieldmap(join(data_dir, target))
    assert result["type"] == "phasediff"
    assert result["phasediff"].endswith('sub-03_ses-2_run-2_phasediff.nii.gz')
Example #3
    def get_TR(in_file):
        from bids.grabbids import BIDSLayout

        data_directory = '/home1/varunk/data/ABIDE1/RawDataBIDs'
        layout = BIDSLayout(data_directory)
        metadata = layout.get_metadata(path=in_file)
        TR = metadata['RepetitionTime']
        return TR
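A helper like get_TR is normally wrapped in a Nipype Function interface so it can run as a workflow node; a minimal sketch, assuming Nipype is installed (the node name and input path are illustrative, not from the original project):

from nipype.interfaces.utility import Function
import nipype.pipeline.engine as pe

# Imports must live inside get_TR's body: Function interfaces
# execute the function source in an isolated namespace.
get_tr_node = pe.Node(Function(input_names=['in_file'],
                               output_names=['TR'],
                               function=get_TR),
                      name='get_tr')
get_tr_node.inputs.in_file = '/path/to/sub-01_task-rest_bold.nii.gz'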
Example #4
def get_participants(nip):

    from bids.grabbids import BIDSLayout

    layout = BIDSLayout(nip.input_path)
    participants = layout.get_subjects()

    return list(set(participants) - set(nip.skipped_participants))
Example #5
def collection():
    layout_path = join(get_test_data_path(), 'ds005')
    layout = BIDSLayout(layout_path)
    collection = layout.get_collections('run',
                                        types=['events'],
                                        scan_length=480,
                                        merge=True,
                                        sampling_rate=10)
    return collection
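Outside pytest, the same function can be called directly to inspect the merged variables; a sketch assuming the to_df() method that pybids variable collections expose:

merged = collection()
df = merged.to_df()  # merged run-level variables as one long-format DataFrame
print(df.head())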
Example #6
    def change_subject(self, ui_info):
        changed_project = ui_info.ui.context["object"].project_info

        print "BIDS directoy : %s" % changed_project.base_directory
        try:
            bids_layout = BIDSLayout(changed_project.base_directory)
            changed_project.subjects = []
            for subj in bids_layout.get_subjects():
                changed_project.subjects.append('sub-' + str(subj))
            # changed_project.subjects = ['sub-'+str(subj) for subj in bids_layout.get_subjects()]
            print "Subjects : %s" % changed_project.subjects

            print "Previous selected subject : %s" % changed_project.subject
            changed_project.configure_traits(view='subject_view')
            print "New selected subject : %s" % changed_project.subject
        except:
            error(
                message=
                "Invalid BIDS dataset. Please see documentation for more details.",
                title="BIDS error")

        self.inputs_checked = False

        changed_project.config_file = os.path.join(
            changed_project.base_directory, 'derivatives', '%s_%s_config.ini' %
            (changed_project.subject, changed_project.process_type))

        if os.path.isfile(
                changed_project.config_file
        ):  # If existing config file / connectome data, load subject project

            print "Existing config file for subject %s: %s" % (
                changed_project.config_file, changed_project.subject)

            changed_project.process_type = get_process_detail(
                changed_project, 'Global', 'process_type')
            changed_project.diffusion_imaging_model = get_process_detail(
                changed_project, 'Global', 'diffusion_imaging_model')

            self.pipeline = init_project(changed_project, False)
            if self.pipeline is not None:
                update_last_processed(changed_project, self.pipeline)
                ui_info.ui.context["object"].project_info = changed_project
                ui_info.ui.context["object"].pipeline = self.pipeline
                print "Config for subject %s loaded !" % ui_info.ui.context[
                    "object"].project_info.subject
                self.project_loaded = True

        else:
            print "Not existing config file (%s) / connectome data for subject %s - Created new project" % (
                changed_project, changed_project.subject)
            self.pipeline = init_project(changed_project, True)
            if self.pipeline is not None:
                # update_last_processed(new_project, self.pipeline) # Not required as the project is new, so no update should be done on processing status
                ui_info.ui.context["object"].project_info = changed_project
                ui_info.ui.context["object"].pipeline = self.pipeline
                self.project_loaded = True
Example #7
def reduce_sub_files(bids_dir, output_file, sub_file):
    df = pd.DataFrame([])
    layout = BIDSLayout(bids_dir)
    files = layout.get(extensions=sub_file)
    for file in [f.filename for f in files]:
        print(file)
        df_ = read_tsv(file)
        df = pd.concat((df, df_))

    to_tsv(df, os.path.join(bids_dir, output_file))
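reduce_sub_files relies on read_tsv and to_tsv helpers (plus pandas and os imports) that the excerpt omits; a minimal sketch of what they might look like, assuming plain tab-separated files:

import pandas as pd

def read_tsv(path):
    # Load one tab-separated file into a DataFrame.
    return pd.read_csv(path, sep='\t')

def to_tsv(df, path):
    # Write the concatenated DataFrame back out as TSV.
    df.to_csv(path, sep='\t', index=False)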
Example #8
def _get_subjects(root_input_folder):
    """
    build subject list form either input arguments (participant_label, participant_file) or
    (if participant_label and participant_file are not specified) input data in bids_input_folder,
    then remove subjects form list according to participant_exclusion_file (if any)
    """
    layout = BIDSLayout(root_input_folder)
    return [(os.path.abspath(
        os.path.join(root_input_folder, "sub-{}".format(subject))), subject)
            for subject in layout.get_subjects()]
Example #9
def main(sourcedata,
         derivatives,
         tmp_dir,
         subject=None):


    segment_layout = BIDSLayout(os.path.join(derivatives,
                                             'nighres',
                                             'segmentation_dilation'))

    segmentation = get_bids_file(segment_layout, 
                                 subject,
                                 'seg')

    boundary_dist = get_bids_file(segment_layout, 
                                 subject,
                                 'dist')

    max_labels = get_bids_file(segment_layout, 
                                 subject,
                                 'lbls')

    max_probas = get_bids_file(segment_layout, 
                               subject,
                               'mems')


    output_dir = os.path.join(derivatives,
                              'nighres',
                              'extract_brain_regions',
                              'sub-{}'.format(subject))

    extract_layout = BIDSLayout(os.path.join(derivatives,
                                             'nighres',
                                             'cortex_extraction'))
    # RIGHT
    cortex_right = nighres.brain.extract_brain_region(segmentation=segmentation,
                                               levelset_boundary=boundary_dist,
                                               maximum_membership=max_probas,
                                               maximum_label=max_labels,
                                               extracted_region='right_cerebrum',
                                               file_name='sub-{}'.format(subject),
                                               save_data=True,
                                               output_dir=output_dir)


    # LEFT
    cortex = nighres.brain.extract_brain_region(segmentation=segmentation,
                                       levelset_boundary=boundary_dist,
                                       maximum_membership=max_probas,
                                       maximum_label=max_labels,
                                       extracted_region='left_cerebrum',
                                       file_name='sub-{}'.format(subject),
                                       save_data=True,
                                       output_dir=output_dir)
Example #10
def synthetic(request):
    root = join(get_test_data_path(), 'synthetic')
    default_preproc = get_option('loop_preproc')
    if request.param == 'preproc':
        set_option('loop_preproc', True)
        layout = BIDSLayout((root, ['bids', 'derivatives']))
    else:
        set_option('loop_preproc', default_preproc)
        layout = BIDSLayout(root, exclude='derivatives')
    yield request.param, load_variables(layout, skip_empty=True)
    set_option('loop_preproc', default_preproc)
Example #11
def test_load_description(testlayout1):
    data_dir = join(get_test_data_path(), 'images')
    with pytest.raises(ValueError) as e:
        layout = BIDSLayout(data_dir)
    assert str(e.value).startswith("Mandatory 'dataset_description'")

    # Should not raise an error
    layout = BIDSLayout(data_dir, config='derivatives')
    assert hasattr(testlayout1, 'description')
    assert testlayout1.description['Name'] == '7t_trt'
    assert testlayout1.description['BIDSVersion'] == "1.0.0rc3"
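For a directory that lacks the file, a minimal dataset_description.json is enough to satisfy the check; a hedged sketch (the field values are illustrative):

import json
from os.path import join

with open(join(data_dir, 'dataset_description.json'), 'w') as fp:
    # BIDS requires at least Name and BIDSVersion
    json.dump({'Name': 'images', 'BIDSVersion': '1.0.2'}, fp)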
Example #12
def collect_bids_data(dataset, participant_label=None, session=None, run=None,
                      queries=None, task=None, modalities=None):
    """Get files in dataset"""

    # Start a layout
    layout = BIDSLayout(dataset)

    # Set queries
    if queries is None:
        queries = deepcopy(DEFAULT_QUERIES)

    # Set modalities
    if modalities is None:
        modalities = deepcopy(DEFAULT_MODALITIES)

    if session:
        for mod in modalities:
            queries[mod]['session'] = [session]

    if run:
        for mod in modalities:
            queries[mod]['run'] = run

    if task:
        if isinstance(task, list) and len(task) == 1:
            task = task[0]
        queries['bold']['task'] = task

    # Set participants
    if participant_label is not None:
        if isinstance(participant_label, (bytes, str)):
            participant_label = [participant_label]

        participant_label = ['{}'.format(sub) for sub in participant_label]
        participant_label = [sub[4:] if sub.startswith('sub-') else sub
                             for sub in participant_label]
        participant_label = [sub[:-1] if sub.endswith('*') else (sub + '$')
                             for sub in participant_label]
        participant_label = [sub[1:] if sub.startswith('*') else ('^' + sub)
                             for sub in participant_label]

        # For some reason, outer subject ids are filtered out
        participant_label.insert(0, 'null')
        participant_label.append('null')
        for key in queries.keys():
            queries[key]['subject'] = 'sub-\\(' + '|'.join(participant_label) + '\\){1}'

    # Start querying
    imaging_data = {}
    for mod in modalities:
        imaging_data[mod] = [x.filename for x in layout.get(**queries[mod])]

    return imaging_data
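For reference, the label munging above collapses a mixed list of participant specifiers into a single anchored regular expression; tracing it by hand for a hypothetical input:

labels = ['sub-01', '02*']
labels = [s[4:] if s.startswith('sub-') else s for s in labels]     # ['01', '02*']
labels = [s[:-1] if s.endswith('*') else s + '$' for s in labels]   # ['01$', '02']
labels = [s[1:] if s.startswith('*') else '^' + s for s in labels]  # ['^01$', '^02']
labels = ['null'] + labels + ['null']
print('sub-\\(' + '|'.join(labels) + '\\){1}')  # sub-\(null|^01$|^02|null\){1}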
Example #13
def collect_bids_data(dataset, participant_label=None, session=None, run=None,
                      queries=None, modalities=None):
    """Get files in dataset"""

    # Start a layout
    layout = BIDSLayout(dataset)

    # Find all sessions
    if session:
        session_list = [session]
    else:
        session_list = layout.unique('session')
        if session_list == []:
            session_list = [None]

    # Find all runs
    if run:
        run_list = [run]
    else:
        run_list = layout.unique('run')
        if run_list == []:
            run_list = [None]

    # Set modalities
    if modalities is None:
        modalities = deepcopy(DEFAULT_MODALITIES)

    # Set queries
    if queries is None:
        queries = deepcopy(DEFAULT_QUERIES)

    # Set participants
    if participant_label is not None:
        if not isinstance(participant_label, list):
            for key in queries.keys():
                queries[key]['subject'] = participant_label
        else:
            participant_label = ['{}'.format(sub) for sub in participant_label]
            participant_label = [sub[4:] if sub.startswith('sub-') else sub
                                 for sub in participant_label]

            # For some reason, outer subject ids are filtered out
            participant_label.insert(0, 'null')
            participant_label.append('null')
            for key in queries.keys():
                queries[key]['subject'] = 'sub-\\(' + '|'.join(participant_label) + '\\){1}'

    # Start querying
    imaging_data = {}
    for mod in modalities:
        imaging_data[mod] = [x.filename for x in layout.get(**queries[mod])]

    return imaging_data
Example #14
    def _getMetadata(in_file, data_directory):
        from bids.grabbids import BIDSLayout

        # Defaults when slice-order metadata is missing: interleaved ascending
        interleaved = True
        index_dir = False
        layout = BIDSLayout(data_directory)
        metadata = layout.get_metadata(path=in_file)
        print(metadata)

        try:
            tr = metadata['RepetitionTime']
        except KeyError:
            print('Key RepetitionTime not found in task-rest_bold.json so using a default of 2.0')
            tr = 2

        try:
            slice_order = metadata['SliceAcquisitionOrder']
        except KeyError:
            print('Key SliceAcquisitionOrder not found in task-rest_bold.json so using a default of interleaved ascending')
            return tr, index_dir, interleaved

        if slice_order.split(' ')[0] == 'Sequential':
            interleaved = False
        if slice_order.split(' ')[1] == 'Descending':
            index_dir = True

        return tr, index_dir, interleaved
Example #15
def anon_acqtimes(dset_dir):
    """
    Anonymize acquisition datetimes for a dataset. Works for both longitudinal
    and cross-sectional studies. The time of day is preserved, but the first
    scan is set to January 1st, 1800. In a longitudinal study, each session is
    anonymized relative to the first session, so that time between sessions is
    preserved.

    Overwrites scan tsv files in dataset. Only run this *after* data collection
    is complete for the study, especially if it's longitudinal.

    Parameters
    ----------
    dset_dir : str
        Path to BIDS dataset to be anonymized.
    """
    bl_dt = parser.parse('1800-01-01')

    layout = BIDSLayout(dset_dir)
    subjects = layout.get_subjects()
    sessions = sorted(layout.get_sessions())

    for sub in subjects:
        if not sessions:
            scans_file = op.join(dset_dir,
                                 'sub-{0}/sub-{0}_scans.tsv'.format(sub))
            df = pd.read_csv(scans_file, sep='\t')
            first_scan = df['acq_time'].min()
            first_dt = parser.parse(first_scan.split('T')[0])
            diff = first_dt - bl_dt
            acq_times = df['acq_time'].apply(parser.parse)
            acq_times = (acq_times - diff).astype(str)
            df['acq_time'] = acq_times
            df.to_csv(scans_file, sep='\t', index=False)
        else:
            # Separated from dataset sessions in case subject missed some
            sub_ses = sorted(layout.get_sessions(subject=sub))
            for i, ses in enumerate(sub_ses):
                scans_file = op.join(dset_dir,
                                     'sub-{0}/ses-{1}/sub-{0}_ses-{1}_scans.'
                                     'tsv'.format(sub, ses))
                df = pd.read_csv(scans_file, sep='\t')
                if i == 0:
                    # Anonymize in terms of first scan for subject.
                    first_scan = df['acq_time'].min()
                    first_dt = parser.parse(first_scan.split('T')[0])
                    diff = first_dt - bl_dt

                acq_times = df['acq_time'].apply(parser.parse)
                acq_times = (acq_times - diff).astype(str)
                df['acq_time'] = acq_times
                df.to_csv(scans_file, sep='\t', index=False)
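The shift arithmetic above can be checked in isolation; a small worked example with dateutil (the dates are invented for illustration):

from dateutil import parser

bl_dt = parser.parse('1800-01-01')
first_dt = parser.parse('2019-06-15')                # date of the subject's first scan
diff = first_dt - bl_dt                              # offset subtracted from every acq_time
shifted = parser.parse('2019-06-15T09:30:00') - diff
print(shifted)  # 1800-01-01 09:30:00 -- time of day preserved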
Example #16
File: bids.py  Project: fliem/pybixs
def prepare_bids_df(sourcedata_dir, subject_ids=None, session_ids=None):
    layout = BIDSLayout(sourcedata_dir)
    bids_df = layout.as_data_frame()
    bids_df.dropna(subset=["subject"], inplace=True)  # remove study level info
    nii_df = bids_df[bids_df.path.str.endswith(NII_EXT)]
    nii_df = nii_df.sort_values(
        by=["subject", "session", "modality", "type", "run"])

    # reduce to requested sessions
    if subject_ids:
        nii_df = nii_df[nii_df.subject.isin(subject_ids)]
    if session_ids:
        nii_df = nii_df[nii_df.session.isin(session_ids)]
    return nii_df
Example #17
File: bids.py  Project: eort/nideconv
def _get_func_and_confounds(fmriprep_folder, sourcedata_folder):

    fmriprep_layout = BIDSLayout(fmriprep_folder)
    sourcedata_layout = BIDSLayout(sourcedata_folder)

    files = fmriprep_layout.get(extensions=['.nii', 'nii.gz'],
                                modality='func',
                                type='preproc')

    confounds = []
    metadata = []

    for f in files:
        kwargs = {}

        for key in ['subject', 'run', 'task', 'session']:
            if hasattr(f, key):
                kwargs[key] = getattr(f, key)

        c = fmriprep_layout.get(type='confounds', **kwargs)
        c = c[0]
        confounds.append(c)

        sourcedata_file = sourcedata_layout.get(modality='func',
                                                extensions='nii.gz',
                                                **kwargs)

        assert (len(sourcedata_file) == 1)
        md = sourcedata_layout.get_metadata(sourcedata_file[0].filename)
        metadata.append(md)

    return list(zip(files, confounds, metadata))
Example #18
def drp_seed_fc():
    import numpy as np
    from os import path
    #from labbookdb.report.tracking import treatment_group, append_external_identifiers
    from samri.plotting.overview import multiplot_matrix, multipage_plot
    from samri.utilities import bids_substitution_iterator
    from samri.analysis import fc
    from samri.utilities import N_PROCS

    N_PROCS = max(N_PROCS - 8, 2)

    from bids.grabbids import BIDSLayout
    from bids.grabbids import BIDSValidator
    import os

    base = '~/ni_data/ofM.dr/bids/preprocessing/generic/'
    base = os.path.abspath(os.path.expanduser(base))
    validate = BIDSValidator()
    for x in os.walk(base):
        print(x[0])
        print(validate.is_bids(x[0]))
    layout = BIDSLayout(base)
    df = layout.as_data_frame()
    df = df[df.type.isin(['cbv'])]
    print(df)

    #substitutions = bids_substitution_iterator(
    #	list(df['session'].unique()),
    #	all_subjects,
    #	["CogB",],
    #	"~/ni_data/ofM.dr/",
    #	"composite",
    #	acquisitions=["EPI",],
    #	check_file_format='~/ni_data/ofM.dr/preprocessing/{preprocessing_dir}/sub-{subject}/ses-{session}/func/sub-{subject}_ses-{session}_acq-{acquisition}_task-{task}_cbv.nii.gz')

    #substitutions = df.T.to_dict().values()[:2]
    substitutions = df.T.to_dict().values()
    print(substitutions)

    fc_results = fc.seed_based(
        substitutions,
        "~/ni_data/templates/roi/DSURQEc_drp.nii.gz",
        "/usr/share/mouse-brain-atlases/dsurqec_200micron_mask.nii",
        ts_file_template=
        '~/ni_data/ofM.dr/bids/preprocessing/generic/sub-{subject}/ses-{session}/func/sub-{subject}_ses-{session}_acq-{acquisition}_task-{task}_cbv.nii.gz',
        save_results=
        "~/ni_data/ofM.dr/bids/fc/DSURQEc_drp/sub-{subject}/ses-{session}/func/sub-{subject}_ses-{session}_acq-{acquisition}_task-{task}_cbv_zstat.nii.gz",
        n_procs=N_PROCS,
        cachedir='/mnt/data/joblib')
Example #19
    def _getMetadata(in_file):
        from bids.grabbids import BIDSLayout
        import logging

        logger = logging.getLogger(__name__)
        logger.setLevel(logging.DEBUG)

        # create a file handler
        handler = logging.FileHandler('progress.log')

        # add the handlers to the logger
        logger.addHandler(handler)

        interleaved = True
        index_dir = False
        data_directory = '/home1/varunk/data/ABIDE1/RawDataBIDs'
        layout = BIDSLayout(data_directory)
        metadata = layout.get_metadata(path=in_file)
        print(metadata)

        logger.info('Extracting Meta Data of file: %s', in_file)
        try:
            tr = metadata['RepetitionTime']
        except KeyError:
            print(
                'Key RepetitionTime not found in task-rest_bold.json so using a default of 2.0 '
            )
            tr = 2
            logger.error(
                'Key RepetitionTime not found in task-rest_bold.json for file %s so using a default of 2.0 ',
                in_file)

        try:
            slice_order = metadata['SliceAcquisitionOrder']
        except KeyError:
            print(
                'Key SliceAcquisitionOrder not found in task-rest_bold.json so using a default of interleaved ascending '
            )
            logger.error(
                'Key SliceAcquisitionOrder not found in task-rest_bold.json for file %s so using a default of interleaved ascending',
                in_file)
            return tr, index_dir, interleaved

        if slice_order.split(' ')[0] == 'Sequential':
            interleaved = False
        if slice_order.split(' ')[1] == 'Descending':
            index_dir = True

        return tr, index_dir, interleaved
Example #20
def get_preproc_data(dset, cfg, data_dir='../data'):
    """
    Get echo-sorted list of fMRIPrep-preprocessed files and echo times in ms.
    """
    keys = ['subject', 'run', 'task']
    data_dir = op.abspath(data_dir)
    dset_dir = op.join(data_dir, dset, cfg['version'], 'uncompressed')
    layout = BIDSLayout(dset_dir)

    kwargs = {k: cfg[k] for k in keys if k in cfg.keys()}

    echoes = sorted(layout.get_echoes())
    in_files = []
    echo_times = []
    for i, echo in enumerate(echoes):
        # Get echo time in ms
        orig_file = layout.get(modality='func',
                               type='bold',
                               extensions='nii.gz',
                               echo=echo,
                               **kwargs)
        if len(orig_file) != 1:
            raise Exception('{0} files found for echo {1} of {2}: '
                            '{3}'.format(len(orig_file), echo, dset, cfg))

        orig_file = orig_file[0].filename
        metadata = layout.get_metadata(orig_file)
        echo_time = metadata['EchoTime'] * 1000
        echo_times.append(np.round(echo_time, 3))  # be wary, but seems okay

        # Get preprocessed file associated with echo
        func_file = orig_file.replace(
            dset_dir, op.join(dset_dir, 'derivatives/fmriprep'))
        func_file = func_file.replace(
            'bold.nii.gz', 'bold_space-MNI152NLin2009cAsym_preproc.nii.gz')

        if not op.isfile(func_file):
            # print('File DNE: {0}'.format(func_file))
            pass
        in_files.append(func_file)

        if i == 0:
            mask_file = func_file.replace('_preproc.nii.gz',
                                          '_brainmask.nii.gz')

        if not op.isfile(mask_file):
            # print('File DNE: {0}'.format(mask_file))
            pass
    return in_files, echo_times, mask_file
Example #21
def test_read_from_files():

    mod_file = abspath(grabbids.__file__)
    path = join(dirname(mod_file), 'tests', 'data', 'ds005')

    path2 = join(dirname(abspath(grabbids.__file__)), 'tests', 'data', 'ds005')
    subs = ['02', '06', '08']
    template = 'sub-%s/func/sub-%s_task-mixedgamblestask_run-01_events.tsv'
    files = [join(path2, template % (s, s)) for s in subs]
    # Put them in a temporary directory
    tmp_dir = tempfile.mkdtemp()
    for f in files:
        shutil.copy2(f, tmp_dir)

    layout = BIDSLayout(path)
    # layout2 = BIDSLayout(tmp_dir)
    # layout = merge_layouts([layout, layout2])

    # Time-level variables
    collection = load_event_variables(layout, scan_length=480)
    col_keys = collection.columns.keys()
    assert set(col_keys) == {
        'RT', 'gain', 'respnum', 'PTval', 'loss', 'respcat', 'parametric gain',
        'trial_type'
    }

    # Subject-level variables
    collection = load_variables(layout, 'subject')
    col_keys = collection.columns.keys()
    assert set(col_keys) == {'sex', 'age'}
    shutil.rmtree(tmp_dir)
Example #22
File: custom.py  Project: p3proc/p3
def format_reference(func, reference, bids_dir):
    import os
    import nibabel
    from p3.utility import get_basename
    from bids.grabbids import BIDSLayout

    # save to node folder (go up 2 directories bc of iterfield)
    cwd = os.path.dirname(os.path.dirname(os.getcwd()))

    # get filename to output
    formatted_reference = os.path.join(
        cwd, '{}_format4D.nii.gz'.format(get_basename(func)))

    # get dim 4 and TR of input image
    dim4 = nibabel.load(func).header.get_data_shape()[3]  # get the 4th dim
    TR = BIDSLayout(bids_dir).get_metadata(func)[
        'RepetitionTime']  # get the TR

    # make the reference image the same dims as the input
    print('Formatting reference image...')
    command = 'ImageMath 3 {} ReplicateImage {} {} {} 0'.format(
        formatted_reference, reference, dim4, TR)
    print(command)
    os.system(command)

    return (formatted_reference, dim4, TR)
Example #23
def analysis():
    layout_path = join(get_test_data_path(), 'ds005')
    layout = BIDSLayout(layout_path)
    json_file = join(layout_path, 'models', 'ds-005_type-test_model.json')
    analysis = Analysis(layout, json_file)
    analysis.setup(scan_length=480, subject=['01', '02'])
    return analysis
Example #24
def get_bids_df(bids_dir, scans_only=None, keep_defaced=False):

    if isinstance(bids_dir, Path):
        bids_dir = bids_dir.as_posix()
    layout = BIDSLayout(bids_dir)
    df_pybids = layout.as_data_frame()
    if not keep_defaced:
        df_pybids = df_pybids.query('~path.str.contains("defaced")')
    if scans_only:
        df_pybids = (df_pybids.loc[df_pybids.path.str.contains('nii.gz'), :].
                     query("~path.str.contains('.git')"))
        df_pybids['json_path'] = \
            (df_pybids.path.apply(
                lambda x: Path(''.join([*x.split('.')[:-2], '.json']))))

    return df_pybids
Example #25
    def init_getters(self):
        """Initializes the getters methods for input paths and feature readers."""

        from visualqc.features import functional_mri_features
        self.feature_extractor = functional_mri_features

        if 'BIDS' in self.in_dir_type.upper():
            from bids.grabbids import BIDSLayout
            self.bids_layout = BIDSLayout(self.in_dir)
            self.field_names, self.units = traverse_bids(
                self.bids_layout, **cfg.func_mri_BIDS_filters)

            # file name of each BOLD scan is the unique identifier, as it essentially contains all the key info.
            self.unit_by_id = {
                splitext(basename(fpath))[0]: realpath(fpath)
                for _, fpath in self.units
            }
            self.id_list = list(self.unit_by_id.keys())
        elif 'GENERIC' in self.in_dir_type.upper():
            if self.id_list is None or self.images_for_id is None:
                raise ValueError(
                    'id_list or images_for_id can not be None for generic in_dir'
                )
            self.unit_by_id = self.images_for_id.copy()
        else:
            raise NotImplementedError(
                'Only two formats are supported: BIDS and GENERIC with regex spec for filenames'
            )
Example #26
def analysis_force_auto_contrasts():
    layout_path = join(get_test_data_path(), 'ds005')
    layout = BIDSLayout(layout_path, exclude='derivatives/')
    json_file = join(layout_path, 'models', 'ds-005_type-test_model.json')
    analysis = Analysis(layout, json_file)
    analysis.setup(scan_length=480, subject=['01', '02'], auto_contrasts=True)
    return analysis
Example #27
def test_auto_model_analysis(model):

    layout_path = join(get_test_data_path(), 'ds005')
    layout = BIDSLayout(layout_path)

    # Test to make sure an analaysis can be setup from the generated model
    analysis = Analysis(layout, model)
    analysis.setup(scan_length=480)

    assert model['name'] == 'ds005_mixedgamblestask'

    # run level
    block = model['blocks'][0]
    assert block['name'] == 'run'
    assert block['level'] == 'run'
    assert block['transformations'][0]['name'] == 'factor'
    assert block['model']['HRF_variables'][0] == 'trial_type.parametric gain'
    assert block['contrasts'][0]['name'] == 'run_parametric gain'
    assert block['contrasts'][0]['weights'] == [1]

    # subject level
    block = model['blocks'][1]
    assert block['name'] == 'subject'
    assert block['level'] == 'subject'
    assert block['model']['variables'][0] == 'run_parametric gain'
    assert block['contrasts'][0]['name'] == 'subject_run_parametric gain'

    # dataset level
    block = model['blocks'][2]
    assert block['name'] == 'dataset'
    assert block['level'] == 'dataset'
    assert block['model']['variables'][0] == 'subject_run_parametric gain'
    assert block['contrasts'][0][
        'name'] == 'dataset_subject_run_parametric gain'
Example #28
def model():
    layout_path = join(get_test_data_path(), 'ds005')
    layout = BIDSLayout(layout_path)

    models = auto_model(layout, scan_length=480, one_vs_rest=True)

    return models[0]
Example #29
def get_niftis(subject_id, data_dir):
    # Remember that all the necesary imports need to be INSIDE the function for the Function Interface to work!
    from bids.grabbids import BIDSLayout

    layout = BIDSLayout(data_dir)

    bolds = [
        f.filename for f in layout.get(
            subject=subject_id, type='bold', extensions=['nii', 'nii.gz'])
    ]
    T1ws = [
        f.filename for f in layout.get(
            subject=subject_id, type='T1w', extensions=['nii', 'nii.gz'])
    ]

    return bolds, T1ws
Example #30
def main(sourcedata, derivatives, tmp_dir, subject=None):

    dura_layout = BIDSLayout(os.path.join(derivatives, 'masks'))
    dura_mask = get_bids_file(dura_layout, subject, 'dura')

    layout = BIDSLayout('/sourcedata')

    t1w = get_bids_file(layout, subject, 'UNI')
    t1map = get_bids_file(layout, subject, 'T1map')

    fmriprep_layout = BIDSLayout('/derivatives/fmriprep')
    brain_mask = get_bids_file(fmriprep_layout, subject, 'brainmask')

    brain_mask_dil = ndimage.binary_dilation(nb.load(brain_mask).get_data(),
                                             iterations=3)
    brain_mask = image.new_img_like(brain_mask, brain_mask_dil)

    brain_mask = image.resample_to_img(brain_mask, t1w)
    brain_mask.to_filename('/derivatives/mask_test.nii.gz')

    t1w_masked = image.math_img('brain_mask * t1w',
                                brain_mask=brain_mask,
                                t1w=t1w)

    t1map_masked = image.math_img('brain_mask * t1map',
                                  brain_mask=brain_mask,
                                  t1map=t1map)

    t1w_masked_fn = '/derivatives/fmriprep/sub-{subject}/anat/sub-{subject}_T1w_masked.nii.gz'.format(
        subject=subject)
    t1w_masked.to_filename(t1w_masked_fn)

    t1map_masked_fn = '/derivatives/fmriprep/sub-{subject}/anat/sub-{subject}_T1map_masked.nii.gz'.format(
        subject=subject)
    t1map_masked.to_filename(t1map_masked_fn)

    output_dir = os.path.join(derivatives, 'nighres', 'segmentation_dilation',
                              'sub-{}'.format(subject))

    brain.mgdm_segmentation(contrast_image1=t1w_masked_fn,
                            contrast_type1='Mp2rage7T',
                            contrast_image2=t1map_masked_fn,
                            contrast_type2='T1map7T',
                            contrast_image3=dura_mask,
                            contrast_type3='Filters',
                            save_data=True,
                            output_dir=output_dir)
Example #32
def get_data_selection(workflow_base,
	match={},
	exclude={},
	measurements=[],
	exclude_measurements=[],
	count_runs=False,
	fail_suffix='_failed',
	):
	"""
	Return a `pandas.DataFrame` object of the Bruker measurement directories located under a given base directory, and their respective scans, subjects, and tasks.

	Parameters
	----------
	workflow_base : str
		The path in which to query for Bruker measurement directories.
	match : dict
		A dictionary of matching criteria.
		The keys of this dictionary must be full BIDS key names (e.g. "task" or "acquisition"), and the values must be strings (e.g. "CogB") which, combined with the respective BIDS key, identify scans to be included (e.g. scans, the names of which containthe string "task-CogB" - delimited on either side by an underscore or the limit of the string).
	exclude : dict, optional
		A dictionary of exclusion criteria.
		The keys of this dictionary must be full BIDS key names (e.g. "task" or "acquisition"), and the values must be strings (e.g. "CogB") which, combined with the respective BIDS key, identify scans to be excluded(e.g. a scans, the names of which contain the string "task-CogB" - delimited on either side by an underscore or the limit of the string).
	measurements : list of str, optional
		A list of measurement directory names to be included exclusively (i.e. whitelist).
		If the list is empty, all directories (unless explicitly excluded via `exclude_measurements`) will be queried.
	exclude_measurements : list of str, optional
		A list of measurement directory names to be excluded from querying (i.e. a blacklist).

	Notes
	-----
	This data selector function is robust to `ScanProgram.scanProgram` files which have been truncated before the first detected match, but not to files truncated after at least one match.
	"""

	workflow_base = os.path.abspath(os.path.expanduser(workflow_base))

	if not measurements:
		measurements = os.listdir(workflow_base)
	measurement_path_list = [os.path.join(workflow_base,i) for i in measurements]

	selected_measurements=[]
	#create a dummy path for bidsgrabber to parse file names from
	bids_temppath = '/var/tmp/samri_bids_temppaths/'
	try:
		os.mkdir(bids_temppath)
	except FileExistsError:
		pass
	layout = BIDSLayout(bids_temppath)
	#populate a list of lists with acceptable subject names, sessions, and sub_dir's
	for sub_dir in measurement_path_list:
		if sub_dir not in exclude_measurements:
			run_counter = 0
			selected_measurement = {}
			try:
				state_file = open(os.path.join(workflow_base,sub_dir,"subject"), "r")
				read_variables=0 #count variables so that breaking takes place after both have been read
				while True:
					current_line = state_file.readline()
					if "##$SUBJECT_name_string=" in current_line:
						entry=re.sub("[<>\n]", "", state_file.readline())
						if not match_exclude_ss(entry, match, exclude, selected_measurement, 'subject'):
							break
						read_variables +=1 #count recorded variables
					if "##$SUBJECT_study_name=" in current_line:
						entry=re.sub("[<>\n]", "", state_file.readline())
						if not match_exclude_ss(entry, match, exclude, selected_measurement, 'session'):
							break
						read_variables +=1 #count recorded variables
					if read_variables == 2:
						selected_measurement['measurement'] = sub_dir
						scan_program_file = os.path.join(workflow_base,sub_dir,"ScanProgram.scanProgram")
						scan_dir_resolved = False
						try:
							with open(scan_program_file) as search:
								for line in search:
									line_considered = True
									measurement_copy = deepcopy(selected_measurement)
									if re.match(r'^[ \t]+<displayName>[a-zA-Z0-9-_]+? \(E\d+\)</displayName>[\r\n]+', line):
										if fail_suffix and re.match(r'^.+?{} \(E\d+\)</displayName>[\r\n]+'.format(fail_suffix), line):
											continue
										m = re.match(r'^[ \t]+<displayName>(?P<scan_type>.+?) \(E(?P<number>\d+)\)</displayName>[\r\n]+', line)
										number = m.groupdict()['number']
										scan_type = m.groupdict()['scan_type']
										bids_keys = layout.parse_file_entities('{}/{}'.format(bids_temppath,scan_type))
										for key in match:
											# Session and subject fields are not recorded in scan_type and were already checked at this point.
											if key in ['session', 'subject']:
												continue
											try:
												if bids_keys[key] not in match[key]:
													line_considered = False
													break
											except KeyError:
												line_considered = False
												break
										if line_considered:
											measurement_copy['scan_type'] = str(scan_type).strip(' ')
											measurement_copy['scan'] = str(int(number))
											measurement_copy['run'] = run_counter
											scan_type, measurement_copy= assign_modality(scan_type, measurement_copy)
											measurement_copy.update(bids_keys)
											run_counter += 1
											selected_measurements.append(measurement_copy)
											scan_dir_resolved = True
							if not scan_dir_resolved:
								raise IOError()
						except IOError:
							for sub_sub_dir in os.listdir(os.path.join(workflow_base,sub_dir)):
								measurement_copy = deepcopy(selected_measurement)
								acqp_file_path = os.path.join(workflow_base,sub_dir,sub_sub_dir,"acqp")
								scan_subdir_resolved = False
								try:
									with open(acqp_file_path,'r') as search:
										for line in search:
											line_considered = True
											if scan_subdir_resolved:
												break
											if re.match(r'^(?!/)<[a-zA-Z0-9-_]+?-[a-zA-Z0-9-_]+?>[\r\n]+', line):
												if fail_suffix and re.match(r'^.+?{}$'.format(fail_suffix), line):
													continue
												number = sub_sub_dir
												m = re.match(r'^(?!/)<(?P<scan_type>.+?)>[\r\n]+', line)
												scan_type = m.groupdict()['scan_type']
												bids_keys = layout.parse_file_entities('{}/{}'.format(bids_temppath,scan_type))
												for key in match:
													# Session and subject fields are not recorded in scan_type and were already checked at this point.
													if key in ['session', 'subject']:
														continue
													try:
														if bids_keys[key] not in match[key]:
															line_considered = False
															break
													except KeyError:
														line_considered = False
														break
												if line_considered:
													measurement_copy['scan_type'] = str(scan_type).strip(' ')
													measurement_copy['scan'] = str(int(number))
													measurement_copy['run'] = run_counter
													scan_type, measurement_copy= assign_modality(scan_type, measurement_copy)
													measurement_copy.update(bids_keys)
													run_counter += 1
													selected_measurements.append(measurement_copy)
													scan_subdir_resolved = True
										else:
											pass
								except IOError:
									pass
						break #prevent loop from going on forever
			except IOError:
				print('Could not open {}'.format(os.path.join(workflow_base,sub_dir,"subject")))
				pass
	data_selection = pd.DataFrame(selected_measurements)
	try:
		shutil.rmtree(bids_temppath)
	except PermissionError:
		pass
	return data_selection
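A hedged usage sketch for the selector above (the archive path and criteria are illustrative, not from the original project):

selection = get_data_selection(
    '~/bruker_data',
    match={'task': ['CogB'], 'acquisition': ['EPI']},
    exclude_measurements=['20150705_run1'],
)
print(selection[['subject', 'session', 'scan_type', 'run']])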
Example #33
def bids_data_selection(base, structural_match, functional_match, subjects, sessions,
	verbose=False,
	joint_conditions=True,
	):
	validate = BIDSValidator()
	if verbose:
		for x in os.walk(base):
			print(x[0])
			if validate.is_bids(x[0]):
				print("Detected!")
			else:
				print("Is not BIDS-formatted.")
	layout = BIDSLayout(base)
	df = layout.as_data_frame()

	# Run is for some reason recorded as float
	df.loc[df['run'].notna(),'run'] = df.loc[df['run'].notnull(),'run'].apply(int).apply(str)
	#df['run'] = df['run'].astype(int)

	# drop event files
	df = df[df.type != 'events']

	# rm .json
	df = df.loc[df.path.str.contains('.nii')]

	# generate scan types for later
	df['scan_type'] = ""

	#print(df.path.str.startswith('task', beg=0,end=len('task')))
	beg = df.path.str.find('task-')
	end = df.path.str.find('.')
	#df.loc[df.modality == 'func', 'scan_type'] = 'acq-'+df['acq']+'_task-'+  df.path.str.partition('task-')[2].str.partition('.')[0]
	#df.loc[df.modality == 'anat', 'scan_type'] = 'acq-'+df['acq']+'_' + df['type']
	#TODO: fix task!=type
	if 'func' in df.columns:
		df.loc[df.modality == 'func', 'task'] = df.path.str.partition('task-')[2].str.partition('_')[0]
		df.loc[df.modality == 'func', 'scan_type'] = 'task-' + df['task'] + '_acq-'+ df['acq']
	if 'anat' in df.columns:
		df.loc[df.modality == 'anat', 'scan_type'] = 'acq-'+df['acq'] +'_' + df['type']

	#TODO: The following should be collapsed into one criterion category
	if functional_match or structural_match:
		res_df = pd.DataFrame()
		if functional_match:
			_df = deepcopy(df)
			try:
				if joint_conditions:
					for match in functional_match.keys():
						_df = _df.loc[_df[match].isin(functional_match[match])]
					res_df = res_df.append(_df)
				else:
					for match in functional_match.keys():
						_df = filter_data(_df, match, functional_match[match])
						res_df = res_df.append(_df)
			except:
				pass
		if structural_match:
			_df = deepcopy(df)
			try:
				if joint_conditions:
					for match in structural_match.keys():
						_df = _df.loc[_df[match].isin(structural_match[match])]
					res_df = res_df.append(_df)
				else:
					for match in structural_match.keys():
						_df = filter_data(_df, match, structural_match[match])
						res_df = res_df.append(_df)
			except:
				pass
		df = res_df

	if subjects:
		df = filter_data(df, 'subject', subjects)
	if sessions:
		df = filter_data(df, 'session', sessions)

	# Unclear in current BIDS specification, we refer to BOLD/CBV as modalities and func/anat as types
	df = df.rename(columns={'modality': 'type', 'type': 'modality'})

	return df
Example #34
def collect_data(dataset, participant_label, task=None):
    """
    Uses grabbids to retrieve the input data for a given participant

    >>> bids_root, _ = collect_data('ds054', '100185')
    >>> bids_root['fmap']  # doctest: +ELLIPSIS
    ['.../ds054/sub-100185/fmap/sub-100185_magnitude1.nii.gz', \
'.../ds054/sub-100185/fmap/sub-100185_magnitude2.nii.gz', \
'.../ds054/sub-100185/fmap/sub-100185_phasediff.nii.gz']

    >>> bids_root['bold']  # doctest: +ELLIPSIS
    ['.../ds054/sub-100185/func/sub-100185_task-machinegame_run-01_bold.nii.gz', \
'.../ds054/sub-100185/func/sub-100185_task-machinegame_run-02_bold.nii.gz', \
'.../ds054/sub-100185/func/sub-100185_task-machinegame_run-03_bold.nii.gz', \
'.../ds054/sub-100185/func/sub-100185_task-machinegame_run-04_bold.nii.gz', \
'.../ds054/sub-100185/func/sub-100185_task-machinegame_run-05_bold.nii.gz', \
'.../ds054/sub-100185/func/sub-100185_task-machinegame_run-06_bold.nii.gz']

    >>> bids_root['sbref']  # doctest: +ELLIPSIS
    ['.../ds054/sub-100185/func/sub-100185_task-machinegame_run-01_sbref.nii.gz', \
'.../ds054/sub-100185/func/sub-100185_task-machinegame_run-02_sbref.nii.gz', \
'.../ds054/sub-100185/func/sub-100185_task-machinegame_run-03_sbref.nii.gz', \
'.../ds054/sub-100185/func/sub-100185_task-machinegame_run-04_sbref.nii.gz', \
'.../ds054/sub-100185/func/sub-100185_task-machinegame_run-05_sbref.nii.gz', \
'.../ds054/sub-100185/func/sub-100185_task-machinegame_run-06_sbref.nii.gz']

    >>> bids_root['t1w']  # doctest: +ELLIPSIS
    ['.../ds054/sub-100185/anat/sub-100185_T1w.nii.gz']

    >>> bids_root['t2w']  # doctest: +ELLIPSIS
    []


    """
    layout = BIDSLayout(dataset, exclude='derivatives')
    queries = {
        'fmap': {'subject': participant_label, 'modality': 'fmap',
                 'extensions': ['nii', 'nii.gz']},
        'bold': {'subject': participant_label, 'modality': 'func', 'type': 'bold',
                 'extensions': ['nii', 'nii.gz']},
        'sbref': {'subject': participant_label, 'modality': 'func', 'type': 'sbref',
                  'extensions': ['nii', 'nii.gz']},
        'flair': {'subject': participant_label, 'modality': 'anat', 'type': 'FLAIR',
                  'extensions': ['nii', 'nii.gz']},
        't2w': {'subject': participant_label, 'modality': 'anat', 'type': 'T2w',
                'extensions': ['nii', 'nii.gz']},
        't1w': {'subject': participant_label, 'modality': 'anat', 'type': 'T1w',
                'extensions': ['nii', 'nii.gz']},
        'roi': {'subject': participant_label, 'modality': 'anat', 'type': 'roi',
                'extensions': ['nii', 'nii.gz']},
    }

    if task:
        queries['bold']['task'] = task

    subj_data = {modality: [x.filename for x in layout.get(**query)]
                 for modality, query in queries.items()}

    def _grp_echos(x):
        if '_echo-' not in x:
            return x
        echo = re.search("_echo-\\d*", x).group(0)
        return x.replace(echo, "_echo-?")

    if subj_data["bold"] is not []:
        bold_sess = subj_data["bold"]

        if any(['_echo-' in bold for bold in bold_sess]):
            ses_uids = [list(bold) for _, bold in groupby(bold_sess, key=_grp_echos)]
            ses_uids = [x[0] if len(x) == 1 else x for x in ses_uids]
        else:
            ses_uids = bold_sess

    subj_data.update({"bold": ses_uids})

    return subj_data, layout
Example #35
  {
    "function_name": None,
    "id": "quality_check.notes_QC",
    "name": "Notes"
  }
],
"metric_names": None,
"graph_type": None,
"staticURL": file_server,
"usePeerJS": False,
"logPainter": False,
"logContours": False,
"logPoints": True,
"qc_options": {"pass": 1, "fail": 1, "needs_edits": 0, "edited": 0, "assignTo": 0, "notes": 1, "confidence": 1}}

layout = BIDSLayout(bids_dir)

# Loop through files, adding them to the manifest
# add entity types to a set to be used to populate the
# autogenerated settings
manifest = []
entry_types = set()
for img in layout.get(extensions = ".nii.gz"):
    img_dict = {}
    img_dict["check_masks"] = [img.filename.replace(bids_dir,"")]
    entry_types.add(img.type)
    img_dict["entry_type"] = img.type
    img_dict["metrics"] = {}
    img_dict["name"] = os.path.split(img.filename)[1].split('.')[0]
    img_dict["subject"] = 'sub-' + img.subject
    img_dict["session"] = 'ses-' + img.session
Example #36
def collect_bids_data(dataset, subject, task=None, session=None, run=None):
    subject = str(subject)
    if subject.startswith('sub-'):
        subject = subject[4:]

    layout = BIDSLayout(dataset)

    if session:
        session_list = [session]
    else:
        session_list = layout.unique('session')
        if session_list == []:
            session_list = [None]

    if run:
        run_list = [run]
    else:
        run_list = layout.unique('run')
        if run_list == []:
            run_list = [None]

    queries = {
        'fmap': {'modality': 'fmap', 'ext': 'nii'},
        'epi': {'modality': 'func', 'type': 'bold', 'ext': 'nii'},
        'sbref': {'modality': 'func', 'type': 'sbref', 'ext': 'nii'},
        't1w': {'type': 'T1w', 'ext': 'nii'}
    }

    if task:
        queries['epi']['task'] = task

    #  Add a subject key pair to each query we make so that we only deal with
    #  files related to this workflows specific subject. Could be made opt...
    for key in queries.keys():
        queries[key]['subject'] = subject

    imaging_data = copy.deepcopy(INPUTS_SPEC)
    fieldmap_files = [x.filename for x in layout.get(**queries['fmap'])]
    imaging_data['fmap'] = fieldmap_files
    t1_files = [x.filename for x in layout.get(**queries['t1w'])]
    imaging_data['t1w'] = t1_files
    sbref_files = [x.filename for x in layout.get(**queries['sbref'])]
    imaging_data['sbref'] = sbref_files
    epi_files = [x.filename for x in layout.get(**queries['epi'])]
    imaging_data['func'] = epi_files

    '''
    loop_on = ['session', 'run', 'acquisition', 'task']
    get_kwargs = {}

    for key in loop_on:
        unique_list = layout.unique(key)
        if unique_list:
            get_kwargs[key] = unique_list

    query_kwargs = []
    for key in get_kwargs:
        query_kwargs.append([(key, x) for x in get_kwargs[key]])

    query_kwargs = itertools.product(*query_kwargs)

    for elem in query_kwargs:
        epi_files = [x.filename for x
                     in layout.get(**dict(dict(elem), **queries['epi']))]
        if epi_files:
            imaging_data['func'] += epi_files
    '''

    return imaging_data