Example No. 1
def _get_func_and_confounds(fmriprep_folder, sourcedata_folder):

    from bids import BIDSLayout
    fmriprep_layout = BIDSLayout(fmriprep_folder)
    sourcedata_layout = BIDSLayout(sourcedata_folder)

    files = fmriprep_layout.get(extensions=['.nii', '.nii.gz'],
                                modality='func',
                                suffix='preproc')

    confounds = []
    metadata = []

    for f in files:
        kwargs = {}

        for key in ['subject', 'run', 'task', 'session']:
            if hasattr(f, key):
                kwargs[key] = getattr(f, key)

        c = fmriprep_layout.get(suffix='confounds', **kwargs)
        c = c[0]
        confounds.append(c)

        sourcedata_file = sourcedata_layout.get(modality='func',
                                                extensions='nii.gz',
                                                **kwargs)

        assert (len(sourcedata_file) == 1)
        md = sourcedata_layout.get_metadata(sourcedata_file[0].filename)
        metadata.append(md)

    return list(zip(files, confounds, metadata))
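A minimal usage sketch (the paths are placeholders, not from the source): each returned item pairs a preprocessed BOLD file with its confounds file and the metadata dict of the raw source image.

for func, confound, md in _get_func_and_confounds('/data/derivatives/fmriprep',
                                                  '/data/sourcedata'):
    # print one line per run; RepetitionTime comes from the raw sidecar JSON
    print(func.filename, confound.filename, md.get('RepetitionTime'))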
Example No. 2
def main():
    parser = argparse.ArgumentParser(
        description='Gets the task list BEFORE the events files are copied into the dataset'
    )
    parser.add_argument("--bidsdir",
                        help="Path to a curated BIDS directory",
                        required=True)

    args = parser.parse_args()
    bidsdir = args.bidsdir

    layout = BIDSLayout(bidsdir)
    task_list = layout.get_tasks()

    # move scenemem to end of list
    if 'scenemem' in task_list:
        task_list.append(task_list.pop(task_list.index('scenemem')))

    # remove unhandled tasks
    if 'rest' in task_list:
        task_list.remove('rest')
    if 'binder' in task_list:
        task_list.remove('binder')
    if 'verbgen' in task_list:
        task_list.remove('verbgen')

    sys.stdout.write(' '.join(task_list))
Example No. 3
def copy_coord_from_BIDS_A_to_B(path_A, path_B):
    """
    simply copies all coordsystem.json and electrodes.tsv files from BIDS folder A to B
    :param path_A: source BIDS root
    :param path_B: destination BIDS root
    :return: None
    """
    layout = BIDSLayout(path_A)
    subjects = layout.get_subjects()

    for patient_idx in range(len(subjects)):
        # zero-pad the subject index to three digits, e.g. 7 -> '007'
        subject_id = str(patient_idx).zfill(3)

        for lat in ['right', 'left']:
            path_A_subject = os.path.join(path_A, 'sub-' + subject_id,
                                          'ses-' + lat)
            if not os.path.exists(path_A_subject):
                continue
            path_B_subject = os.path.join(path_B, 'sub-' + subject_id,
                                          'ses-' + lat)
            for fname in ['coordsystem.json', 'electrodes.tsv']:
                src = os.path.join(path_A_subject, 'eeg',
                                   'sub-' + subject_id + '_' + fname)
                dst = os.path.join(path_B_subject, 'eeg',
                                   'sub-' + subject_id + '_' + fname)
                copy(src, dst)
Example No. 4
def intendedfor_nearest_fieldmap(bids_dir):
    """

    :param bids_dir: str
        BIDS root directory
    :return:
    """

    layout = BIDSLayout(
        bids_dir,
        absolute_paths=True,
        ignore=['sourcedata', 'work', 'derivatives', 'exclude'])

    for subj in layout.get_subjects():

        # Find all JSON sidecars in bold and fmap folders
        bold_json = layout.get(return_type='file',
                               extensions=['.json'],
                               subject=subj,
                               suffix='bold')
        fmap_json = layout.get(return_type='file',
                               extensions=['.json'],
                               subject=subj,
                               datatype='fmap')

        print(bold_json)
        print(fmap_json)
Example No. 5
def collect_data(bids_dir, participant_label, queries, filters=None, bids_validate=True):
    """
    Uses pybids to retrieve the input data for a given participant
    """
    if isinstance(bids_dir, BIDSLayout):
        layout = bids_dir
    else:
        layout = BIDSLayout(str(bids_dir), validate=bids_validate)

    bids_filters = filters or {}
    for acq, entities in bids_filters.items():
        queries[acq].update(entities)

    subj_data = {
        dtype: sorted(
            layout.get(
                return_type="file",
                subject=participant_label,
                extension=["nii", "nii.gz"],
                **query
            )
        )
        for dtype, query in queries.items()
    }

    return subj_data, layout
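A sketch of how this variant might be called; the queries dict and entity values below are illustrative, not taken from the source:

queries = {
    'bold': {'datatype': 'func', 'suffix': 'bold'},
    't1w': {'datatype': 'anat', 'suffix': 'T1w'},
}
# filters updates the matching query in place, here restricting bold to one task
subj_data, layout = collect_data('/data/bids', '01', queries,
                                 filters={'bold': {'task': 'rest'}})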
Example No. 6
    def init_getters(self):
        """Initializes the getters methods for input paths and feature readers."""

        from visualqc.features import functional_mri_features
        self.feature_extractor = functional_mri_features

        if 'BIDS' in self.in_dir_type.upper():
            from bids import BIDSLayout
            self.bids_layout = BIDSLayout(self.in_dir)
            self.units = func_mri_traverse_bids(self.bids_layout,
                                                **cfg.func_mri_BIDS_filters)

            # file name of each BOLD scan is the unique identifier,
            #   as it essentially contains all the key info.
            self.unit_by_id = {
                basename(sub_data['image']): sub_data
                for _, sub_data in self.units.items()
            }
            self.id_list = list(self.unit_by_id.keys())

        elif 'GENERIC' in self.in_dir_type.upper():
            if self.id_list is None or self.images_for_id is None:
                raise ValueError('id_list or images_for_id can not be None '
                                 'for generic in_dir')
            self.unit_by_id = self.images_for_id.copy()
        else:
            raise NotImplementedError('Only two formats are supported: BIDS and '
                                      'GENERIC with regex spec for filenames')
Example No. 7
def test_collect_data(bids_dir, deriv_dir, sub_fmriprep, sub_metadata,
                      bold_file, preproc_file, sub_events, confounds_file,
                      brainmask_file, sub_rest_metadata, rest_file):

    with open(str(sub_metadata), "r") as sm:
        metadata = json.load(sm)

    expected_out = {
        'brainmask': str(brainmask_file),
        'confounds': str(confounds_file),
        'events': str(sub_events),
        'preproc': str(preproc_file),
        'metadata': metadata
    }
    layout = BIDSLayout(str(bids_dir), derivatives=str(deriv_dir))

    subject_label = '01'
    session = 'pre'
    task = 'waffles'
    run = '1'
    space = 'MNI152NLin2009cAsym'
    desc = 'preproc'
    subject_data = collect_data(layout,
                                subject_label,
                                ses=session,
                                task=task,
                                run=run,
                                space=space,
                                description=desc)[0]

    assert subject_data == expected_out
Example No. 8
def collect_data(bids_dir, participant_label, task=None, bids_validate=True):
    """
    Uses pybids to retrieve the input data for a given participant

    """
    if isinstance(bids_dir, BIDSLayout):
        layout = bids_dir
    else:
        layout = BIDSLayout(str(bids_dir), validate=bids_validate)

    queries = {
        'fmap': {'datatype': 'fmap'},
        'sbref': {'datatype': 'func', 'suffix': 'sbref'},
        'flair': {'datatype': 'anat', 'suffix': 'FLAIR'},
        't2w': {'datatype': 'anat', 'suffix': 'T2w'},
        't1w': {'datatype': 'anat', 'suffix': 'T1w'},
        'roi': {'datatype': 'anat', 'suffix': 'roi'},
        'dwi': {'datatype': 'dwi', 'suffix': 'dwi'}
    }

    subj_data = {
        dtype: sorted(layout.get(return_type='file', subject=participant_label,
                                 extension=['nii', 'nii.gz'], **query))
        for dtype, query in queries.items()}

    return subj_data, layout
Example No. 9
def grab_eventorder(path, subject):
    # pybids
    processed = BIDSLayout(path)
    # Grab all event files
    eventfiles = processed.get(subject=subject,
                               extension='.csv',
                               return_type='filename')
    # design matrices
    design = pd.DataFrame(columns=['CSplus_shock', 'CSplus', 'CSminus'])
    for event in eventfiles:
        ev = pd.read_csv(event,
                         header=None,
                         names=['CSplus_shock', 'CSplus', 'CSminus'])
        design = pd.concat([design, ev], ignore_index=True)
    # events are marked by 1, append their index to preserve order across runs
    CSplus_shock = design.index[design['CSplus_shock'] == 1]
    CSplus = design.index[design['CSplus'] == 1]
    CSminus = design.index[design['CSminus'] == 1]
    # group each index by CS type
    ev_order = []
    for ty in [CSplus_shock, CSplus, CSminus]:
        for cs in ty:
            if ty is CSplus_shock:
                ev_order.append(['csplusshock', cs])
            if ty is CSplus:
                ev_order.append(['csplus', cs])
            if ty is CSminus:
                ev_order.append(['csminus', cs])
    # reorder according to index
    event_ord = pd.DataFrame(ev_order, columns=['type', 'idx'])
    event_ord.sort_values(by=['idx'], inplace=True)
    event_ord.reset_index(inplace=True, drop=True)
    return event_ord, processed
Example No. 10
def read_layout_from_dataset(data_path):
    try:
        layout = BIDSLayout(data_path)
    except ValueError as error:
        return error

    return layout
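Because the function returns the ValueError instead of raising it, callers must check the result type; a short sketch (placeholder path):

result = read_layout_from_dataset('/data/bids')
if isinstance(result, ValueError):
    print('Not a valid BIDS dataset:', result)
else:
    print(result.get_subjects())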
Example No. 11
def main(num_tracts, participant_label, bids_dir, dmriprep_dir, template_file,
         atlas_file, output_dir):
    """Console script for tractify."""

    layout = BIDSLayout(bids_dir, validate=False)
    subject_list = utils.collect_participants(
        layout, participant_label=participant_label)

    work_dir = os.path.join(output_dir, "scratch")

    # Set parameters based on CLI, pass through object
    parameters = Parameters(layout=layout,
                            subject_list=subject_list,
                            bids_dir=bids_dir,
                            dmriprep_dir=dmriprep_dir,
                            work_dir=work_dir,
                            output_dir=output_dir,
                            template_file=template_file,
                            atlas_file=atlas_file,
                            num_tracts=num_tracts)

    wf = init_tractify_wf(parameters)
    wf.write_graph(graph2use="colored")
    wf.config["execution"]["remove_unnecessary_outputs"] = False
    wf.config["execution"]["keep_inputs"] = True
    wf.run()

    return 0
Example No. 12
    def reload(self):
        self.layout = BIDSLayout(self.root, derivatives=True)
        self.tr = self.layout.get_tr()
        with open(self.layout._get_unique(scope=self.name, suffix="pipeline").path) as file:
            pipeline = json.load(file)
        sys.path.append(os.path.dirname(self.layout._get_unique(
            scope=self.name, suffix="pipeline").path))
        self.masks = dict()
        for mask, mask_path in pipeline["Masks"].items():
            if not os.path.isabs(mask_path):
                mask_path = join(self.root, "derivatives",
                                 self.name, mask_path)
            self.masks[mask] = Brain_Data(mask_path)

        # Set up the process dictionary

        self.processes = dict()
        for process in pipeline["Processes"]:
            if not os.path.isabs(process["Source"]):
                process["Source"] = join(self.root, "derivatives",
                                         self.name, process["Source"])
            head, tail = os.path.split(os.path.abspath(process["Source"]))
            if tail.endswith(".py"):
                tail = tail[:-3]
            else:
                raise TypeError(f"{tail} is not a Python script.")
            sys.path.append(head)
            self.processes[process["Name"]] = Process(
                key=process["Readable"], process=getattr(__import__(tail), process["Name"]))
            sys.path.remove(head)
Example No. 13
def read_all_vhdr_filenames(BIDS_path):
    """
    :return: files: list of all vhdr file paths in BIDS_path
    """
    layout = BIDSLayout(BIDS_path)
    files = layout.get(extension='vhdr', return_type='filename')
    return files
Example No. 14
def bids_db_file(
    bids_dir,
    deriv_dir,
    sub_fmriprep,
    sub_metadata,
    bold_file,
    preproc_file,
    sub_events,
    confounds_file,
    brainmask_file,
    atlas_file,
    atlas_lut,
):
    from bids import BIDSLayout
    from .workflows.utils import BIDSLayoutIndexerPatch

    db_file = bids_dir / ".dbcache"

    layout = BIDSLayout(str(bids_dir),
                        derivatives=str(deriv_dir),
                        index_metadata=False,
                        database_file=str(db_file),
                        reset_database=True)

    # only index bold file metadata
    indexer = BIDSLayoutIndexerPatch(layout)
    metadata_filter = {
        'extension': ['nii', 'nii.gz', 'json'],
        'suffix': 'bold',
    }
    indexer.index_metadata(**metadata_filter)

    return db_file
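Later runs can then reload the layout from the cached database instead of re-indexing, using the same database_file keyword this fixture uses; a sketch with placeholder paths:

from bids import BIDSLayout

# reset_database defaults to False, so the existing .dbcache is reused
layout = BIDSLayout('/data/bids',
                    derivatives='/data/bids/derivatives',
                    database_file='/data/bids/.dbcache')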
Example No. 15
def main(sourcedata,
         derivatives,
         subject,
         session,
         run,
         wf_dir):

    layout = BIDSLayout(sourcedata)

    bolds = layout.get(subject=subject,
                       session=session,
                       run=run,
                       suffix='bold',
                       return_type='file')

    for bold in bolds:
        print('Making reference image of {}'.format(bold))

    inputnode = pe.Node(niu.IdentityInterface(fields=['bold']),
                        name='inputnode')
    inputnode.inputs.bold = bolds

    wf = pe.Workflow(name='make_ref_{}_{}_{}'.format(subject,
                                                     session,
                                                     run))

    wf.base_dir = wf_dir

    mc_wf_bold = create_motion_correction_workflow(name='mc_wf_bold',
                                                   method='FSL',
                                                   lightweight=True)

    wf.connect(inputnode, 'bold', mc_wf_bold, 'inputspec.in_files')
    wf.connect(inputnode, ('bold', pickfirst), mc_wf_bold, 'inputspec.which_file_is_EPI_space')

    mean_bold = pe.MapNode(fsl.MeanImage(dimension='T'),
                           iterfield=['in_file'],
                           name='mean_bold1')

    n4_correct = pe.MapNode(ants.N4BiasFieldCorrection(), 
                            iterfield=['input_image'],
                            name='n4_correct')
    wf.connect(mean_bold, 'out_file', n4_correct, 'input_image')
    
    ds = pe.MapNode(DerivativesDataSink(out_path_base='simple_bold_ref',
                                        suffix='reference',
                                        base_directory=derivatives),
                    iterfield=['in_file', 'source_file'],
                    name='ds_reg_report')
    
    wf.connect(mc_wf_bold, 'outputspec.motion_corrected_files', mean_bold, 'in_file')
    wf.connect(n4_correct, 'output_image', ds, 'in_file')
    wf.connect(inputnode, 'bold', ds, 'source_file')

    wf.run()
Example No. 16
def collect_data(bids_dir,
                 participant_label,
                 filters=None,
                 bids_validate=True):
    """
    Uses pybids to retrieve the input data for a given participant

    """
    if isinstance(bids_dir, BIDSLayout):
        layout = bids_dir
    else:
        layout = BIDSLayout(str(bids_dir), validate=bids_validate)

    queries = {
        'fmap': {
            'datatype': 'fmap'
        },
        'sbref': {
            'datatype': 'func',
            'suffix': 'sbref'
        },
        'flair': {
            'datatype': 'anat',
            'suffix': 'FLAIR'
        },
        't2w': {
            'datatype': 'anat',
            'suffix': 'T2w'
        },
        't1w': {
            'datatype': 'anat',
            'suffix': 'T1w'
        },
        'roi': {
            'datatype': 'anat',
            'suffix': 'roi'
        },
        'dwi': {
            'datatype': 'dwi',
            'suffix': 'dwi'
        }
    }
    bids_filters = filters or {}
    for acq, entities in bids_filters.items():
        queries[acq].update(entities)

    subj_data = {
        dtype: sorted(
            layout.get(
                return_type="file",
                subject=participant_label,
                extension=["nii", "nii.gz"],
                **query,
            ))
        for dtype, query in queries.items()
    }

    return subj_data, layout
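For instance, a caller could narrow the T1w query to one acquisition via the filters argument (illustrative values):

subj_data, layout = collect_data(
    '/data/bids', '01',
    filters={'t1w': {'acquisition': 'mprage'}})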
Example No. 17
def test_dcm2bids():
    # tmpBase = os.path.join(TEST_DATA_DIR, "tmp")
    # bidsDir = TemporaryDirectory(dir=tmpBase)
    bidsDir = TemporaryDirectory()

    tmpSubDir = os.path.join(bidsDir.name, DEFAULT.tmpDirName, "sub-01")
    shutil.copytree(os.path.join(TEST_DATA_DIR, "sidecars"), tmpSubDir)

    app = Dcm2bids(
        [TEST_DATA_DIR],
        "01",
        os.path.join(TEST_DATA_DIR, "config_test.json"),
        bidsDir.name,
    )
    app.run()
    layout = BIDSLayout(bidsDir.name, validate=False)

    assert layout.get_subjects() == ["01"]
    assert layout.get_sessions() == []
    assert layout.get_tasks() == ["rest"]
    assert layout.get_runs() == [1, 2, 3]

    app = Dcm2bids(
        [TEST_DATA_DIR],
        "01",
        os.path.join(TEST_DATA_DIR, "config_test.json"),
        bidsDir.name,
    )
    app.run()

    fmapFile = os.path.join(bidsDir.name, "sub-01", "fmap",
                            "sub-01_echo-492_fmap.json")
    data = load_json(fmapFile)
    fmapMtime = os.stat(fmapFile).st_mtime
    assert data["IntendedFor"] == "dwi/sub-01_dwi.nii.gz"

    data = load_json(
        os.path.join(bidsDir.name, "sub-01", "localizer",
                     "sub-01_run-01_localizer.json"))
    assert data["ProcedureStepDescription"] == "Modify by dcm2bids"

    # rerun
    shutil.rmtree(tmpSubDir)
    shutil.copytree(os.path.join(TEST_DATA_DIR, "sidecars"), tmpSubDir)

    app = Dcm2bids(
        [TEST_DATA_DIR],
        "01",
        os.path.join(TEST_DATA_DIR, "config_test.json"),
        bidsDir.name,
    )
    app.run()

    fmapMtimeRerun = os.stat(fmapFile).st_mtime
    assert fmapMtime == fmapMtimeRerun

    if os.name != 'nt':
        bidsDir.cleanup()
Example No. 18
    def read_BIDS_coordinates(BIDS_path):
        """from BIDS_path np array coordinate arrays are read and returned in list respective to subjects
        
        Args:
            BIDS_path (string): absolute BIDS path
        
        Returns:
            coord_arr (np array): array with shape (len(subjects), 4), where indexes in the following order: left ecog, left stn, right ecog, right stn,
            coord_arr_names (np array): array with shape  (len(subjects), 2), where coord names are saved in order: left, right
        """

        layout = BIDSLayout(BIDS_path)
        subjects = layout.get_subjects()
        sessions = layout.get_sessions()
        coord_arr = np.empty(
            (len(subjects), 4),
            dtype=object)  # left ecog, left stn, right ecog, right stn
        coord_arr_names = np.empty((len(subjects), 2), dtype=object)

        for subject_idx, subject in enumerate(subjects):
            for sess in sessions:

                coord_path = os.path.join(BIDS_path, 'sub-' + subject,
                                          'ses-' + sess, 'eeg',
                                          'sub-' + subject + '_electrodes.tsv')

                print(coord_path)
                if not os.path.exists(coord_path):
                    continue
                df = pd.read_csv(coord_path, sep="\t")

                if sess == 'left':
                    if df['name'].str.contains("ECOG").any():
                        # columns 1:4 of electrodes.tsv hold the x/y/z coordinates
                        coord_arr[subject_idx, 0] = np.array(
                            df[df['name'].str.contains("ECOG")])[:, 1:4].astype(float)
                    if df['name'].str.contains("STN").any():
                        coord_arr[subject_idx, 1] = np.array(
                            df[df['name'].str.contains("STN")])[:, 1:4].astype(float)
                    coord_arr_names[subject_idx, 0] = list(df['name'])
                elif sess == 'right':
                    if df['name'].str.contains("ECOG").any():
                        coord_arr[subject_idx, 2] = np.array(
                            df[df['name'].str.contains("ECOG")])[:, 1:4].astype(float)
                    if df['name'].str.contains("STN").any():
                        coord_arr[subject_idx, 3] = np.array(
                            df[df['name'].str.contains("STN")])[:, 1:4].astype(float)
                    coord_arr_names[subject_idx, 1] = list(df['name'])

        return coord_arr, coord_arr_names
Example No. 19
def main():
    parser = _build_arg_parser()
    args = parser.parse_args()

    assert_outputs_exist(parser, args, args.out_json)

    data = []
    layout = BIDSLayout(args.in_bids, index_metadata=False)
    subjects = layout.get_subjects()

    if args.participants_label:
        subjects = [
            nSub for nSub in args.participants_label if nSub in subjects
        ]

    for nSub in subjects:
        dwis = layout.get(subject=nSub,
                          datatype='dwi',
                          extension='nii.gz',
                          suffix='dwi')
        t1s = layout.get(subject=nSub,
                         datatype='anat',
                         extension='nii.gz',
                         suffix='T1w')
        fmaps = layout.get(subject=nSub,
                           datatype='fmap',
                           extension='nii.gz',
                           suffix='epi')
        bvals = layout.get(subject=nSub,
                           datatype='dwi',
                           extension='bval',
                           suffix='dwi')
        bvecs = layout.get(subject=nSub,
                           datatype='dwi',
                           extension='bvec',
                           suffix='dwi')

        # Get associations relatives to DWIs
        associations = get_dwi_associations(fmaps, bvals, bvecs)

        # Get the data for each run of DWIs
        for dwi in dwis:
            data.append(
                get_data(nSub, dwi, t1s, associations, args.readout,
                         args.clean))

    if args.clean:
        data = [d for d in data if d]

    with open(args.out_json, 'w') as outfile:
        json.dump(data,
                  outfile,
                  indent=4,
                  separators=(',', ': '),
                  sort_keys=True)
        # Add trailing newline for POSIX compatibility
        outfile.write('\n')
Example No. 20
def get_food_temptation_data(dataset_path: str = DATASET_DIR) -> BIDSLayout:
    """
    This project is hardcoded to work with this specific OpenNeuro dataset
    """
    print("Using dataset path: \"{}\"".format(dataset_path))
    layout = BIDSLayout(dataset_path)

    print(layout)
    return layout
Example No. 21
def reduce_sub_files(bids_dir, output_file, sub_file):
    df = pd.DataFrame([])
    layout = BIDSLayout(bids_dir)
    files = layout.get(extension=sub_file)
    for file in [f.filename for f in files]:
        print(file)
        df_ = read_tsv(file)
        df = pd.concat((df, df_))

    to_tsv(df, os.path.join(bids_dir, output_file))
Example No. 22
    def get_metadata(self, dataset, content):
        derivative_exist = exists(opj(self.ds.path, 'derivatives'))
        bids = BIDSLayout(self.ds.path, derivatives=derivative_exist)

        dsmeta = self._get_dsmeta(bids)

        if not content:
            return dsmeta, []

        return dsmeta, self._get_cnmeta(bids)
Example No. 23
def rex_bids_csv(folder_path, to_save, ftype):
    """[CSV generation for BIDS datasets]
    [This function is used to generate a csv for BIDS datasets]
    Arguments:
        folder_path {[string]} -- [Takes the folder to see where to look for
                                   the different modalities]
        to_save {[string]} -- [Takes the folder as a string to save the csv]
        ftype {[string]} -- [Are you trying to save train, validation or test,
                             if file type is set to test, it does not look for
                             ground truths]
    """
    if ftype == "test":
        csv_file = open(os.path.join(to_save, ftype + ".csv"), "w+")
        csv_file.write("ID,")
    else:
        csv_file = open(os.path.join(to_save, ftype + ".csv"), "w+")
        csv_file.write("ID,gt_path,")
    # load BIDS dataset into memory
    layout = BIDSLayout(folder_path)
    bids_df = layout.to_df()
    bids_modality_df = {
        "t1": bids_df[bids_df["suffix"] == "T1w"],
        "t2": bids_df[bids_df["suffix"] == "T2w"],
        "flair": bids_df[bids_df["suffix"] == "FLAIR"],
        "t1ce": bids_df[bids_df["suffix"] == "T1CE"],
    }
    # check what modalities the dataset contains
    modalities = []
    for modality, df in bids_modality_df.items():
        if not df.empty:
            modalities.append(modality)
    # write headers for those modalities
    for modality in modalities[:-1]:
        csv_file.write(modality + "_path,")
    modality = modalities[-1]
    csv_file.write(modality + "_path\n")
    # write image paths for each subject
    for sub in layout.get_subjects():
        csv_file.write(sub)
        csv_file.write(",")
        if ftype != "test":
            ground_truth = glob.glob(
                os.path.join(folder_path, sub, "*mask.nii.gz"))[0]
            csv_file.write(ground_truth)
            csv_file.write(",")
        for modality in modalities[:-1]:
            img = bids_modality_df[modality][bids_df["subject"] ==
                                             sub].path.values
            csv_file.write(img[0])
            csv_file.write(",")
        modality = modalities[-1]
        img = bids_modality_df[modality][bids_df["subject"] == sub].path.values
        csv_file.write(img[0])
        csv_file.write("\n")
    csv_file.close()
Example No. 24
    def _resolve_bids(self, fileobj):
        layout = BIDSLayout(fileobj.path, absolute_paths=True, validate=False)

        basemetadata = dict()
        if hasattr(fileobj, "metadata") and isinstance(fileobj.metadata, dict):
            basemetadata = fileobj.metadata

        resolved_files = []

        for filepath, obj in layout.get_files().items():

            entitydict = obj.get_entities()

            tags = dict()
            for k, v in entitydict.items():
                entity = entity_shortnames[k] if k in entity_shortnames else k
                if entity in entities:
                    tags[entity] = str(v)

            filedict = {
                "datatype": entitydict.get("datatype"),
                "suffix": entitydict.get("suffix"),
                "extension": entitydict.get("extension"),
                "path": filepath,
                "tags": tags,
                "metadata": {
                    **basemetadata,
                    **obj.get_metadata()
                }
            }

            if filedict["extension"] is not None:
                extension = filedict["extension"]
                if not extension.startswith("."):
                    filedict["extension"] = f".{extension}"

            if filedict["datatype"] is None:
                continue  # catch README and dataset_description.json etc

            # TODO IntendedFor

            try:
                resolved_fileobj = file_schema.load(filedict, unknown=EXCLUDE)

                self.fileobj_by_filepaths[filepath] = resolved_fileobj
                self.specfileobj_by_filepaths[resolved_fileobj.path] = fileobj

                resolved_files.append(resolved_fileobj)
            except marshmallow.exceptions.ValidationError as e:
                logging.getLogger("halfpipe.ui").warning(
                    f'Ignored validation error for "{filepath}": %s',
                    e,
                    stack_info=False)

        return resolved_files
Example No. 25
def participant_workflow(args):
    fsl.FSLCommand.set_default_output_type('NIFTI_GZ')
    inbidslayout = BIDSLayout(args.input_dataset,
                              validate=not args.skip_validation)
    outbidslayout = utils.get_BIDSLayout_with_conf(args.output_folder,
                                                   validate=False)

    wf = pe.Workflow(name='participant')
    if not args.debug_io:
        qformfiles = ['model', 'model_brain_mask', 'atlas']
        if args.subcortical:
            qformfiles.extend([
                'subcortical_model', 'subcortical_model_brain_mask',
                'subcortical_atlas'
            ])
        if args.intracranial_volume:
            qformfiles.append('intracranial_mask')
        qformwf = forceqform_workflow(qformfiles, args.max_shear_angle)
        for qformfile in qformfiles:
            setattr(qformwf.inputs.inputspec, qformfile,
                    getattr(args, qformfile))
        t1inputspec = qformfiles + ['model_brain']
        maskmodel = pe.Node(fsl.ImageMaths(), 'maskmodel')
        wf.connect([(qformwf, maskmodel, [('outputspec.model', 'in_file'),
                                          ('outputspec.model_brain_mask',
                                           'mask_file')])])
        if args.subcortical:
            masksubcortmodel = pe.Node(fsl.ImageMaths(), 'masksubcortmodel')
            wf.connect([(qformwf, masksubcortmodel, [
                ('outputspec.subcortical_model', 'in_file'),
                ('outputspec.subcortical_model_brain_mask', 'mask_file')
            ])])
            t1inputspec.append('subcortical_model_brain')
    else:
        t1inputspec = []
    for T1_scan, T1_entities in _get_scans(
            inbidslayout, args.bids_filter,
            subject_list=args.participant_labels):
        tmpwf = t1_workflow(T1_scan, T1_entities, outbidslayout, args,
                            t1inputspec)
        if not args.debug_io:
            for qformfile in qformfiles:
                wf.connect(qformwf, f'outputspec.{qformfile}', tmpwf,
                           f'inputspec.{qformfile}')
            wf.connect(maskmodel, 'out_file', tmpwf, 'inputspec.model_brain')
            if args.subcortical:
                wf.connect(masksubcortmodel, 'out_file', tmpwf,
                           'inputspec.subcortical_model_brain')
        else:
            wf.add_nodes([tmpwf])
    _update_workdir(wf, args.working_directory)
    if args.resource_input_file is not None:
        _set_resource_data(wf, args.resource_input_file)
    return wf
Example No. 26
def read_all_vhdr_filenames(BIDS_path):
    """List of all vhdr file paths in BIDS_path.

    Args:
        BIDS_path (string): absolute path of BIDS folder

    Returns:
        list: all vhdr files in the given BIDS path
    """
    layout = BIDSLayout(BIDS_path)
    files = layout.get(extension='vhdr', return_type='filename')
    return files
Example No. 27
def main(args):   
    analysis_params = json.load(open(args.config_file))
    
    epileptic_windows = pd.read_csv(analysis_params['epleptic_windows_file'])
    epileptic_windows[['Start', 'End']] = (epileptic_windows[['Start', 'End']] * 1000).astype(int)
    epileptic_windows = epileptic_windows.groupby('subject_number')

    root_path = os.path.join(analysis_params['data_path'])
    layout = BIDSLayout(root_path)

    for subject in layout.get(target='subject', extension='edf'): 
        subject_code = int(subject.entities['subject'])
        result_fname = os.path.join(analysis_params['output_path'], 'sub-{}_spectrum.pickle'.format(subject.entities['subject'])) 

        montage_filename = os.path.join(subject.dirname,  'sub-{}_montage.tcsv'.format(subject.entities['subject']))
        electrodes_filename = os.path.join(subject.dirname,  'sub-{}_electrodes.tcsv'.format(subject.entities['subject']))
        data_filename = subject.path

        if not(os.path.exists(montage_filename) and os.path.exists(electrodes_filename) and os.path.exists(data_filename)):
            print('Cannot find data for subject {}'.format(subject.entities['subject']))
            continue

        bipo = make_bipolar(data_filename, montage_filename, analysis_params['lowpass_filter'], analysis_params['pure_bipolar'])

        ref_mask = create_reference_mask(bipo).astype(int)
        electrodes_distance = get_electrode_distance(bipo.ch_names, electrodes_filename)

        if subject_code in epileptic_windows.groups:
            subject_ez_windows = epileptic_windows.get_group(subject_code)
            subject_ez_samples_mask = get_ez_samples_mask(subject_ez_windows, bipo._data)
        else:
            subject_ez_samples_mask = np.ones(bipo._data.shape[1], dtype=bool)

        n_chans = len(bipo.ch_names)

        frequencies = get_frequencies()

        cplv_spectrum = np.zeros((len(frequencies), n_chans, n_chans), dtype=complex)
        cplv_surrogate = np.zeros((len(frequencies), n_chans, n_chans), dtype=complex)

        for freq_idx, frequency in enumerate(tqdm.tqdm(frequencies, leave=False, desc='Subject {}'.format(subject.entities['subject']))):
            freq_cplv, freq_cplv_surr = routine_cpu(bipo, bipo.info['sfreq'], frequency, subject_ez_samples_mask)

            cplv_spectrum[freq_idx] = freq_cplv*ref_mask
            cplv_surrogate[freq_idx] = freq_cplv_surr*ref_mask

        res = {'frequencies': frequencies, 
                'cplv_spectrum': cplv_spectrum, 'surrogate_spectrum': cplv_surrogate, 
                'reference_mask': ref_mask, 'electrodes_distance': electrodes_distance, 
                'analysis_parameters': analysis_params}
        
        pickle.dump(res, open(result_fname, 'wb'))
Example No. 28
def main():

    parser = create_parser()
    args = parser.parse_args()

    n_cpus = args.jobs[0]
    
    if args.bids:
        if (args.bids_sub is None):  # if bids folder is provided but no subject, raise error
            parser.error("--bids requires --bids_sub")
        else:  # both bids and bids_sub
            layout = BIDSLayout(args.bids[0])
            if args.bids_sub[0] not in layout.get_subjects():
                parser.error("Unknown subject, not in BIDS structure")
            else:
                f = layout.get(subject=args.bids_sub[0], extension='gii.gz')[0]
                nib_surf, vertices, faces = io.open_gifti_surf(f) # hoping this f contains the file. TODO
    else:
        nib_surf, vertices, faces = io.open_gifti_surf(args.surface[0])

    nib = nibabel.load(args.data[0])
    if len(nib.darrays) > 1:
        cifti = np.array([n.data for n in nib.darrays]).transpose()
    else:
        cifti = nib.darrays[0].data

    if args.full_brain:
        print("Running full brain analyses")
        if args.mask is None:
            print("A mask file must be provided through the --label flag. See --help")
            quit()
        _, labels = io.open_gifti(args.mask[0])
        cort_index = np.array(labels, dtype=bool)
        Z = np.array(cort_index, dtype=int)
        result = vb.vb_cluster(vertices, faces, n_cpus, cifti, Z, args.norm[0], args.output[0] + "." + args.norm[0], nib_surf)

    elif args.clusters is None:
        print("Running searchlight analyses")
        if args.mask is None:
            print("A mask file must be provided through the --label flag. See --help")
            quit()
        # Read labels
        _, labels = io.open_gifti(args.mask[0])
        cort_index = np.array(labels, dtype=bool)
        result = vb.vb_index(vertices, faces, n_cpus, cifti, args.norm[0], cort_index, args.output[0] + "." + args.norm[0], nib_surf)

    else:
        print("Running ROI analyses")
        nib, Z = io.open_gifti(args.clusters[0])
        Z = np.array(Z, dtype=int)
        result = vb.vb_cluster(vertices, faces, n_cpus, cifti, Z, args.norm[0], args.output[0] + "." + args.norm[0], nib_surf)
Example No. 29
def anon_acqtimes(dset_dir):
    """
    Anonymize acquisition datetimes for a dataset.

    Anonymize acquisition datetimes for a dataset. Works for both longitudinal
    and cross-sectional studies. The time of day is preserved, but the first
    scan is set to January 1st, 1800. In a longitudinal study, each session is
    anonymized relative to the first session, so that time between sessions is
    preserved.

    Overwrites scan tsv files in dataset. Only run this *after* data collection
    is complete for the study, especially if it's longitudinal.

    Parameters
    ----------
    dset_dir : str
        Path to BIDS dataset to be anonymized.
    """
    bl_dt = parser.parse('1800-01-01')

    layout = BIDSLayout(dset_dir)
    subjects = layout.get_subjects()
    sessions = sorted(layout.get_sessions())

    for sub in subjects:
        if not sessions:
            scans_file = op.join(dset_dir, f'sub-{sub}/sub-{sub}_scans.tsv')
            df = pd.read_csv(scans_file, sep='\t')
            first_scan = df['acq_time'].min()
            first_dt = parser.parse(first_scan.split('T')[0])
            diff = first_dt - bl_dt
            acq_times = df['acq_time'].apply(parser.parse)
            acq_times = (acq_times - diff).astype(str)
            df['acq_time'] = acq_times
            # df.to_csv(scans_file, sep='\t', index=False)
        else:
            # Separated from dataset sessions in case subject missed some
            sub_ses = sorted(layout.get_sessions(subject=sub))
            for i, ses in enumerate(sub_ses):
                scans_file = op.join(
                    dset_dir,
                    f'sub-{sub}/ses-{ses}/sub-{sub}_ses-{ses}_scans.tsv')
                df = pd.read_csv(scans_file, sep='\t')
                if i == 0:
                    # Anonymize in terms of first scan for subject.
                    first_scan = df['acq_time'].min()
                    first_dt = parser.parse(first_scan.split('T')[0])
                    diff = first_dt - bl_dt

                acq_times = df['acq_time'].apply(parser.parse)
                acq_times = (acq_times - diff).astype(str)
                df['acq_time'] = acq_times
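The arithmetic preserves the time of day because diff is a whole-day timedelta (first_dt is parsed from the date part only); a small illustration with a made-up timestamp:

from dateutil import parser

first_scan = '2019-05-04T10:30:00'
first_dt = parser.parse(first_scan.split('T')[0])   # midnight of the first scan day
diff = first_dt - parser.parse('1800-01-01')        # whole days between the two dates
print(parser.parse(first_scan) - diff)              # 1800-01-01 10:30:00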
Example No. 30
def get_scan_duration(output_dir, modality="func", task="rest"):
    layout = BIDSLayout(output_dir)
    df = layout.to_df()
    scans_df = df.query(
        "datatype==@modality & task==@task & extension=='nii.gz'")

    scan_durations = []
    for file in scans_df.path:
        scan_durations.append(layout.get_metadata(file)["ScanDurationSec"])
    scans_df["scan_duration"] = scan_durations
    scans_df.reset_index(drop=True, inplace=True)

    return scans_df
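A possible call, assuming the dataset's JSON sidecars carry the custom ScanDurationSec field (the path is a placeholder):

scans_df = get_scan_duration('/data/bids', modality="func", task="rest")
print(scans_df[['subject', 'scan_duration']])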