Code example #1
import os
from shutil import copy

from bids import BIDSLayout


def copy_coord_from_BIDS_A_to_B(path_A, path_B):
    """
    Copy all coordsystem.json and electrodes.tsv files from BIDS folder A to B.

    :param path_A: source BIDS root directory
    :param path_B: destination BIDS root directory
    :return: None
    """
    # Index the source dataset; the original indexed a global
    # settings.BIDS_path, which ignored the path_A argument.
    layout = BIDSLayout(path_A)
    subjects = layout.get_subjects()

    for patient_idx in range(len(subjects)):
        # zero-pad the subject index to three digits, e.g. 7 -> '007'
        subject_id = str(patient_idx).zfill(3)

        for lat in ['right', 'left']:
            path_A_subject = os.path.join(path_A, 'sub-' + subject_id,
                                          'ses-' + lat)
            if os.path.exists(path_A_subject):
                path_B_subject = os.path.join(path_B, 'sub-' + subject_id,
                                              'ses-' + lat)
                # copy both coordinate files from the A tree into the B tree
                for fname in ('coordsystem.json', 'electrodes.tsv'):
                    src = os.path.join(path_A_subject, 'eeg',
                                       'sub-' + subject_id + '_' + fname)
                    dst = os.path.join(path_B_subject, 'eeg',
                                       'sub-' + subject_id + '_' + fname)
                    copy(src, dst)
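
A minimal usage sketch (the paths are hypothetical; both arguments are assumed to be BIDS roots that contain ses-left / ses-right sessions):

copy_coord_from_BIDS_A_to_B('/data/bids_A/', '/data/bids_B/')
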
Code example #2
from bids import BIDSLayout


def intendedfor_nearest_fieldmap(bids_dir):
    """
    Print the BOLD and fieldmap JSON sidecars found for each subject.

    :param bids_dir: str
        BIDS root directory
    :return: None
    """

    layout = BIDSLayout(
        bids_dir,
        absolute_paths=True,
        ignore=['sourcedata', 'work', 'derivatives', 'exclude'])

    for subj in layout.get_subjects():

        # Find all JSON sidecars in bold and fmap folders
        bold_json = layout.get(return_type='file',
                               extensions=['.json'],
                               subject=subj,
                               suffix='bold')
        fmap_json = layout.get(return_type='file',
                               extensions=['.json'],
                               subject=subj,
                               datatype='fmap')  # the original queried suffix='json', which is not a BIDS suffix

        print(bold_json)
        print(fmap_json)
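
In pybids 0.9+ the plural `extensions` filter was deprecated in favor of the singular `extension` entity; a sketch of the equivalent modern query (same hypothetical filters as above):

bold_json = layout.get(return_type='file',
                       extension='.json',
                       subject=subj,
                       suffix='bold')
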
Code example #3
import os

import numpy as np
import pandas as pd
from bids import BIDSLayout


def read_BIDS_coordinates(BIDS_path):
    """Read electrode coordinate arrays from a BIDS dataset, one entry per subject.

    Args:
        BIDS_path (string): absolute BIDS path

    Returns:
        coord_arr (np array): array with shape (len(subjects), 4), indexed in
            the order: left ecog, left stn, right ecog, right stn
        coord_arr_names (np array): array with shape (len(subjects), 2), where
            coordinate names are saved in the order: left, right
    """
    layout = BIDSLayout(BIDS_path)
    subjects = layout.get_subjects()
    sessions = layout.get_sessions()
    coord_arr = np.empty(
        (len(subjects), 4),
        dtype=object)  # left ecog, left stn, right ecog, right stn
    coord_arr_names = np.empty((len(subjects), 2), dtype=object)

    for subject_idx, subject in enumerate(subjects):
        for sess in sessions:
            coord_path = os.path.join(BIDS_path, 'sub-' + subject,
                                      'ses-' + sess, 'eeg',
                                      'sub-' + subject + '_electrodes.tsv')
            if not os.path.exists(coord_path):
                continue
            df = pd.read_csv(coord_path, sep="\t")

            # columns 1:4 of electrodes.tsv hold the x, y, z coordinates
            if sess == 'left':
                if df['name'].str.contains("ECOG").any():
                    coord_arr[subject_idx, 0] = np.array(
                        df[df['name'].str.contains("ECOG")])[:, 1:4].astype(float)
                if df['name'].str.contains("STN").any():
                    coord_arr[subject_idx, 1] = np.array(
                        df[df['name'].str.contains("STN")])[:, 1:4].astype(float)
                coord_arr_names[subject_idx, 0] = list(df['name'])
            elif sess == 'right':
                if df['name'].str.contains("ECOG").any():
                    coord_arr[subject_idx, 2] = np.array(
                        df[df['name'].str.contains("ECOG")])[:, 1:4].astype(float)
                if df['name'].str.contains("STN").any():
                    coord_arr[subject_idx, 3] = np.array(
                        df[df['name'].str.contains("STN")])[:, 1:4].astype(float)
                coord_arr_names[subject_idx, 1] = list(df['name'])

    return coord_arr, coord_arr_names
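
A usage sketch (path hypothetical; assumes sessions are named 'left'/'right' as the function expects):

coords, coord_names = read_BIDS_coordinates('/data/bids')
left_ecog = coords[0, 0]  # (n_electrodes, 3) float array for the first subject, or None
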
Code example #4
File: test_dcm2bids.py Project: cbedetti/Dcm2Bids
def test_dcm2bids():
    # tmpBase = os.path.join(TEST_DATA_DIR, "tmp")
    # bidsDir = TemporaryDirectory(dir=tmpBase)
    bidsDir = TemporaryDirectory()

    tmpSubDir = os.path.join(bidsDir.name, DEFAULT.tmpDirName, "sub-01")
    shutil.copytree(
            os.path.join(TEST_DATA_DIR, "sidecars"),
            tmpSubDir)

    app = Dcm2bids(
            [TEST_DATA_DIR], "01",
            os.path.join(TEST_DATA_DIR, "config_test.json"),
            bidsDir.name
            )
    app.run()
    layout = BIDSLayout(bidsDir.name, validate=False)

    assert layout.get_subjects() == ["01"]
    assert layout.get_sessions() == []
    assert layout.get_tasks() == ["rest"]
    assert layout.get_runs() == [1, 2, 3]

    app = Dcm2bids(
            [TEST_DATA_DIR], "01",
            os.path.join(TEST_DATA_DIR, "config_test.json"),
            bidsDir.name
            )
    app.run()


    fmapFile = os.path.join(
            bidsDir.name, "sub-01", "fmap", "sub-01_echo-492_fmap.json")
    data = load_json(fmapFile)
    fmapMtime = os.stat(fmapFile).st_mtime
    assert data["IntendedFor"] == "dwi/sub-01_dwi.nii.gz"

    data = load_json(os.path.join(
        bidsDir.name, "sub-01", "localizer", "sub-01_run-01_localizer.json"))
    assert data["ProcedureStepDescription"] == "Modify by dcm2bids"

    # rerun
    shutil.rmtree(tmpSubDir)
    shutil.copytree(
            os.path.join(TEST_DATA_DIR, "sidecars"),
            tmpSubDir)

    app = Dcm2bids(
            [TEST_DATA_DIR], "01",
            os.path.join(TEST_DATA_DIR, "config_test.json"),
            bidsDir.name
            )
    app.run()

    fmapMtimeRerun = os.stat(fmapFile).st_mtime
    assert fmapMtime == fmapMtimeRerun

    bidsDir.cleanup()
Code example #5
def test_dcm2bids():
    # tmpBase = os.path.join(TEST_DATA_DIR, "tmp")
    # bidsDir = TemporaryDirectory(dir=tmpBase)
    bidsDir = TemporaryDirectory()

    tmpSubDir = os.path.join(bidsDir.name, DEFAULT.tmpDirName, "sub-01")
    shutil.copytree(os.path.join(TEST_DATA_DIR, "sidecars"), tmpSubDir)

    app = Dcm2bids(
        [TEST_DATA_DIR],
        "01",
        os.path.join(TEST_DATA_DIR, "config_test.json"),
        bidsDir.name,
    )
    app.run()
    layout = BIDSLayout(bidsDir.name, validate=False)

    assert layout.get_subjects() == ["01"]
    assert layout.get_sessions() == []
    assert layout.get_tasks() == ["rest"]
    assert layout.get_runs() == [1, 2, 3]

    app = Dcm2bids(
        [TEST_DATA_DIR],
        "01",
        os.path.join(TEST_DATA_DIR, "config_test.json"),
        bidsDir.name,
    )
    app.run()

    fmapFile = os.path.join(bidsDir.name, "sub-01", "fmap",
                            "sub-01_echo-492_fmap.json")
    data = load_json(fmapFile)
    fmapMtime = os.stat(fmapFile).st_mtime
    assert data["IntendedFor"] == "dwi/sub-01_dwi.nii.gz"

    data = load_json(
        os.path.join(bidsDir.name, "sub-01", "localizer",
                     "sub-01_run-01_localizer.json"))
    assert data["ProcedureStepDescription"] == "Modify by dcm2bids"

    # rerun
    shutil.rmtree(tmpSubDir)
    shutil.copytree(os.path.join(TEST_DATA_DIR, "sidecars"), tmpSubDir)

    app = Dcm2bids(
        [TEST_DATA_DIR],
        "01",
        os.path.join(TEST_DATA_DIR, "config_test.json"),
        bidsDir.name,
    )
    app.run()

    fmapMtimeRerun = os.stat(fmapFile).st_mtime
    assert fmapMtime == fmapMtimeRerun

    if os.name != 'nt':
        bidsDir.cleanup()
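
The os.name != 'nt' guard sidesteps TemporaryDirectory cleanup errors on Windows, where files can still be locked. On Python 3.10+ one alternative (a sketch, not what the project uses) is:

from tempfile import TemporaryDirectory

# ignore_cleanup_errors was added in Python 3.10
bidsDir = TemporaryDirectory(ignore_cleanup_errors=True)
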
Code example #6
def main():
    parser = _build_arg_parser()
    args = parser.parse_args()

    assert_outputs_exist(parser, args, args.out_json)

    data = []
    layout = BIDSLayout(args.in_bids, index_metadata=False)
    subjects = layout.get_subjects()

    if args.participants_label:
        subjects = [
            nSub for nSub in args.participants_label if nSub in subjects
        ]

    for nSub in subjects:
        dwis = layout.get(subject=nSub,
                          datatype='dwi',
                          extension='nii.gz',
                          suffix='dwi')
        t1s = layout.get(subject=nSub,
                         datatype='anat',
                         extension='nii.gz',
                         suffix='T1w')
        fmaps = layout.get(subject=nSub,
                           datatype='fmap',
                           extension='nii.gz',
                           suffix='epi')
        bvals = layout.get(subject=nSub,
                           datatype='dwi',
                           extension='bval',
                           suffix='dwi')
        bvecs = layout.get(subject=nSub,
                           datatype='dwi',
                           extension='bvec',
                           suffix='dwi')

        # Get associations relatives to DWIs
        associations = get_dwi_associations(fmaps, bvals, bvecs)

        # Get the data for each run of DWIs
        for dwi in dwis:
            data.append(
                get_data(nSub, dwi, t1s, associations, args.readout,
                         args.clean))

    if args.clean:
        data = [d for d in data if d]

    with open(args.out_json, 'w') as outfile:
        json.dump(data,
                  outfile,
                  indent=4,
                  separators=(',', ': '),
                  sort_keys=True)
        # Add trailing newline for POSIX compatibility
        outfile.write('\n')
Code example #7
File: experiment.py Project: e-m-albright/CS682
def get_all_subject_data(layout: BIDSLayout):
    """
    Get the relevant data for every subject in the experiment
    """
    data = []

    for subject in layout.get_subjects():
        data.append((subject, *get_subject_data(layout, subject)))

    return data
Code example #8
import typing as t

from bids import BIDSLayout


def _fill_empty_lists(layout: BIDSLayout, subjects: list, tasks: list,
                      sessions: list, runs: t.List[str]):
    """
    If filters are not provided by the user, load them from layout.
    """

    subjects = subjects if subjects else layout.get_subjects()
    tasks = tasks if tasks else layout.get_tasks()
    sessions = sessions if sessions else layout.get_sessions()
    runs = runs if runs else layout.get_runs()
    return subjects, tasks, sessions, runs
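
A quick sketch of the call pattern (dataset path hypothetical; empty filters fall back to whatever the layout contains):

layout = BIDSLayout('/data/bids')
subjects, tasks, sessions, runs = _fill_empty_lists(layout, [], ['rest'], [], [])
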
Code example #9
File: csv_creator_adv.py Project: CBICA/BrainMaGe
def rex_bids_csv(folder_path, to_save, ftype):
    """[CSV generation for BIDS datasets]
    [This function is used to generate a csv for BIDS datasets]
    Arguments:
        folder_path {[string]} -- [Takes the folder to see where to look for
                                   the different modalities]
        to_save {[string]} -- [Takes the folder as a string to save the csv]
        ftype {[string]} -- [Are you trying to save train, validation or test;
                             if file type is set to test, it does not look for
                             ground truths]
    """
    if ftype == "test":
        csv_file = open(os.path.join(to_save, ftype + ".csv"), "w+")
        csv_file.write("ID,")
    else:
        csv_file = open(os.path.join(to_save, ftype + ".csv"), "w+")
        csv_file.write("ID,gt_path,")
    # load BIDS dataset into memory
    layout = BIDSLayout(folder_path)
    bids_df = layout.to_df()
    bids_modality_df = {
        "t1": bids_df[bids_df["suffix"] == "T1w"],
        "t2": bids_df[bids_df["suffix"] == "T2w"],
        "flair": bids_df[bids_df["suffix"] == "FLAIR"],
        "t1ce": bids_df[bids_df["suffix"] == "T1CE"],
    }
    # check what modalities the dataset contains
    modalities = []
    for modality, df in bids_modality_df.items():
        if not df.empty:
            modalities.append(modality)
    # write headers for those modalities
    for modality in modalities[:-1]:
        csv_file.write(modality + "_path,")
    modality = modalities[-1]
    csv_file.write(modality + "_path\n")
    # write image paths for each subject
    for sub in layout.get_subjects():
        csv_file.write(sub)
        csv_file.write(",")
        if ftype != "test":
            ground_truth = glob.glob(
                os.path.join(folder_path, sub, "*mask.nii.gz"))[0]
            csv_file.write(ground_truth)
            csv_file.write(",")
        for modality in modalities[:-1]:
            img = bids_modality_df[modality][bids_df["subject"] ==
                                             sub].path.values
            csv_file.write(img[0])
            csv_file.write(",")
        modality = modalities[-1]
        img = bids_modality_df[modality][bids_df["subject"] == sub].path.values
        csv_file.write(img[0])
        csv_file.write("\n")
    csv_file.close()
Code example #10
import os.path as op

import pandas as pd
from dateutil import parser


def anon_acqtimes(dset_dir):
    """
    Anonymize acquisition datetimes for a dataset.

    Works for both longitudinal and cross-sectional studies. The time of day
    is preserved, but the first scan is set to January 1st, 1800. In a
    longitudinal study, each session is anonymized relative to the first
    session, so that time between sessions is preserved.

    Overwrites scan tsv files in dataset. Only run this *after* data collection
    is complete for the study, especially if it's longitudinal.

    Parameters
    ----------
    dset_dir : str
        Path to BIDS dataset to be anonymized.
    """
    bl_dt = parser.parse('1800-01-01')

    layout = BIDSLayout(dset_dir)
    subjects = layout.get_subjects()
    sessions = sorted(layout.get_sessions())

    for sub in subjects:
        if not sessions:
            scans_file = op.join(dset_dir, f'sub-{sub}/sub-{sub}_scans.tsv')
            df = pd.read_csv(scans_file, sep='\t')
            first_scan = df['acq_time'].min()
            first_dt = parser.parse(first_scan.split('T')[0])
            diff = first_dt - bl_dt
            acq_times = df['acq_time'].apply(parser.parse)
            acq_times = (acq_times - diff).astype(str)
            df['acq_time'] = acq_times
            # write the anonymized times back (the docstring says scans files are overwritten)
            df.to_csv(scans_file, sep='\t', index=False)
        else:
            # Separated from dataset sessions in case subject missed some
            sub_ses = sorted(layout.get_sessions(subject=sub))
            for i, ses in enumerate(sub_ses):
                scans_file = op.join(
                    dset_dir,
                    f'sub-{sub}/ses-{ses}/sub-{sub}_ses-{ses}_scans.tsv')
                df = pd.read_csv(scans_file, sep='\t')
                if i == 0:
                    # Anonymize in terms of first scan for subject.
                    first_scan = df['acq_time'].min()
                    first_dt = parser.parse(first_scan.split('T')[0])
                    diff = first_dt - bl_dt

                acq_times = df['acq_time'].apply(parser.parse)
                acq_times = (acq_times - diff).astype(str)
                df['acq_time'] = acq_times
                df.to_csv(scans_file, sep='\t', index=False)
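
A toy sketch of the underlying date-shift arithmetic (values illustrative only):

from dateutil import parser

bl_dt = parser.parse('1800-01-01')
first_dt = parser.parse('2019-03-14')                 # subject's first scan date
diff = first_dt - bl_dt                               # offset applied to every acq_time
shifted = parser.parse('2019-03-14T09:30:00') - diff
print(shifted)                                        # 1800-01-01 09:30:00, time of day kept
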
Code example #11
def main():

    parser = create_parser()
    args = parser.parse_args()

    n_cpus = args.jobs[0]
    
    if args.bids:
        if (args.bids_sub is None):  # if bids folder is provided but no subject, raise error
            parser.error("--bids requires --bids_sub")
        else:  # both bids and bids_sub
            layout = BIDSLayout(args.bids[0])
            if args.bids_sub[0] not in layout.get_subjects():
                parser.error("Unknown subject, not in BIDS structure")
            else:
                f = layout.get(subject=args.bids_sub[0], extension='gii.gz')[0]
                nib_surf, vertices, faces = io.open_gifti_surf(f)  # hoping this f contains the file. TODO
    else:
        nib_surf, vertices, faces = io.open_gifti_surf(args.surface[0])

    nib = nibabel.load(args.data[0])
    if len(nib.darrays) > 1:
        cifti = np.array([n.data for n in nib.darrays]).transpose()
    else:
        cifti = nib.darrays[0].data

    if args.full_brain:
        print("Running full brain analyses")
        if args.mask is None:
            raise SystemExit(
                "A mask file must be provided through the --label flag. See --help")
        _, labels = io.open_gifti(args.mask[0])
        cort_index = np.array(labels, dtype=bool)  # np.bool / np.int were removed in NumPy 1.24+
        Z = np.array(cort_index, dtype=int)
        result = vb.vb_cluster(vertices, faces, n_cpus, cifti, Z, args.norm[0],
                               args.output[0] + "." + args.norm[0], nib_surf)

    elif args.clusters is None:
        print("Running searchlight analyses")
        if args.mask is None:
            raise SystemExit(
                "A mask file must be provided through the --label flag. See --help")
        # Read labels
        _, labels = io.open_gifti(args.mask[0])
        cort_index = np.array(labels, dtype=bool)
        result = vb.vb_index(vertices, faces, n_cpus, cifti, args.norm[0], cort_index,
                             args.output[0] + "." + args.norm[0], nib_surf)

    else:
        print("Running ROI analyses")
        nib, Z = io.open_gifti(args.clusters[0])
        Z = np.array(Z, dtype=int)
        result = vb.vb_cluster(vertices, faces, n_cpus, cifti, Z, args.norm[0],
                               args.output[0] + "." + args.norm[0], nib_surf)
Code example #12
def main():
    parser = _build_args_parser()
    args = parser.parse_args()

    assert_outputs_exists(parser, args, args.output_json)

    data = []
    layout = BIDSLayout(args.bids_folder, index_metadata=False)
    subjects = layout.get_subjects()
    for nSub in subjects:
        dwis = layout.get(subject=nSub,
                          datatype='dwi',
                          extension='nii.gz',
                          suffix='dwi')
        t1s = layout.get(subject=nSub,
                         datatype='anat',
                         extension='nii.gz',
                         suffix='T1w')
        fmaps = layout.get(subject=nSub,
                           datatype='fmap',
                           extension='nii.gz',
                           suffix='epi')
        bvals = layout.get(subject=nSub,
                           datatype='dwi',
                           extension='bval',
                           suffix='dwi')
        bvecs = layout.get(subject=nSub,
                           datatype='dwi',
                           extension='bvec',
                           suffix='dwi')

        # Get associations relatives to DWIs
        associations = get_dwi_associations(fmaps, bvals, bvecs)

        # Get the data for each run of DWIs
        for nRun, dwi in enumerate(dwis):
            data.append(
                get_data(nSub, dwi, t1s, associations, nRun, args.readout))

    with open(args.output_json, 'w') as outfile:
        json.dump(data,
                  outfile,
                  indent=4,
                  separators=(',', ': '),
                  sort_keys=True)
        # Add trailing newline for POSIX compatibility
        outfile.write('\n')
Code example #13
def test_generate_bids_skeleton(tmp_path, test_id, json_layout, n_files,
                                n_subjects, n_sessions):
    root = tmp_path / test_id
    generate_bids_skeleton(root, json_layout)
    datadesc = root / "dataset_description.json"
    assert datadesc.exists()
    assert "BIDSVersion" in datadesc.read_text()

    assert len([x for x in root.glob("**/*") if x.is_file()]) == n_files

    # ensure layout is valid
    layout = BIDSLayout(root)
    assert len(layout.get_subjects()) == n_subjects
    assert len(layout.get_sessions()) == n_sessions

    anat = layout.get(suffix="T1w", extension="nii.gz")[0]
    bold = layout.get(suffix="bold", extension="nii.gz")[0]
    assert anat.get_metadata()
    assert bold.get_metadata()
Code example #14
    def _bidsapp_fired(self):
        """ Callback of the "bidsapp" button. This displays the BIDS App Interface window."""
        print_blue("[Open BIDS App Window]")
        bids_layout = BIDSLayout(self.project_info.base_directory)
        subjects = bids_layout.get_subjects()

        anat_config = os.path.join(self.project_info.base_directory, "code/",
                                   "ref_anatomical_config.json")
        dmri_config = os.path.join(self.project_info.base_directory, "code/",
                                   "ref_diffusion_config.json")
        fmri_config = os.path.join(self.project_info.base_directory, "code/",
                                   "ref_fMRI_config.json")

        self.bidsapp_ui = cmp.bidsappmanager.gui.bidsapp.BIDSAppInterfaceWindow(
            project_info=self.project_info,
            bids_root=self.project_info.base_directory,
            subjects=sorted(subjects),
            anat_config=anat_config,
            dmri_config=dmri_config,
            fmri_config=fmri_config,
        )
        self.bidsapp_ui.configure_traits()
Code example #15
def run_deeid():

    args = get_parser().parse_args()
    subjects_to_analyze = []

    # special variable set in the container
    if os.getenv('IS_DOCKER'):
        exec_env = 'singularity'
        cgroup = Path('/proc/1/cgroup')
        if cgroup.exists() and 'docker' in cgroup.read_text():
            exec_env = 'docker'
    else:
        exec_env = 'local'

    if args.brainextraction is None:
        raise Exception(
            "For post-defacing quality control it is required to run a form of "
            "brain extraction on the non-deidentified data. Thus please indicate "
            "either bet (--brainextraction bet) or nobrainer "
            "(--brainextraction nobrainer).")

    if args.skip_bids_validation:
        print("Input data will not be checked for BIDS compliance.")
    else:
        print("Making sure the input data is BIDS compliant "
              "(warnings can be ignored in most cases).")
        validate_input_dir(exec_env, args.bids_dir, args.participant_label)

    layout = BIDSLayout(args.bids_dir)

    if args.analysis_level == "participant":
        if args.participant_label:
            subjects_to_analyze = args.participant_label
        else:
            print("No participant label indicated. Please do so.")
    else:
        subjects_to_analyze = layout.get(return_type='id', target='subject')

    list_part_prob = []
    for part in subjects_to_analyze:
        if part not in layout.get_subjects():
            list_part_prob.append(part)
    if len(list_part_prob) >= 1:
        raise Exception(
            "The participant(s) you indicated are not present in the BIDS "
            "dataset, please check again. This refers to: %s" % list_part_prob)

    sessions_to_analyze = layout.get(return_type='id', target='session')

    if not sessions_to_analyze:
        print('Processing data from one session.')
    else:
        print('Processing data from %s sessions:' %
              str(len(sessions_to_analyze)))
        print(sessions_to_analyze)

    list_check_meta = args.check_meta

    list_field_del = args.del_meta

    for subject_label in subjects_to_analyze:
        if not sessions_to_analyze:
            list_t1w = layout.get(subject=subject_label,
                                  extension='nii.gz',
                                  suffix='T1w',
                                  return_type='filename')
        else:
            list_t1w = layout.get(subject=subject_label,
                                  extension='nii.gz',
                                  suffix='T1w',
                                  return_type='filename',
                                  session=sessions_to_analyze)
        for T1_file in list_t1w:
            check_outpath(args.bids_dir, subject_label)
            if args.brainextraction == 'bet':
                if args.bet_frac is None:
                    raise Exception(
                        "If you want to use BET for pre-defacing brain extraction,"
                        "please provide a Frac value. For example: --bet_frac 0.5"
                    )
                else:
                    run_brain_extraction_bet(T1_file, args.bet_frac[0],
                                             subject_label, args.bids_dir)
            elif args.brainextraction == 'nobrainer':
                run_brain_extraction_nb(T1_file, subject_label, args.bids_dir)

            check_meta_data(args.bids_dir, subject_label, list_check_meta)
            source_t1w = copy_no_deid(args.bids_dir, subject_label, T1_file)

            if args.del_meta:
                del_meta_data(args.bids_dir, subject_label, list_field_del)
            if args.deid == "pydeface":
                run_pydeface(source_t1w, T1_file)
            elif args.deid == "mri_deface":
                run_mri_deface(source_t1w, T1_file)
            elif args.deid == "quickshear":
                run_quickshear(source_t1w, T1_file)
            elif args.deid == "mridefacer":
                run_mridefacer(source_t1w, T1_file)
            elif args.deid == "deepdefacer":
                run_deepdefacer(source_t1w, subject_label, args.bids_dir)

        if args.deface_t2w:
            if not sessions_to_analyze:
                list_t2w = layout.get(subject=subject_label,
                                      extension='nii.gz',
                                      suffix='T2w',
                                      return_type='filename')
            else:
                list_t2w = layout.get(subject=subject_label,
                                      extension='nii.gz',
                                      suffix='T2w',
                                      return_type='filename',
                                      session=sessions_to_analyze)
            if not list_t2w:
                raise Exception(
                    "You indicated that a T2w image should be defaced as well. "
                    "However, no T2w image exists for subject %s. "
                    "Please check again." % subject_label)

            for T2_file in list_t2w:
                if args.brainextraction == 'bet':
                    run_brain_extraction_bet(T2_file, args.bet_frac[0],
                                             subject_label, args.bids_dir)
                elif args.brainextraction == 'nobrainer':
                    run_brain_extraction_nb(T2_file, subject_label,
                                            args.bids_dir)

                source_t2w = copy_no_deid(args.bids_dir, subject_label,
                                          T2_file)
                run_t2w_deface(source_t2w, T1_file, T2_file)

        rename_non_deid(args.bids_dir, subject_label)

        if not sessions_to_analyze and args.deface_t2w is False:
            create_graphics(args.bids_dir,
                            subject_label,
                            session=None,
                            t2w=None)
        elif sessions_to_analyze and args.deface_t2w is False:
            for session in sessions_to_analyze:
                create_graphics(args.bids_dir,
                                subject_label,
                                session=session,
                                t2w=None)
        elif not sessions_to_analyze and args.deface_t2w:
            create_graphics(args.bids_dir,
                            subject_label,
                            session=None,
                            t2w=True)
        elif sessions_to_analyze and args.deface_t2w:
            for session in sessions_to_analyze:
                create_graphics(args.bids_dir,
                                subject_label,
                                session=session,
                                t2w=True)

        if not sessions_to_analyze:
            clean_up_files(args.bids_dir, subject_label)
        else:
            for session in sessions_to_analyze:
                clean_up_files(args.bids_dir, subject_label, session=session)
Code example #16
def bidsmri2project(directory, args):

    # initialize empty cde graph...it may get replaced if we're doing variable to term mapping or not
    cde=Graph()

    # Parse dataset_description.json file in BIDS directory
    if os.path.isdir(directory):
        try:
            with open(os.path.join(directory,'dataset_description.json')) as data_file:
                dataset = json.load(data_file)
        except OSError:
            logging.critical("Cannot find dataset_description.json file which is required in the BIDS spec")
            exit(-1)
    else:
        logging.critical("Error: BIDS directory %s does not exist!" % directory)
        exit(-1)

    # create project / nidm-exp doc
    project = Project()

    # if there are git annex sources then add them
    num_sources=addGitAnnexSources(obj=project.get_uuid(),bids_root=directory)
    # else just add the local path to the dataset
    if num_sources == 0:
        project.add_attributes({Constants.PROV['Location']:"file:/" + directory})


    # add various attributes if they exist in BIDS dataset
    for key in dataset:
        # if key from dataset_description file is mapped to term in BIDS_Constants.py then add to NIDM object
        if key in BIDS_Constants.dataset_description:
            if type(dataset[key]) is list:
                project.add_attributes({BIDS_Constants.dataset_description[key]:"".join(dataset[key])})
            else:
                project.add_attributes({BIDS_Constants.dataset_description[key]:dataset[key]})




    # get BIDS layout
    bids_layout = BIDSLayout(directory)


    # create empty dictionary for sessions where key is subject id and used later to link scans to same session as demographics
    session={}
    participant={}
    # Parse participants.tsv file in BIDS directory and create study and acquisition objects
    if os.path.isfile(os.path.join(directory,'participants.tsv')):
        with open(os.path.join(directory,'participants.tsv')) as csvfile:
            participants_data = csv.DictReader(csvfile, delimiter='\t')

            # logic to map variables to terms.
            # first iterate over variables in dataframe and check which ones are already mapped as BIDS constants and which are not.  For those that are not
            # we want to use the variable-term mapping functions to help the user do the mapping
            # iterate over columns
            mapping_list=[]
            column_to_terms={}
            for field in participants_data.fieldnames:

                # column is not in BIDS_Constants
                if not (field in BIDS_Constants.participants):
                    # add column to list for column_to_terms mapping
                    mapping_list.append(field)



            #if user didn't supply a json mapping file but we're doing some variable-term mapping create an empty one for column_to_terms to use
            if not args.json_map:
                #defaults to participants.json because here we're mapping the participants.tsv file variables to terms
                # if participants.json file doesn't exist then run without json mapping file
                if not os.path.isfile(os.path.join(directory,'participants.json')):
                    #maps variables in CSV file to terms
                    temp=DataFrame(columns=mapping_list)
                    if args.no_concepts:
                        column_to_terms,cde = map_variables_to_terms(directory=directory,assessment_name='participants.tsv',
                            df=temp,output_file=os.path.join(directory,'participants.json'),bids=True,associate_concepts=False)
                    else:
                        column_to_terms,cde = map_variables_to_terms(directory=directory,assessment_name='participants.tsv',
                            df=temp,output_file=os.path.join(directory,'participants.json'),bids=True)
                else:
                    #maps variables in CSV file to terms
                    temp=DataFrame(columns=mapping_list)
                    if args.no_concepts:
                        column_to_terms,cde = map_variables_to_terms(directory=directory, assessment_name='participants.tsv', df=temp,
                            output_file=os.path.join(directory,'participants.json'),json_file=os.path.join(directory,'participants.json'),bids=True,associate_concepts=False)
                    else:
                        column_to_terms,cde = map_variables_to_terms(directory=directory, assessment_name='participants.tsv', df=temp,
                            output_file=os.path.join(directory,'participants.json'),json_file=os.path.join(directory,'participants.json'),bids=True)
            else:
                #maps variables in CSV file to terms
                temp=DataFrame(columns=mapping_list)
                if args.no_concepts:
                    column_to_terms, cde = map_variables_to_terms(directory=directory, assessment_name='participants.tsv', df=temp,
                        output_file=os.path.join(directory,'participants.json'),json_file=args.json_map,bids=True,associate_concepts=False)
                else:
                    column_to_terms, cde = map_variables_to_terms(directory=directory, assessment_name='participants.tsv', df=temp,
                        output_file=os.path.join(directory,'participants.json'),json_file=args.json_map,bids=True)


            for row in participants_data:
                #create session object for subject to be used for participant metadata and image data
                #parse subject id from "sub-XXXX" string
                temp = row['participant_id'].split("-")
                #for ambiguity in BIDS datasets.  Sometimes participant_id is sub-XXXX and other times it's just XXXX
                if len(temp) > 1:
                    subjid = temp[1]
                else:
                    subjid = temp[0]
                logging.info(subjid)
                session[subjid] = Session(project)

                #add acquisition object
                acq = AssessmentAcquisition(session=session[subjid])

                acq_entity = AssessmentObject(acquisition=acq)
                participant[subjid] = {}
                participant[subjid]['person'] = acq.add_person(attributes=({Constants.NIDM_SUBJECTID:row['participant_id']}))

                # add nfo:filename entry to assessment entity to reflect provenance of where this data came from
                acq_entity.add_attributes({Constants.NIDM_FILENAME:getRelPathToBIDS(os.path.join(directory,'participants.tsv'),directory)})
                #acq_entity.add_attributes({Constants.NIDM_FILENAME:os.path.join(directory,'participants.tsv')})

                #add qualified association of participant with acquisition activity
                acq.add_qualified_association(person=participant[subjid]['person'],role=Constants.NIDM_PARTICIPANT)
                # print(acq)

                # if there are git annex sources for participants.tsv file then add them
                num_sources=addGitAnnexSources(obj=acq_entity.get_uuid(),bids_root=directory)
                # else just add the local path to the dataset
                if num_sources == 0:
                    acq_entity.add_attributes({Constants.PROV['Location']:"file:/" + os.path.join(directory,'participants.tsv')})

                 # if there's a JSON sidecar file then create an entity and associate it with all the assessment entities
                if os.path.isfile(os.path.join(directory,'participants.json')):
                    json_sidecar = AssessmentObject(acquisition=acq)
                    json_sidecar.add_attributes({PROV_TYPE:QualifiedName(Namespace("bids",Constants.BIDS),"sidecar_file"), Constants.NIDM_FILENAME:
                        getRelPathToBIDS(os.path.join(directory,'participants.json'),directory)})

                    # add Git Annex Sources
                    # if there are git annex sources for participants.tsv file then add them
                    num_sources=addGitAnnexSources(obj=json_sidecar.get_uuid(),filepath=os.path.join(directory,'participants.json'),bids_root=directory)
                    # else just add the local path to the dataset
                    if num_sources == 0:
                        json_sidecar.add_attributes({Constants.PROV['Location']:"file:/" + os.path.join(directory,'participants.json')})


                # check if json_sidecar entity exists and if so associate assessment entity with it
                if 'json_sidecar' in locals():
                    #connect json_entity with acq_entity
                    acq_entity.add_attributes({Constants.PROV["wasInfluencedBy"]:json_sidecar})

                for key,value in row.items():
                    if not value:
                        continue
                    #for variables in participants.tsv file who have term mappings in BIDS_Constants.py use those, add to json_map so we don't have to map these if user
                    #supplied arguments to map variables
                    if key in BIDS_Constants.participants:
                        # WIP
                        # Here we are adding to CDE graph data elements for BIDS Constants that remain fixed for each BIDS-compliant dataset

                        if not (BIDS_Constants.participants[key] == Constants.NIDM_SUBJECTID):


                            # create a namespace with the URL for fixed BIDS_Constants term
                            # item_ns = Namespace(str(Constants.BIDS.namespace.uri))
                            # add prefix to namespace which is the BIDS fixed variable name
                            # cde.bind(prefix="bids", namespace=item_ns)
                            # ID for BIDS variables is always the same bids:[bids variable]
                            cde_id = Constants.BIDS[key]
                            # add the data element to the CDE graph
                            cde.add((cde_id,RDF.type, Constants.NIDM['DataElement']))
                            cde.add((cde_id,RDF.type, Constants.PROV['Entity']))
                            # add some basic information about this data element
                            cde.add((cde_id,Constants.RDFS['label'],Literal(BIDS_Constants.participants[key].localpart)))
                            cde.add((cde_id,Constants.NIDM['isAbout'],URIRef(BIDS_Constants.participants[key].uri)))
                            cde.add((cde_id,Constants.NIDM['source_variable'],Literal(key)))
                            cde.add((cde_id,Constants.NIDM['description'],Literal("participant/subject identifier")))
                            cde.add((cde_id,Constants.RDFS['comment'],Literal("BIDS participants_id variable fixed in specification")))
                            cde.add((cde_id,Constants.RDFS['valueType'],URIRef(Constants.XSD["string"])))

                            acq_entity.add_attributes({cde_id:Literal(value)})

                        # if this was the participant_id, we already handled it above creating agent / qualified association
                        # if not (BIDS_Constants.participants[key] == Constants.NIDM_SUBJECTID):
                        #    acq_entity.add_attributes({BIDS_Constants.participants[key]:value})


                    # else if user added -mapvars flag to command line then we'll use the variable-> term mapping procedures to help user map variables to terms (also used
                    # in CSV2NIDM.py)
                    else:

                        # WIP: trying to add new support for CDEs...
                        add_attributes_with_cde(prov_object=acq_entity,cde=cde,row_variable=key,value=value)
                        # if key in column_to_terms:
                        #    acq_entity.add_attributes({QualifiedName(provNamespace(Core.safe_string(None,string=str(key)), column_to_terms[key]["url"]), ""):value})
                        # else:

                        #    acq_entity.add_attributes({Constants.BIDS[key.replace(" ", "_")]:value})


    # create acquisition objects for each scan for each subject

    # loop through all subjects in dataset
    for subject_id in bids_layout.get_subjects():
        logging.info("Converting subject: %s" %subject_id)
        # skip .git directories...added to support datalad datasets
        if subject_id.startswith("."):
            continue

        # check if there are a session numbers.  If so, store it in the session activity and create a new
        # sessions for these imaging acquisitions.  Because we don't know which imaging session the root
        # participants.tsv file data may be associated with we simply link the imaging acquisitions to different
        # sessions (i.e. the participants.tsv file goes into an AssessmentAcquisition and linked to a unique
        # sessions and the imaging acquisitions go into MRAcquisitions and has a unique session)
        imaging_sessions = bids_layout.get_sessions(subject=subject_id)
        # if session_dirs has entries then get any metadata about session and store in session activity

        # bids_layout.get(subject=subject_id,type='session',extensions='.tsv')
        # bids_layout.get(subject=subject_id,type='scans',extensions='.tsv')
        # bids_layout.get(extensions='.tsv',return_type='obj')

        # loop through each session if there is a sessions directory
        if len(imaging_sessions) > 0:
            for img_session in imaging_sessions:
                # create a new session
                ses = Session(project)
                # add session number as metadata
                ses.add_attributes({Constants.BIDS['session_number']:img_session})
                addimagingsessions(bids_layout=bids_layout,subject_id=subject_id,session=ses,participant=participant, directory=directory,img_session=img_session)
        else:
            # we have no ses-* directories in the BIDS layout, so attach the
            # imaging acquisitions to a single new session
            addimagingsessions(bids_layout=bids_layout,subject_id=subject_id,session=Session(project),participant=participant, directory=directory)



        # Added temporarily to support phenotype files
        # for each *.tsv / *.json file pair in the phenotypes directory
        # WIP: ADD VARIABLE -> TERM MAPPING HERE
        for tsv_file in glob.glob(os.path.join(directory,"phenotype","*.tsv")):
            # for now, open the TSV file, extract the row for this subject, store it in an acquisition object and link to
            # the associated JSON data dictionary file
            with open(tsv_file) as phenofile:
                pheno_data = csv.DictReader(phenofile, delimiter='\t')
                for row in pheno_data:
                    subjid = row['participant_id'].split("-")
                    if subjid[1] != subject_id:
                        continue
                    else:
                        # add acquisition object
                        acq = AssessmentAcquisition(session=session[subjid[1]])
                        # add qualified association with person
                        acq.add_qualified_association(person=participant[subject_id]['person'],role=Constants.NIDM_PARTICIPANT)

                        acq_entity = AssessmentObject(acquisition=acq)



                        for key,value in row.items():
                            if not value:
                                continue
                            # we're using participant_id in NIDM in agent so don't add to assessment as a triple.
                            # BIDS phenotype files seem to have an index column with no column header variable name so skip those
                            if ((not key == "participant_id") and (key != "")):
                                # for now we're using a placeholder namespace for BIDS and simply the variable names as the concept IDs..
                                acq_entity.add_attributes({Constants.BIDS[key]:value})

                        # link TSV file
                        acq_entity.add_attributes({Constants.NIDM_FILENAME:getRelPathToBIDS(tsv_file,directory)})
                        #acq_entity.add_attributes({Constants.NIDM_FILENAME:tsv_file})

                        # if there are git annex sources for participants.tsv file then add them
                        num_sources=addGitAnnexSources(obj=acq_entity.get_uuid(),bids_root=directory)
                        # else just add the local path to the dataset
                        if num_sources == 0:
                            acq_entity.add_attributes({Constants.PROV['Location']:"file:/" + tsv_file})


                        # link associated JSON file if it exists
                        data_dict = os.path.join(directory,"phenotype",os.path.splitext(os.path.basename(tsv_file))[0]+ ".json")
                        if os.path.isfile(data_dict):
                            # if file exists, create a new entity and associate it with the appropriate activity  and a used relationship
                            # with the TSV-related entity
                            json_entity = AssessmentObject(acquisition=acq)
                            json_entity.add_attributes({PROV_TYPE:Constants.BIDS["sidecar_file"], Constants.NIDM_FILENAME:
                                getRelPathToBIDS(data_dict,directory)})

                            # add Git Annex Sources
                            # if there are git annex sources for participants.tsv file then add them
                            num_sources=addGitAnnexSources(obj=json_entity.get_uuid(),filepath=data_dict,bids_root=directory)
                            # else just add the local path to the dataset
                            if num_sources == 0:
                                json_entity.add_attributes({Constants.PROV['Location']:"file:/" + data_dict})

                            #connect json_entity with acq_entity
                            acq_entity.add_attributes({Constants.PROV["wasInfluencedBy"]:json_entity.get_uuid()})


    return project, cde
Code example #17
def main(**args):

    outfiles = ['fitts', 'errts', 'stats', 'betas']

    path = args['path']
    pipeline = args['pipeline']

    command = '3dDeconvolve -input {files} -jobs {n_jobs} -polort {polort} -float {confounds} {events_string} '+ \
              ' -mask {mask} -allzero_OK -fout -tout -x1D {design_matrix_txt} -xjpeg {design_matrix_jpg} -xsave '+\
              '-fitts {fitts} -errts {errts} -bucket {stats} -cbucket {betas} -rout -gltsym "SYM: RESP+L -RESP+R" -glt_label 1 RespLvsRespR'

    extra_event = process_extraevent_arg(args['extra_event'])

    derivatives = os.path.join(path, "derivatives", pipeline)
    print("mkdir -p {}".format(derivatives))
    os.system("mkdir -p {}".format(derivatives))

    derivatives_pattern = os.path.join(derivatives, 'sub-{subject}',
                                       "ses-{session}")
    pattern = os.path.join(
        derivatives_pattern, "{datatype}",
        "sub-{subject}[_ses-{session}][_space-{space}][_desc-{desc}]_{suffix}.{extension}"
    )

    layout = BIDSLayout(path, derivatives=True)

    subjects = layout.get_subjects()
    subjects.remove('lormat')  # project-specific: drop the 'lormat' entry from the subject list
    sessions = layout.get_sessions()

    # TODO: Check if there are sessions
    for session in sessions:
        for subj in subjects:

            deriv_dir = derivatives_pattern.format(session=session,
                                                   subject=subj)
            print("mkdir -p {}".format(deriv_dir))
            os.system("mkdir -p {}".format(deriv_dir))

            # Create func
            func_dir = os.path.join(derivatives_pattern,
                                    "{datatype}").format(session=session,
                                                         subject=subj,
                                                         datatype='func')

            print("mkdir -p {}".format(func_dir))
            os.system("mkdir -p {}".format(func_dir))

            # Main command
            files = layout.get(subject=subj,
                               session=session,
                               task=session,
                               desc='afniproc',
                               extension='nii.gz')
            entities = files[0].get_entities()
            files = " ".join(f.path for f in files)

            args['files'] = files

            confounds = ''
            for desc in ['bpass', 'demean']:
                ort_files = layout.get(subject=subj,
                                       session=session,
                                       desc=desc)
                confounds += '-ortvec {} {} '.format(ort_files[0].path, desc)

            args['confounds'] = confounds

            # Stimuli
            stims = bids2afni_events(subj,
                                     session,
                                     layout,
                                     pattern,
                                     extra_event=extra_event)

            write_afni(stims)
            args['events_string'] = stims_times(stims)

            # Mask
            mask = layout.get(subject=subj,
                              session=session,
                              suffix='mask',
                              extension='nii.gz')
            args['mask'] = mask[0].path

            # Buckets
            for desc in outfiles:
                entities['desc'] = pipeline
                entities['suffix'] = desc
                args[desc] = layout.build_path(entities,
                                               pattern,
                                               validate=False)

            for extension in ['jpg', 'txt']:
                entities['suffix'] = 'dmatrix'
                entities['extension'] = extension
                args['design_matrix_' + extension] = layout.build_path(
                    entities, pattern, validate=False)

            print(command.format(**args))
            os.system(command.format(**args))
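
For reference, layout.build_path fills a path pattern from an entity dictionary, dropping the bracketed optional chunks whose entities are absent; a minimal sketch (entity values illustrative):

entities = {'subject': '01', 'session': 'motor', 'datatype': 'func',
            'desc': 'afniproc', 'suffix': 'stats', 'extension': 'nii.gz'}
print(layout.build_path(entities, pattern, validate=False))
# .../sub-01/ses-motor/func/sub-01_ses-motor_desc-afniproc_stats.nii.gz
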
Code example #18

if args.analysis_level == "participant":
    if not args.wf_base_dir:
        wf_dir = "/scratch"
    else:
        wf_dir = args.wf_base_dir

    if args.ants_reg_quick:
        print("Use AntsRegistrationSynQuick for registration")
    else:
        print("Use AntsRegistrationSyn for registration")

    layout = BIDSLayout(args.bids_dir)
    if not subjects:
        subjects = layout.get_subjects(datatype="dwi")
    print(f"{len(subjects)} subject(s) found {subjects}")

    for subject in subjects:
        print(subject)
        # get sessions
        sessions = layout.get_sessions(subject=subject, datatype="dwi")
        sessions.sort()

        # set up acq for eddy
        if "lhab" in subject:
            acq_str = "0 1 0 {TotalReadoutTime}"
            study = "lhab"
        elif "CC" in subject:
            acq_str = "0 -1 0 0.0684"
            study = "camcan"
Code example #19
def main(bids_dir):
    """ Extract CMP3 connectome in a bids dataset and create PDF report"""

    # Read BIDS dataset
    try:
        bids_layout = BIDSLayout(bids_dir)
        print("BIDS: %s" % bids_layout)

        subjects = []
        for subj in bids_layout.get_subjects():
            subjects.append('sub-' + str(subj))

        print("Available subjects : ")
        print(subjects)

    except Exception:
        print(
            "BIDS ERROR: Invalid BIDS dataset. Please see documentation for more details."
        )
        sys.exit(1)

    c = canvas.Canvas(os.path.join(bids_dir, 'derivatives', __cmp_directory__,
                                   'report.pdf'),
                      pagesize=A4)
    width, height = A4

    print("Page size : %s x %s" % (width, height))

    startY = 841.89 - 50  # A4 page height in points, minus a top margin

    c.drawString(245, startY, 'Report')
    c.drawString(10, startY - 20, 'BIDS : %s ' % bids_dir)

    offset = 0
    for subj in bids_layout.get_subjects():
        print("Processing %s..." % subj)

        sessions = bids_layout.get(target='session',
                                   return_type='id',
                                   subject=subj)
        if len(sessions) > 0:
            print("Warning: dataset contains sessions")
            for ses in sessions:
                gpickle_fn = os.path.join(
                    bids_dir, 'derivatives', __cmp_directory__,
                    'sub-' + str(subj), 'ses-' + str(ses), 'dwi',
                    'sub-%s_ses-%s_label-L2008_res-scale1_conndata-snetwork_connectivity.gpickle'
                    % (str(subj), str(ses)))
                if os.path.isfile(gpickle_fn):
                    # c.drawString(10,20+offset,'Subject: %s / Session: %s '%(str(subj),str(sess)))
                    G = nx.read_gpickle(gpickle_fn)
                    con_metric = 'number_of_fibers'
                    con = nx.to_numpy_matrix(G,
                                             weight=con_metric,
                                             dtype=np.float64)

                    fig = figure(figsize=(8, 8))
                    suptitle('Subject: %s / Session: %s ' %
                             (str(subj), str(ses)),
                             fontsize=11)
                    title('Connectivity metric: %s' % con_metric, fontsize=10)
                    # copy the default cmap (0,0,0.5156)
                    my_cmap = copy.copy(cm.get_cmap('inferno'))
                    my_cmap.set_bad((0, 0, 0))
                    imshow(con,
                           interpolation='nearest',
                           norm=colors.LogNorm(),
                           cmap=my_cmap)
                    colorbar()

                    imgdata = io.BytesIO()  # PNG is binary, so BytesIO rather than StringIO
                    fig.savefig(imgdata, format='png')
                    imgdata.seek(0)  # rewind the data

                    Image = ImageReader(imgdata)
                    posY = startY - 20 - 4.5 * inch - offset
                    c.drawImage(Image, 10, posY, 4 * inch, 4 * inch)

                    offset += 4.5 * inch
                    if posY - offset < 0:
                        c.showPage()
                        offset = 0

        else:
            print("No session")
            gpickle_fn = os.path.join(
                bids_dir, 'derivatives', __cmp_directory__, 'sub-' + str(subj),
                'connectivity',
                'sub-%s_label-L2008_res-scale1_conndata-snetwork_connectivity.gpickle'
                % (str(subj)))
            if os.path.isfile(gpickle_fn):
                # c.drawString(10,20+offset,'Subject : %s '%str(subj))
                G = nx.read_gpickle(gpickle_fn)
                con_metric = 'number_of_fibers'
                con = nx.to_numpy_matrix(G,
                                         weight=con_metric,
                                         dtype=np.float64)

                fig = figure(figsize=(8, 8))
                suptitle('Subject: %s ' % (str(subj)), fontsize=11)
                title('Connectivity metric: %s' % con_metric, fontsize=10)
                # copy the default cmap (0,0,0.5156)
                my_cmap = copy.copy(cm.get_cmap('inferno'))
                my_cmap.set_bad((0, 0, 0))
                imshow(con,
                       interpolation='nearest',
                       norm=colors.LogNorm(),
                       cmap=my_cmap)
                colorbar()

                imgdata = io.BytesIO()  # PNG is binary, so BytesIO rather than StringIO
                fig.savefig(imgdata, format='png')
                imgdata.seek(0)  # rewind the data

                Image = ImageReader(imgdata)
                posY = startY - 20 - 4.5 * inch - offset
                c.drawImage(Image, 10, posY, 4 * inch, 4 * inch)

                offset += 4.5 * inch
                if posY - offset < 0:
                    c.showPage()
                    offset = 0

    c.save()
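
Note that nx.read_gpickle and nx.to_numpy_matrix were removed in NetworkX 3.0; a hedged compatibility sketch for newer releases:

import pickle

import networkx as nx

with open(gpickle_fn, 'rb') as f:   # replaces nx.read_gpickle
    G = pickle.load(f)
con = nx.to_numpy_array(G, weight='number_of_fibers')  # replaces nx.to_numpy_matrix
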
Code example #20
File: run.py Project: funcworks/funcworks
def build_workflow(opts, retval):
    """
    Create the Nipype Workflow for a graph given the inputs.

    All the checks and the construction of the workflow are done
    inside this function that has pickleable inputs and output
    dictionary (``retval``) to allow isolation using a
    ``multiprocessing.Process`` that allows funcworks to enforce
    a hard-limited memory-scope.
    """
    from bids import BIDSLayout

    from nipype import logging as nlogging, config as ncfg
    from ..workflows.base import init_funcworks_wf
    from .. import __version__

    build_log = nlogging.getLogger("nipype.workflow")

    output_dir = opts.output_dir.resolve()
    bids_dir = opts.bids_dir.resolve()
    work_dir = Path(mkdtemp()) if opts.work_dir is None else opts.work_dir.resolve()
    retval["return_code"] = 1
    retval["workflow"] = None
    retval["bids_dir"] = bids_dir
    retval["output_dir"] = output_dir
    retval["work_dir"] = work_dir

    if not opts.database_path:
        database_path = str(work_dir / "dbcache")  # use resolved work_dir; opts.work_dir may be None
        layout = BIDSLayout(
            bids_dir,
            derivatives=opts.derivatives,
            validate=True,
            database_file=database_path,
            reset_database=True,
        )
    else:
        database_path = opts.database_path
        layout = BIDSLayout.load(database_path)

    if output_dir == bids_dir:
        build_log.error(
            "The selected output folder is the same as the input BIDS folder. "
            "Please modify the output path (suggestion: %s).",
            (bids_dir / "derivatives" /
             ("funcworks-%s" % __version__.split("+")[0])),
        )
        retval["return_code"] = 1
        return retval

    if bids_dir in work_dir.parents:
        build_log.error("The selected working directory is a subdirectory "
                        "of the input BIDS folder. "
                        "Please modify the working directory path.")
        retval["return_code"] = 1
        return retval

    # Set up some instrumental utilities
    runtime_uuid = "%s_%s" % (strftime("%Y%m%d-%H%M%S"), uuid.uuid4())
    retval["runtime_uuid"] = runtime_uuid

    if opts.participant_label:
        retval["participant_label"] = opts.participant_label
    else:
        retval["participant_label"] = layout.get_subjects()

    # Load base plugin_settings from file if --use-plugin
    plugin_settings = {
        "plugin": "MultiProc",
        "plugin_args": {
            "raise_insufficient": False,
            "maxtasksperchild": 1
        },
    }
    if opts.use_plugin is not None:
        with open(opts.use_plugin) as f:
            plugin_settings = json.load(f)

    # Resource management options
    # Note that we're making strong assumptions about valid plugin args
    # This may need to be revisited if people try to use batch plugins
    # nthreads = plugin_settings['plugin_args'].get('n_procs')
    # Permit overriding plugin config with specific CLI options
    # if nthreads is None or opts.nthreads is not None:
    #    nthreads = opts.nthreads
    #    if nthreads is None or nthreads < 1:
    #        nthreads = cpu_count()
    #    plugin_settings['plugin_args']['n_procs'] = nthreads
    # if opts.mem_mb:
    #    plugin_settings['plugin_args']['memory_gb'] = opts.mem_mb / 1024
    # omp_nthreads = opts.omp_nthreads
    # if omp_nthreads == 0:
    #    omp_nthreads = min(nthreads - 1 if nthreads > 1 else cpu_count(), 8)
    # if 1 < nthreads < omp_nthreads:
    #    build_log.warning(
    #        'Per-process threads (--omp-nthreads=%d) exceed total '
    #        'threads (--nthreads/--n_cpus=%d)', omp_nthreads, nthreads)
    retval["plugin_settings"] = plugin_settings

    # Set up directories
    # Check and create output and working directories
    output_dir.mkdir(exist_ok=True, parents=True)
    work_dir.mkdir(exist_ok=True, parents=True)

    # Nipype config (logs and execution)
    ncfg.update_config({
        "logging": {
            "log_to_file": True
        },
        "execution": {
            "crashfile_format": "txt",
            "get_linked_libs": False,
            # 'stop_on_first_crash': opts.stop_on_first_crash,
        },
        "monitoring": {
            "enabled": opts.resource_monitor,
            "sample_frequency": "0.5",
            "summary_append": True,
        },
    })

    if opts.resource_monitor:
        ncfg.enable_resource_monitor()
    # Called with reports only
    # if opts.reports_only:
    #     build_log.log(25, 'Running --reports-only on participants %s',
    #                   ', '.join(opts.participant_label))
    #     if opts.runtime_uuid is not None:
    #         runtime_uuid = opts.runtime_uuid
    #         retval['runtime_uuid'] = runtime_uuid
    #     retval['return_code'] = generate_reports(
    #         opts.participant_label, output_dir, work_dir, runtime_uuid,
    #         packagename='funcworks')
    #     return retval

    # Build main workflow
    build_log.log(
        25,
        (f"""
        Running FUNCWORKS version {__version__}:
          * BIDS dataset path: {bids_dir}.
          * Participant list: {retval['participant_label']}.
          * Run identifier: {runtime_uuid}.
        """),
    )

    if not opts.model_file:
        model_file = Path(bids_dir) / "models" / "model-default_smdl.json"
        if not model_file.exists():
            raise ValueError("Default Model File not Found")
    else:
        model_file = opts.model_file

    retval["workflow"] = init_funcworks_wf(
        model_file=model_file,
        bids_dir=opts.bids_dir,
        output_dir=opts.output_dir,
        work_dir=opts.work_dir,
        database_path=str(database_path),
        participants=retval["participant_label"],
        analysis_level=opts.analysis_level,
        smoothing=opts.smoothing,
        runtime_uuid=runtime_uuid,
        use_rapidart=opts.use_rapidart,
        detrend_poly=opts.detrend_poly,
        align_volumes=opts.align_volumes,
        smooth_autocorrelations=opts.smooth_autocorrelations,
        despike=opts.despike,
    )

    retval["return_code"] = 0
    """
    logs_path = Path(output_dir) / 'funcworks' / 'logs'
    boilerplate = retval['workflow'].visit_desc()

    if boilerplate:
        citation_files = {
            ext: logs_path / ('CITATION.%s' % ext)
            for ext in ('bib', 'tex', 'md', 'html')
        }
        # To please git-annex users and also to guarantee consistency
        # among different renderings of the same file, first remove any
        # existing one
        for citation_file in citation_files.values():
            try:
                citation_file.unlink()
            except FileNotFoundError:
                pass

        citation_files['md'].write_text(boilerplate)
        build_log.log(25, 'Works derived from this FUNCWorks execution should '
                      'include the following boilerplate:\n\n%s', boilerplate)
    """
    return retval
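As the docstring explains, the function keeps its inputs and the retval dictionary pickleable so that it can be isolated in a subprocess. A minimal sketch of that calling pattern (not funcworks' actual CLI code; opts stands in for the parsed argument namespace):

import multiprocessing as mp

def run_isolated(opts):
    # Build the workflow in a child process so its memory is reclaimed when
    # the process exits; retval is shared back through a Manager proxy.
    with mp.Manager() as mgr:
        retval = mgr.dict()
        proc = mp.Process(target=build_workflow, args=(opts, retval))
        proc.start()
        proc.join()
        if retval.get("return_code", 1) != 0:
            raise RuntimeError("Workflow construction failed")
        return retval["workflow"]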
Code example #21
File: bids.py, Project: cmaumet/fitlins
def collect_participants(bids_dir, participant_label=None, strict=False):
    """
    List the participants under the BIDS root and check that participants
    designated with the participant_label argument exist in that folder.

    Returns the list of participants to be finally processed.

    Requesting all subjects in a BIDS directory root:

    >>> collect_participants('ds114')
    ['01', '02', '03', '04', '05', '06', '07', '08', '09', '10']

    Requesting two subjects, given their IDs:

    >>> collect_participants('ds114', participant_label=['02', '04'])
    ['02', '04']

    Requesting two subjects, given their IDs (works with 'sub-' prefixes):

    >>> collect_participants('ds114', participant_label=['sub-02', 'sub-04'])
    ['02', '04']

    Requesting two subjects, but one does not exist:

    >>> collect_participants('ds114', participant_label=['02', '14'])
    ['02']

    >>> collect_participants('ds114', participant_label=['02', '14'],
    ...                      strict=True)  # doctest: +IGNORE_EXCEPTION_DETAIL
    Traceback (most recent call last):
    fmriprep.utils.bids.BIDSError:
    ...


    """
    layout = BIDSLayout(bids_dir)
    all_participants = layout.get_subjects()

    # Error: bids_dir does not contain subjects
    if not all_participants:
        raise BIDSError(
            'Could not find participants. Please make sure the BIDS data '
            'structure is present and correct. Datasets can be validated online '
            'using the BIDS Validator (http://incf.github.io/bids-validator/).\n'
            'If you are using Docker for Mac or Docker for Windows, you '
            'may need to adjust your "File sharing" preferences.', bids_dir)

    # No --participant-label was set, return all
    if not participant_label:
        return all_participants

    # Drop sub- prefixes
    participant_label = [
        sub[4:] if sub.startswith('sub-') else sub for sub in participant_label
    ]

    found_label = layout.get_subjects(subject=participant_label)

    if not found_label:
        raise BIDSError(
            'Could not find participants [{}]'.format(
                ', '.join(participant_label)), bids_dir)

    # Warn if some IDs were not found
    notfound_label = sorted(set(participant_label) - set(found_label))
    if notfound_label:
        exc = BIDSError(
            'Some participants were not found: {}'.format(
                ', '.join(notfound_label)), bids_dir)
        if strict:
            raise exc
        warnings.warn(exc.msg, BIDSWarning)

    return found_label
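BIDSError and BIDSWarning are referenced above but defined elsewhere in the project; minimal stand-ins along these lines make the example self-contained (the real definitions may differ):

class BIDSError(ValueError):
    """Raised when the BIDS dataset or the requested participants are invalid."""

    def __init__(self, message, bids_root):
        self.msg = '{}\nBIDS root: {}'.format(message, bids_root)
        super().__init__(self.msg)


class BIDSWarning(RuntimeWarning):
    """Non-fatal problem with the requested participant labels."""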
Code example #22
query = {'time_series': time_series_params, 'struct': struct_params}

layout = BIDSLayout(args['directory'])

#TODO: eliminate search paths before files are created and resources are wasted

file_grabber = Node(BIDSDataGrabber(), name="file_grabber")
file_grabber.inputs.base_dir = args['directory']
file_grabber.inputs.output_query = query

if args['subject'] is not None:
    # file_grabber.iterables = ('subject', layout.get_subjects())#file_grabber.inputs.subject = 'M10999905'#args['subject']
    file_grabber.iterables = ('subject', [args['subject']])
    #file_grabber.inputs.subject = args['subject']
else:
    file_grabber.iterables = ('subject', layout.get_subjects())

#TODO: also handle an optional 'session' argument here

#TODO: handle subjects that don't have the specified session; right now I think
# they just end with a crash. Result files are also created for these failed
# iterations, but there's no point to them.
file_grabber.inputs.raise_on_empty = True


# The BIDSDataGrabber outputs the files inside a list, but all other nodes accept only file paths, not lists
def unlist(time_series, struct):
    print(struct)
    return time_series[0], struct[0]


file_unwrapper = Node(Function(function=unlist,
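                               # hypothetical completion of the truncated call;
                               # the field names follow the query keys above
                               input_names=['time_series', 'struct'],
                               output_names=['time_series', 'struct']),
                      name='file_unwrapper')

# Assumed wiring (Workflow import assumed): connect the grabber's list
# outputs into the unwrapper so downstream nodes receive plain file paths.
wf = Workflow(name='pipeline')  # hypothetical workflow name
wf.connect([(file_grabber, file_unwrapper,
             [('time_series', 'time_series'), ('struct', 'struct')])])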
Code example #23
File: BIDSMRI2NIDM.py, Project: josephmje/PyNIDM
def bidsmri2project(directory, args):
    #Parse dataset_description.json file in BIDS directory
    if (os.path.isdir(os.path.join(directory))):
        try:
            with open(os.path.join(directory,
                                   'dataset_description.json')) as data_file:
                dataset = json.load(data_file)
        except OSError:
            logging.critical(
                "Cannot find dataset_description.json file which is required in the BIDS spec"
            )
            exit("-1")
    else:
        logging.critical("Error: BIDS directory %s does not exist!" %
                         os.path.join(directory))
        exit("-1")

    #create project / nidm-exp doc
    project = Project()

    #add various attributes if they exist in BIDS dataset
    for key in dataset:
        #if key from dataset_description file is mapped to term in BIDS_Constants.py then add to NIDM object
        if key in BIDS_Constants.dataset_description:
            if type(dataset[key]) is list:
                project.add_attributes({
                    BIDS_Constants.dataset_description[key]:
                    "".join(dataset[key])
                })
            else:
                project.add_attributes(
                    {BIDS_Constants.dataset_description[key]: dataset[key]})
        #add absolute location of BIDS directory on disk for later finding of files which are stored relatively in NIDM document
        project.add_attributes({Constants.PROV['Location']: directory})

    #get BIDS layout
    bids_layout = BIDSLayout(directory)

    #create empty dictionary for sessions where key is subject id, used later to link scans to the same session as demographics
    session = {}
    participant = {}
    #Parse participants.tsv file in BIDS directory and create study and acquisition objects
    if os.path.isfile(os.path.join(directory, 'participants.tsv')):
        with open(os.path.join(directory, 'participants.tsv')) as csvfile:
            participants_data = csv.DictReader(csvfile, delimiter='\t')

            #logic to map variables to terms

            #first iterate over variables in dataframe and check which ones are already mapped as BIDS constants and which are not.  For those that are not
            #we want to use the variable-term mapping functions to help the user do the mapping
            #iterate over columns
            mapping_list = []
            column_to_terms = {}
            for field in participants_data.fieldnames:

                #column is not in BIDS_Constants
                if not (field in BIDS_Constants.participants):
                    #add column to list for column_to_terms mapping
                    mapping_list.append(field)

            #do variable-term mappings
            if args.json_map is not False or args.key is not None:

                #if user didn't supply a json mapping file but we're doing some variable-term mapping create an empty one for column_to_terms to use
                if args.json_map is False:
                    #defaults to participants.json because here we're mapping the participants.tsv file variables to terms
                    # if participants.json file doesn't exist then run without json mapping file
                    if not os.path.isfile(
                            os.path.join(directory, 'participants.json')):
                        #maps variables in CSV file to terms
                        temp = DataFrame(columns=mapping_list)

                        column_to_terms, cde = map_variables_to_terms(
                            directory=directory,
                            assessment_name='participants.tsv',
                            df=temp,
                            apikey=args.key,
                            output_file=os.path.join(directory,
                                                     'participants.json'))
                    else:
                        #maps variables in CSV file to terms
                        temp = DataFrame(columns=mapping_list)
                        column_to_terms, cde = map_variables_to_terms(
                            directory=directory,
                            assessment_name='participants.tsv',
                            df=temp,
                            apikey=args.key,
                            output_file=os.path.join(directory,
                                                     'participants.json'),
                            json_file=os.path.join(directory,
                                                   'participants.json'))

                else:
                    #maps variables in CSV file to terms
                    temp = DataFrame(columns=mapping_list)
                    column_to_terms, cde = map_variables_to_terms(
                        directory=directory,
                        assessment_name='participants.tsv',
                        df=temp,
                        apikey=args.key,
                        output_file=os.path.join(directory,
                                                 'participants.json'),
                        json_file=args.json_map)

            for row in participants_data:
                #create session object for subject to be used for participant metadata and image data
                #parse subject id from "sub-XXXX" string
                temp = row['participant_id'].split("-")
                #for ambiguity in BIDS datasets: sometimes participant_id is sub-XXXX and other times it's just XXXX
                if len(temp) > 1:
                    subjid = temp[1]
                else:
                    subjid = temp[0]
                logging.info(subjid)
                session[subjid] = Session(project)

                #add acquisition object
                acq = AssessmentAcquisition(session=session[subjid])

                acq_entity = AssessmentObject(acquisition=acq)
                participant[subjid] = {}
                participant[subjid]['person'] = acq.add_person(
                    attributes=({
                        Constants.NIDM_SUBJECTID: row['participant_id']
                    }))

                #add qualified association of participant with acquisition activity
                acq.add_qualified_association(
                    person=participant[subjid]['person'],
                    role=Constants.NIDM_PARTICIPANT)
                print(acq)

                for key, value in row.items():
                    if not value:
                        continue
                    #for variables in participants.tsv file who have term mappings in BIDS_Constants.py use those, add to json_map so we don't have to map these if user
                    #supplied arguments to map variables
                    if key in BIDS_Constants.participants:

                        #if this was the participant_id, we already handled it above creating agent / qualified association
                        if not (BIDS_Constants.participants[key]
                                == Constants.NIDM_SUBJECTID):
                            acq_entity.add_attributes(
                                {BIDS_Constants.participants[key]: value})

                    #else if user added -mapvars flag to command line then we'll use the variable-> term mapping procedures to help user map variables to terms (also used
                    # in CSV2NIDM.py)
                    else:

                        # WIP: trying to add new support for CDEs...
                        add_attributes_with_cde(prov_object=acq_entity,
                                                cde=cde,
                                                row_variable=key,
                                                value=value)
                        # if key in column_to_terms:
                        #    acq_entity.add_attributes({QualifiedName(provNamespace(Core.safe_string(None,string=str(key)), column_to_terms[key]["url"]), ""):value})
                        #else:

                        #    acq_entity.add_attributes({Constants.BIDS[key.replace(" ", "_")]:value})

    #create acquisition objects for each scan for each subject

    #loop through all subjects in dataset
    for subject_id in bids_layout.get_subjects():
        logging.info("Converting subject: %s" % subject_id)
        #skip .git directories...added to support datalad datasets
        if subject_id.startswith("."):
            continue

        #check if there's a session number.  If so, store it in the session activity
        session_dirs = bids_layout.get(target='session',
                                       subject=subject_id,
                                       return_type='dir')
        #if session_dirs has entries then get any metadata about session and store in session activity

        #bids_layout.get(subject=subject_id,type='session',extensions='.tsv')
        #bids_layout.get(subject=subject_id,type='scans',extensions='.tsv')
        #bids_layout.get(extensions='.tsv',return_type='obj')

        #check whether sessions have been created (i.e. was there a participants.tsv file?  If not, create here
        if not (subject_id in session):
            session[subject_id] = Session(project)

        for file_tpl in bids_layout.get(subject=subject_id,
                                        extensions=['.nii', '.nii.gz']):
            #create an acquisition activity
            acq = MRAcquisition(session[subject_id])

            #check whether participant (i.e. agent) for this subject already exists (i.e. if participants.tsv file exists) else create one
            if not (subject_id in participant):
                participant[subject_id] = {}
                participant[subject_id]['person'] = acq.add_person(
                    attributes=({
                        Constants.NIDM_SUBJECTID: subject_id
                    }))

            #add qualified association with person
            acq.add_qualified_association(
                person=participant[subject_id]['person'],
                role=Constants.NIDM_PARTICIPANT)

            if file_tpl.entities['datatype'] == 'anat':
                #do something with anatomicals
                acq_obj = MRObject(acq)
                #add image contrast type
                if file_tpl.entities['suffix'] in BIDS_Constants.scans:
                    acq_obj.add_attributes({
                        Constants.NIDM_IMAGE_CONTRAST_TYPE:
                        BIDS_Constants.scans[file_tpl.entities['suffix']]
                    })
                else:
                    logging.info(
                        "WARNING: No matching image contrast type found in BIDS_Constants.py for %s"
                        % file_tpl.entities['suffix'])

                #add image usage type
                if file_tpl.entities['datatype'] in BIDS_Constants.scans:
                    acq_obj.add_attributes({
                        Constants.NIDM_IMAGE_USAGE_TYPE:
                        BIDS_Constants.scans[file_tpl.entities['datatype']]
                    })
                else:
                    logging.info(
                        "WARNING: No matching image usage type found in BIDS_Constants.py for %s"
                        % file_tpl.entities['datatype'])
                #add file link
                #make the file link relative to the BIDS root
                acq_obj.add_attributes({
                    Constants.NIDM_FILENAME:
                    getRelPathToBIDS(join(file_tpl.dirname, file_tpl.filename),
                                     directory)
                })
                #WIP: add absolute location of BIDS directory on disk for later finding of files
                acq_obj.add_attributes({Constants.PROV['Location']: directory})

                #add sha512 sum
                if isfile(join(directory, file_tpl.dirname,
                               file_tpl.filename)):
                    acq_obj.add_attributes({
                        Constants.CRYPTO_SHA512:
                        getsha512(
                            join(directory, file_tpl.dirname,
                                 file_tpl.filename))
                    })
                else:
                    logging.info(
                        "WARNINGL file %s doesn't exist! No SHA512 sum stored in NIDM files..."
                        % join(directory, file_tpl.dirname, file_tpl.filename))
                #get associated JSON file if exists
                #There is a T1w.json file with this information
                json_data = (bids_layout.get(
                    suffix=file_tpl.entities['suffix'],
                    subject=subject_id))[0].metadata
                if len(json_data) > 0:  # metadata is a plain dict
                    for key in json_data:
                        if key in BIDS_Constants.json_keys:
                            if type(json_data[key]) is list:
                                acq_obj.add_attributes({
                                    BIDS_Constants.json_keys[key.replace(
                                        " ", "_")]:
                                    ''.join(str(e) for e in json_data[key])
                                })
                            else:
                                acq_obj.add_attributes({
                                    BIDS_Constants.json_keys[key.replace(
                                        " ", "_")]:
                                    json_data[key]
                                })

                #Parse T1w.json file in BIDS directory to add the attributes contained inside
                if (os.path.isdir(os.path.join(directory))):
                    try:
                        with open(os.path.join(directory,
                                               'T1w.json')) as data_file:
                            dataset = json.load(data_file)
                    except OSError:
                        logging.critical(
                            "Cannot find T1w.json file which is required in the BIDS spec"
                        )
                        exit("-1")
                else:
                    logging.critical(
                        "Error: BIDS directory %s does not exist!" %
                        os.path.join(directory))
                    exit("-1")

                #add various attributes if they exist in BIDS dataset
                for key in dataset:
                    #if key from T1w.json file is mapped to term in BIDS_Constants.py then add to NIDM object
                    if key in BIDS_Constants.json_keys:
                        if type(dataset[key]) is list:
                            acq_obj.add_attributes({
                                BIDS_Constants.json_keys[key]:
                                "".join(dataset[key])
                            })
                        else:
                            acq_obj.add_attributes(
                                {BIDS_Constants.json_keys[key]: dataset[key]})

            elif file_tpl.entities['datatype'] == 'func':
                #do something with functionals
                acq_obj = MRObject(acq)
                #add image contrast type
                if file_tpl.entities['suffix'] in BIDS_Constants.scans:
                    acq_obj.add_attributes({
                        Constants.NIDM_IMAGE_CONTRAST_TYPE:
                        BIDS_Constants.scans[file_tpl.entities['suffix']]
                    })
                else:
                    logging.info(
                        "WARNING: No matching image contrast type found in BIDS_Constants.py for %s"
                        % file_tpl.entities['suffix'])

                #add image usage type
                if file_tpl.entities['datatype'] in BIDS_Constants.scans:
                    acq_obj.add_attributes({
                        Constants.NIDM_IMAGE_USAGE_TYPE:
                        BIDS_Constants.scans[file_tpl.entities['datatype']]
                    })
                else:
                    logging.info(
                        "WARNING: No matching image usage type found in BIDS_Constants.py for %s"
                        % file_tpl.entities['datatype'])
                #make the file link relative to the BIDS root
                acq_obj.add_attributes({
                    Constants.NIDM_FILENAME:
                    getRelPathToBIDS(join(file_tpl.dirname, file_tpl.filename),
                                     directory)
                })
                #WIP: add absolute location of BIDS directory on disk for later finding of files
                acq_obj.add_attributes({Constants.PROV['Location']: directory})

                #add sha512 sum
                if isfile(join(directory, file_tpl.dirname,
                               file_tpl.filename)):
                    acq_obj.add_attributes({
                        Constants.CRYPTO_SHA512:
                        getsha512(
                            join(directory, file_tpl.dirname,
                                 file_tpl.filename))
                    })
                else:
                    logging.info(
                        "WARNINGL file %s doesn't exist! No SHA512 sum stored in NIDM files..."
                        % join(directory, file_tpl.dirname, file_tpl.filename))

                if 'run' in file_tpl.entities:
                    acq_obj.add_attributes({
                        BIDS_Constants.json_keys["run"]:
                        file_tpl.entities['run']
                    })

                #get associated JSON file if exists
                json_data = (bids_layout.get(
                    suffix=file_tpl.entities['suffix'],
                    subject=subject_id))[0].metadata

                if len(json_data) > 0:  # metadata is a plain dict
                    for key in json_data:
                        if key in BIDS_Constants.json_keys:
                            if type(json_data[key]) is list:
                                acq_obj.add_attributes({
                                    BIDS_Constants.json_keys[key.replace(
                                        " ", "_")]:
                                    ''.join(str(e) for e in json_data[key])
                                })
                            else:
                                acq_obj.add_attributes({
                                    BIDS_Constants.json_keys[key.replace(
                                        " ", "_")]:
                                    json_data[key]
                                })
                #get associated events TSV file
                if 'run' in file_tpl.entities:
                    events_file = bids_layout.get(
                        subject=subject_id,
                        extensions=['.tsv'],
                        modality=file_tpl.entities['datatype'],
                        task=file_tpl.entities['task'],
                        run=file_tpl.entities['run'])
                else:
                    events_file = bids_layout.get(
                        subject=subject_id,
                        extensions=['.tsv'],
                        modality=file_tpl.entities['datatype'],
                        task=file_tpl.entities['task'])
                #if there is an events file then this is task-based so create an acquisition object for the task file and link
                if events_file:
                    #for now create acquisition object and link it to the associated scan
                    events_obj = AcquisitionObject(acq)
                    #add prov type, task name as prov:label, and link to filename of events file

                    events_obj.add_attributes({
                        PROV_TYPE:
                        Constants.NIDM_MRI_BOLD_EVENTS,
                        BIDS_Constants.json_keys["TaskName"]:
                        json_data["TaskName"],
                        Constants.NIDM_FILENAME:
                        getRelPathToBIDS(events_file[0].filename, directory)
                    })
                    #link it to appropriate MR acquisition entity
                    events_obj.wasAttributedTo(acq_obj)

                #Parse task-rest_bold.json file in BIDS directory to add the attributes contained inside
                if (os.path.isdir(os.path.join(directory))):
                    try:
                        with open(
                                os.path.join(
                                    directory,
                                    'task-rest_bold.json')) as data_file:
                            dataset = json.load(data_file)
                    except OSError:
                        logging.critical(
                            "Cannot find task-rest_bold.json file which is required in the BIDS spec"
                        )
                        exit("-1")
                else:
                    logging.critical(
                        "Error: BIDS directory %s does not exist!" %
                        os.path.join(directory))
                    exit("-1")

                #add various attributes if they exist in BIDS dataset
                for key in dataset:
                    #if key from task-rest_bold.json file is mapped to term in BIDS_Constants.py then add to NIDM object
                    if key in BIDS_Constants.json_keys:
                        if type(dataset[key]) is list:
                            acq_obj.add_attributes({
                                BIDS_Constants.json_keys[key]:
                                ",".join(map(str, dataset[key]))
                            })
                        else:
                            acq_obj.add_attributes(
                                {BIDS_Constants.json_keys[key]: dataset[key]})

            elif file_tpl.entities['datatype'] == 'dwi':
                #do stuff with with dwi scans...
                acq_obj = MRObject(acq)
                #add image contrast type
                if file_tpl.entities['suffix'] in BIDS_Constants.scans:
                    acq_obj.add_attributes({
                        Constants.NIDM_IMAGE_CONTRAST_TYPE:
                        BIDS_Constants.scans[file_tpl.entities['suffix']]
                    })
                else:
                    logging.info(
                        "WARNING: No matching image contrast type found in BIDS_Constants.py for %s"
                        % file_tpl.entities['suffix'])

                #add image usage type
                if file_tpl.entities['datatype'] in BIDS_Constants.scans:
                    acq_obj.add_attributes({
                        Constants.NIDM_IMAGE_USAGE_TYPE:
                        BIDS_Constants.scans["dti"]
                    })
                else:
                    logging.info(
                        "WARNING: No matching image usage type found in BIDS_Constants.py for %s"
                        % file_tpl.entities['datatype'])
                #make the file link relative to the BIDS root
                acq_obj.add_attributes({
                    Constants.NIDM_FILENAME:
                    getRelPathToBIDS(join(file_tpl.dirname, file_tpl.filename),
                                     directory)
                })
                #add sha512 sum
                if isfile(join(directory, file_tpl.dirname,
                               file_tpl.filename)):
                    acq_obj.add_attributes({
                        Constants.CRYPTO_SHA512:
                        getsha512(
                            join(directory, file_tpl.dirname,
                                 file_tpl.filename))
                    })
                else:
                    logging.info(
                        "WARNINGL file %s doesn't exist! No SHA512 sum stored in NIDM files..."
                        % join(directory, file_tpl.dirname, file_tpl.filename))

                if 'run' in file_tpl.entities:
                    acq_obj.add_attributes({
                        BIDS_Constants.json_keys["run"]:
                        file_tpl.entities['run']
                    })

                #get associated JSON file if exists
                json_data = (bids_layout.get(
                    suffix=file_tpl.entities['suffix'],
                    subject=subject_id))[0].metadata

                if len(json_data) > 0:  # metadata is a plain dict
                    for key in json_data:
                        if key in BIDS_Constants.json_keys:
                            if type(json_data[key]) is list:
                                acq_obj.add_attributes({
                                    BIDS_Constants.json_keys[key.replace(
                                        " ", "_")]:
                                    ''.join(str(e) for e in json_data[key])
                                })
                            else:
                                acq_obj.add_attributes({
                                    BIDS_Constants.json_keys[key.replace(
                                        " ", "_")]:
                                    json_data[key]
                                })
                #for bval and bvec files, what to do with those?

                #for now, create new generic acquisition objects, link the files, and associate with the one for the DWI scan?
                acq_obj_bval = AcquisitionObject(acq)
                acq_obj_bval.add_attributes(
                    {PROV_TYPE: BIDS_Constants.scans["bval"]})
                #add file link to bval files
                acq_obj_bval.add_attributes({
                    Constants.NIDM_FILENAME:
                    getRelPathToBIDS(
                        join(file_tpl.dirname,
                             bids_layout.get_bval(file_tpl.filename)),
                        directory)
                })
                #WIP: add absolute location of BIDS directory on disk for later finding of files
                acq_obj_bval.add_attributes(
                    {Constants.PROV['Location']: directory})

                #add sha512 sum
                if isfile(join(directory, file_tpl.dirname,
                               file_tpl.filename)):
                    acq_obj_bval.add_attributes({
                        Constants.CRYPTO_SHA512:
                        getsha512(
                            join(directory, file_tpl.dirname,
                                 file_tpl.filename))
                    })
                else:
                    logging.info(
                        "WARNINGL file %s doesn't exist! No SHA512 sum stored in NIDM files..."
                        % join(directory, file_tpl.dirname, file_tpl.filename))
                acq_obj_bvec = AcquisitionObject(acq)
                acq_obj_bvec.add_attributes(
                    {PROV_TYPE: BIDS_Constants.scans["bvec"]})
                #add file link to bvec files
                acq_obj_bvec.add_attributes({
                    Constants.NIDM_FILENAME:
                    getRelPathToBIDS(
                        join(file_tpl.dirname,
                             bids_layout.get_bvec(file_tpl.filename)),
                        directory)
                })
                #WIP: add absolute location of BIDS directory on disk for later finding of files
                acq_obj_bvec.add_attributes(
                    {Constants.PROV['Location']: directory})

                if isfile(join(directory, file_tpl.dirname,
                               file_tpl.filename)):
                    #add sha512 sum
                    acq_obj_bvec.add_attributes({
                        Constants.CRYPTO_SHA512:
                        getsha512(
                            join(directory, file_tpl.dirname,
                                 file_tpl.filename))
                    })
                else:
                    logging.info(
                        "WARNINGL file %s doesn't exist! No SHA512 sum stored in NIDM files..."
                        % join(directory, file_tpl.dirname, file_tpl.filename))

                #TODO: link the bval and bvec acquisition object entities together, or rely on their association with the DWI scan

        #Added temporarily to support phenotype files
        #for each *.tsv / *.json file pair in the phenotypes directory
        #WIP: ADD VARIABLE -> TERM MAPPING HERE
        for tsv_file in glob.glob(os.path.join(directory, "phenotype",
                                               "*.tsv")):
            #for now, open the TSV file, extract the row for this subject, store it in an acquisition object and link to
            #the associated JSON data dictionary file
            with open(tsv_file) as phenofile:
                pheno_data = csv.DictReader(phenofile, delimiter='\t')
                for row in pheno_data:
                    subjid = row['participant_id'].split("-")
                    if not subjid[1] == subject_id:
                        continue
                    else:
                        #add acquisition object
                        acq = AssessmentAcquisition(session=session[subjid[1]])
                        #add qualified association with person
                        acq.add_qualified_association(
                            person=participant[subject_id]['person'],
                            role=Constants.NIDM_PARTICIPANT)

                        acq_entity = AssessmentObject(acquisition=acq)

                        for key, value in row.items():
                            if not value:
                                continue
                            #we're using participant_id in NIDM in agent so don't add to assessment as a triple.
                            #BIDS phenotype files seem to have an index column with no column header variable name so skip those
                            if ((not key == "participant_id") and (key != "")):
                                #for now we're using a placeholder namespace for BIDS and simply the variable names as the concept IDs..
                                acq_entity.add_attributes(
                                    {Constants.BIDS[key]: value})

                        #link TSV file
                        acq_entity.add_attributes({
                            Constants.NIDM_FILENAME:
                            getRelPathToBIDS(tsv_file, directory)
                        })
                        #WIP: add absolute location of BIDS directory on disk for later finding of files
                        acq_entity.add_attributes(
                            {Constants.PROV['Location']: directory})

                        #link associated JSON file if it exists
                        data_dict = os.path.join(
                            directory, "phenotype",
                            os.path.splitext(os.path.basename(tsv_file))[0] +
                            ".json")
                        if os.path.isfile(data_dict):
                            acq_entity.add_attributes({
                                Constants.BIDS["data_dictionary"]:
                                getRelPathToBIDS(data_dict, directory)
                            })

    return project, cde
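A hypothetical driver for the function above; args mirrors the attributes the function reads (json_map and key), and the final call assumes PyNIDM's usual serializer on the returned Project:

from argparse import Namespace

# Illustrative path and arguments; json_map=False means "no mapping file".
args = Namespace(json_map=False, key=None)
project, cde = bidsmri2project('/data/my_bids_dataset', args)
print(project.serializeTurtle())  # assumed Project serialization method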
Code example #24
if args['subject'] is not None:
    data_grabber_node_iterables.append(('subject', [args['subject']]))
elif args['subjects'] is not None:

    def get_sub_from_path(path):
        if path.find('sub') == -1 or path.find('ses') == -1:
            return path
        return path[path.find('sub-') + 4:path.find('/ses-')]

    sub_list = []
    for line in open(abspath(args['subjects']), 'r'):
        sub_list.append(get_sub_from_path(line).rstrip())
    data_grabber_node_iterables.append(('subject', sub_list))
else:
    data_grabber_node_iterables.append(('subject', layout.get_subjects()))
data_grabber_node.iterables = data_grabber_node_iterables

data_grabber_node.inputs.raise_on_empty = False  #True
"""
Inputs:
1) fMRI timeseries
2) T1 Structural Image
3) template image

processing:
1) motion correction (MCFLIRT);
    compute mean (fslmaths);
    skull strip (BET);
    scaling within mask: z-scale (python code or fslstats + fslmaths), multiply by -1 = rcFe;
    coregister mean fMRI to skull-stripped T1 (FLIRT, 6 DOF) -> image, mat file
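A minimal Nipype sketch of the first steps the comment block above lists, using standard FSL interfaces (node names and the exact wiring are assumptions, and the z-scaling / rcFe step is omitted):

from nipype import Node, Workflow
from nipype.interfaces import fsl

# Motion-correct the fMRI series, take its temporal mean, skull-strip the
# T1, then register the mean fMRI to the stripped T1 with 6 DOF.
mcflirt = Node(fsl.MCFLIRT(save_plots=True), name='mcflirt')
mean_img = Node(fsl.MeanImage(dimension='T'), name='mean_img')
bet = Node(fsl.BET(), name='bet')  # expects the T1 image as in_file
coreg = Node(fsl.FLIRT(dof=6), name='coregister')

prep = Workflow(name='fmri_prep')
prep.connect([(mcflirt, mean_img, [('out_file', 'in_file')]),
              (mean_img, coreg, [('out_file', 'in_file')]),
              (bet, coreg, [('out_file', 'reference')])])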
Code example #25
File: bids.py, Project: poldracklab/niworkflows
def collect_participants(bids_dir, participant_label=None, strict=False,
                         bids_validate=True):
    """
    List the participants under the BIDS root and check that participants
    designated with the participant_label argument exist in that folder.
    Returns the list of participants to be finally processed.
    Requesting all subjects in a BIDS directory root:
    >>> collect_participants(str(datadir / 'ds114'), bids_validate=False)
    ['01', '02', '03', '04', '05', '06', '07', '08', '09', '10']

    Requesting two subjects, given their IDs:
    >>> collect_participants(str(datadir / 'ds114'), participant_label=['02', '04'],
    ...                      bids_validate=False)
    ['02', '04']

    Requesting two subjects, given their IDs (works with 'sub-' prefixes):
    >>> collect_participants(str(datadir / 'ds114'), participant_label=['sub-02', 'sub-04'],
    ...                      bids_validate=False)
    ['02', '04']

    Requesting two subjects, but one does not exist:
    >>> collect_participants(str(datadir / 'ds114'), participant_label=['02', '14'],
    ...                      bids_validate=False)
    ['02']
    >>> collect_participants(
    ...     str(datadir / 'ds114'), participant_label=['02', '14'],
    ...     strict=True, bids_validate=False)  # doctest: +IGNORE_EXCEPTION_DETAIL
    Traceback (most recent call last):
    fmriprep.utils.bids.BIDSError:
    ...
    """

    if isinstance(bids_dir, BIDSLayout):
        layout = bids_dir
    else:
        layout = BIDSLayout(str(bids_dir), validate=bids_validate)

    all_participants = set(layout.get_subjects())

    # Error: bids_dir does not contain subjects
    if not all_participants:
        raise BIDSError(
            'Could not find participants. Please make sure the BIDS data '
            'structure is present and correct. Datasets can be validated online '
            'using the BIDS Validator (http://bids-standard.github.io/bids-validator/).\n'
            'If you are using Docker for Mac or Docker for Windows, you '
            'may need to adjust your "File sharing" preferences.', bids_dir)

    # No --participant-label was set, return all
    if not participant_label:
        return sorted(all_participants)

    if isinstance(participant_label, str):
        participant_label = [participant_label]

    # Drop sub- prefixes
    participant_label = [sub[4:] if sub.startswith('sub-') else sub for sub in participant_label]
    # Remove duplicates
    participant_label = sorted(set(participant_label))
    # Remove labels not found
    found_label = sorted(set(participant_label) & all_participants)
    if not found_label:
        raise BIDSError('Could not find participants [{}]'.format(
            ', '.join(participant_label)), bids_dir)

    # Warn if some IDs were not found
    notfound_label = sorted(set(participant_label) - all_participants)
    if notfound_label:
        exc = BIDSError('Some participants were not found: {}'.format(
            ', '.join(notfound_label)), bids_dir)
        if strict:
            raise exc
        warnings.warn(exc.msg, BIDSWarning)

    return found_label
Code example #26
import os
import time
from pathlib import Path

from bids import BIDSLayout

t = time.process_time()

proj_root = Path() / '..'
data_raw_dir = proj_root / 'data_raw'
data_bids_dir = proj_root / 'data_bids_test2'
data_deriv_dir = data_bids_dir / 'derivatives'

layout = BIDSLayout(data_bids_dir, validate=True)
subjects = layout.get_subjects()

subj_path_raw = list(data_bids_dir.glob('sub-*/ses-*/meg/*vid*.fif'))  # pathlib globs use forward slashes

for i, sub_id in enumerate(subjects):
    for vid_i in range(1, 5):
        # open file
        template_f = open('template_preproc_report.cbmd', 'r')
        template_str = template_f.read()
        template_f.close()

        # specify sub and condition
        template_str = template_str.replace('{sub_bids_id}', sub_id)
        template_str = template_str.replace('{vid_index}', str(vid_i))

        # save cbmd file
        template_f_subj = open(
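            # hypothetical completion of the truncated call: the output
            # filename pattern below is an assumption
            f'preproc_report_sub-{sub_id}_vid{vid_i}.cbmd', 'w')
        template_f_subj.write(template_str)
        template_f_subj.close()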
Code example #27
File: bids.py, Project: dlevitas/niworkflows
def collect_participants(bids_dir,
                         participant_label=None,
                         strict=False,
                         bids_validate=True):
    """
    List the participants under the BIDS root and check that participants
    designated with the participant_label argument exist in that folder.
    Returns the list of participants to be finally processed.
    Requesting all subjects in a BIDS directory root:

    Examples
    --------
    >>> collect_participants(str(datadir / 'ds114'), bids_validate=False)
    ['01', '02', '03', '04', '05', '06', '07', '08', '09', '10']

    Requesting two subjects, given their IDs:

    >>> collect_participants(str(datadir / 'ds114'), participant_label=['02', '04'],
    ...                      bids_validate=False)
    ['02', '04']

    Requesting two subjects, given their IDs (works with 'sub-' prefixes):

    >>> collect_participants(str(datadir / 'ds114'), participant_label=['sub-02', 'sub-04'],
    ...                      bids_validate=False)
    ['02', '04']

    Requesting two subjects, but one does not exist:

    >>> collect_participants(str(datadir / 'ds114'), participant_label=['02', '14'],
    ...                      bids_validate=False)
    ['02']
    >>> collect_participants(
    ...     str(datadir / 'ds114'), participant_label=['02', '14'],
    ...     strict=True, bids_validate=False)  # doctest: +IGNORE_EXCEPTION_DETAIL
    Traceback (most recent call last):
    BIDSError:
    ...

    """

    if isinstance(bids_dir, BIDSLayout):
        layout = bids_dir
    else:
        layout = BIDSLayout(str(bids_dir), validate=bids_validate)

    all_participants = set(layout.get_subjects())

    # Error: bids_dir does not contain subjects
    if not all_participants:
        raise BIDSError(
            "Could not find participants. Please make sure the BIDS data "
            "structure is present and correct. Datasets can be validated online "
            "using the BIDS Validator (http://bids-standard.github.io/bids-validator/).\n"
            "If you are using Docker for Mac or Docker for Windows, you "
            'may need to adjust your "File sharing" preferences.',
            bids_dir,
        )

    # No --participant-label was set, return all
    if not participant_label:
        return sorted(all_participants)

    if isinstance(participant_label, str):
        participant_label = [participant_label]

    # Drop sub- prefixes
    participant_label = [
        sub[4:] if sub.startswith("sub-") else sub for sub in participant_label
    ]
    # Remove duplicates
    participant_label = sorted(set(participant_label))
    # Remove labels not found
    found_label = sorted(set(participant_label) & all_participants)
    if not found_label:
        raise BIDSError(
            "Could not find participants [{}]".format(
                ", ".join(participant_label)),
            bids_dir,
        )

    # Warn if some IDs were not found
    notfound_label = sorted(set(participant_label) - all_participants)
    if notfound_label:
        exc = BIDSError(
            "Some participants were not found: {}".format(
                ", ".join(notfound_label)),
            bids_dir,
        )
        if strict:
            raise exc
        warnings.warn(exc.msg, BIDSWarning)

    return found_label
Code example #28
    # The figure title should not overlap with the subplots.
    fig.tight_layout(rect=[0, 0.03, 1, 0.95])

    # Save the figure in a subfolder 'Bad_Channels'
    pyplot.savefig(savefile)
    pyplot.close('all')


proj_root = Path() / '..'
data_raw_dir = proj_root / 'data_raw'
data_bids_dir = proj_root / 'data_bids'
data_deriv_dir = data_bids_dir / 'derivatives'

layout = BIDSLayout(data_bids_dir, validate=True)
subjects = layout.get_subjects()[:-1]  # without emptyroom

template = os.path.join('sub-{subject}', 'ses-{session}', 'meg',
                        'sub-{subject}_ses-{session}_task-{task}')

subjects_long_flat = {
    '1011': ('vid2', 'vid4'),
    '1017': ('vid1', 'vid3'),
    '1018': ('vid1', 'vid2'),
    '2003': ('vid1', 'vid2'),
    '2010': ('vid1', 'vid3')
}

for subject in subjects:
    meg_files_subj = layout.get(subject=subject,
                                task='vid*',
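                                # hypothetical completion of the truncated
                                # call; pybids matches entities literally
                                # unless regex_search=True, which lets the
                                # 'vid*' pattern act as a regular expression
                                suffix='meg',
                                regex_search=True)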
Code example #29
              display_mode='z',
              black_bg=True,
              cut_coords=np.arange(-30, 70, 15))

# Notice how left motor cortex is among the ROIs with the highest similarity value?  Unfortunately, we can only plot the similarity values and can't threshold them yet because we didn't calculate any p-values.
#
# We could calculate p-values using a permutation test, but this would require us to repeatedly recalculate the similarity between the two matrices and would take a long time (i.e., 5,000 correlations x 50 ROIs). Plus, the inference we want to make isn't really at the single-subject level, but across participants.
#
# Let's now run this same analysis across all participants and run a one-sample t-test across each ROI.

# ### RSA Group Inference
# Here we calculate the RSA for each ROI for every participant.  This will take a little bit of time to run (30 participants X 50 ROIs).


sub_list = layout.get_subjects(scope='derivatives')

all_sub_similarity = {}
all_sub_motor_rsa = {}
for sub in sub_list:
    file_list = glob.glob(
        os.path.join(data_dir, 'derivatives', 'fmriprep', f'sub-{sub}', 'func',
                     '*denoised*.nii.gz'))
    file_list = [x for x in file_list if 'betas' not in x]
    file_list.sort()
    conditions = [
        os.path.basename(x).split(f'sub-{sub}_')[1].split('_denoised')[0]
        for x in file_list
    ]
    beta = Brain_Data(file_list)
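The loop is cut off here. Once the per-subject, per-ROI similarity values are collected, the group inference described above reduces to a one-sample t-test per ROI; a sketch, assuming all_sub_motor_rsa maps each subject to an array of ROI similarity values:

import numpy as np
from scipy.stats import ttest_1samp

# Stack into a (subjects x ROIs) matrix and test each ROI's mean
# similarity against zero across participants.
rsa_matrix = np.array([all_sub_motor_rsa[sub] for sub in sub_list])
t_vals, p_vals = ttest_1samp(rsa_matrix, popmean=0, axis=0)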
Code example #30
    def check_input(self, layout, gui=True):
        """Method that checks if inputs of the diffusion pipeline are available in the datasets.

        Parameters
        -----------
        layout : bids.BIDSLayout
            BIDSLayout object used to query

        gui : bool
            If True, display message in GUI

        Returns
        -------
        valid_inputs : bool
            True if all inputs of the diffusion pipeline are available
        """
        print('**** Check Inputs  ****')
        diffusion_available = False
        bvecs_available = False
        bvals_available = False
        valid_inputs = False

        if self.global_conf.subject_session == '':
            subject = self.subject
        else:
            subject = "_".join(
                (self.subject, self.global_conf.subject_session))

        dwi_file = os.path.join(self.subject_directory, 'dwi',
                                subject + '_dwi.nii.gz')
        bval_file = os.path.join(self.subject_directory, 'dwi',
                                 subject + '_dwi.bval')
        bvec_file = os.path.join(self.subject_directory, 'dwi',
                                 subject + '_dwi.bvec')

        subjid = self.subject.split("-")[1]

        try:
            layout = BIDSLayout(self.base_directory)
            print("Valid BIDS dataset with %s subjects" %
                  len(layout.get_subjects()))
            for subj in layout.get_subjects():
                self.global_conf.subjects.append('sub-' + str(subj))
            # self.global_conf.subjects = ['sub-'+str(subj) for subj in layout.get_subjects()]
            # self.global_conf.modalities = [
            #     str(mod) for mod in layout.get_modalities()]
            # mods = layout.get_modalities()
            types = layout.get_modalities()
            # print "Available modalities :"
            # for mod in mods:
            #     print "-%s" % mod

            if self.global_conf.subject_session == '':

                files = layout.get(subject=subjid,
                                   suffix='dwi',
                                   extensions='.nii.gz')
                if len(files) > 0:
                    dwi_file = os.path.join(files[0].dirname,
                                            files[0].filename)
                    print(dwi_file)
                else:
                    error(message="Diffusion image not found for subject %s." %
                          subjid,
                          title="Error",
                          buttons=['OK', 'Cancel'],
                          parent=None)
                    return

                files = layout.get(subject=subjid,
                                   suffix='dwi',
                                   extensions='.bval')
                if len(files) > 0:
                    bval_file = os.path.join(files[0].dirname,
                                             files[0].filename)
                    print(bval_file)
                else:
                    error(
                        message="Diffusion bval image not found for subject %s."
                        % subjid,
                        title="Error",
                        buttons=['OK', 'Cancel'],
                        parent=None)
                    return

                files = layout.get(subject=subjid,
                                   suffix='dwi',
                                   extensions='.bvec')
                if len(files) > 0:
                    bvec_file = os.path.join(files[0].dirname,
                                             files[0].filename)
                    print(bvec_file)
                else:
                    error(
                        message="Diffusion bvec file not found for subject %s."
                        % subjid,
                        title="Error",
                        buttons=['OK', 'Cancel'],
                        parent=None)
                    return
            else:
                sessid = self.global_conf.subject_session.split("-")[1]

                files = layout.get(subject=subjid,
                                   suffix='dwi',
                                   extensions='.nii.gz',
                                   session=sessid)
                if len(files) > 0:
                    dwi_file = os.path.join(files[0].dirname,
                                            files[0].filename)
                    print(dwi_file)
                else:
                    error(
                        message="Diffusion image not found for subject %s, session %s."
                        % (subjid, self.global_conf.subject_session),
                        title="Error",
                        buttons=['OK', 'Cancel'],
                        parent=None)
                    return

                files = layout.get(subject=subjid,
                                   suffix='dwi',
                                   extensions='.bval',
                                   session=sessid)
                if len(files) > 0:
                    bval_file = os.path.join(files[0].dirname,
                                             files[0].filename)
                    print(bval_file)
                else:
                    error(
                        message="Diffusion bval file not found for subject %s, session %s."
                        % (subjid, self.global_conf.subject_session),
                        title="Error",
                        buttons=['OK', 'Cancel'],
                        parent=None)
                    return

                files = layout.get(subject=subjid,
                                   suffix='dwi',
                                   extensions='.bvec',
                                   session=sessid)
                if len(files) > 0:
                    bvec_file = os.path.join(files[0].dirname,
                                             files[0].filename)
                    print(bvec_file)
                else:
                    error(
                        message="Diffusion bvec file not found for subject %s, session %s."
                        % (subjid, self.global_conf.subject_session),
                        title="Error",
                        buttons=['OK', 'Cancel'],
                        parent=None)
                    return

            print("Looking for....")
            print("dwi_file : %s" % dwi_file)
            print("bvecs_file : %s" % bvec_file)
            print("bvals_file : %s" % bval_file)

            if os.path.isfile(dwi_file):
                print("DWI available")
                diffusion_available = True

        except Exception:
            error(
                message="Invalid BIDS dataset. Please see documentation for more details.",
                title="Error",
                buttons=['OK', 'Cancel'],
                parent=None)
            return

        if os.path.isfile(bval_file):
            bvals_available = True

        if os.path.isfile(bvec_file):
            bvecs_available = True

        if diffusion_available:
            if bvals_available and bvecs_available:
                self.stages[
                    'Diffusion'].config.diffusion_imaging_model_choices = self.diffusion_imaging_model

                # Copy diffusion data to derivatives / cmp  / subject / dwi
                if self.global_conf.subject_session == '':
                    out_dwi_file = os.path.join(self.derivatives_directory,
                                                'cmp', self.subject, 'dwi',
                                                subject + '_dwi.nii.gz')
                    out_bval_file = os.path.join(self.derivatives_directory,
                                                 'cmp', self.subject, 'dwi',
                                                 subject + '_dwi.bval')
                    out_bvec_file = os.path.join(self.derivatives_directory,
                                                 'cmp', self.subject, 'dwi',
                                                 subject + '_dwi.bvec')
                else:
                    out_dwi_file = os.path.join(
                        self.derivatives_directory, 'cmp', self.subject,
                        self.global_conf.subject_session, 'dwi',
                        subject + '_dwi.nii.gz')
                    out_bval_file = os.path.join(
                        self.derivatives_directory, 'cmp', self.subject,
                        self.global_conf.subject_session, 'dwi',
                        subject + '_dwi.bval')
                    out_bvec_file = os.path.join(
                        self.derivatives_directory, 'cmp', self.subject,
                        self.global_conf.subject_session, 'dwi',
                        subject + '_dwi.bvec')

                # Create the target folder if needed; copies are skipped when
                # the file is already staged in derivatives, so reruns are safe.
                os.makedirs(os.path.dirname(out_dwi_file), exist_ok=True)
                if not os.path.isfile(out_dwi_file):
                    shutil.copy(src=dwi_file, dst=out_dwi_file)
                if not os.path.isfile(out_bvec_file):
                    shutil.copy(src=bvec_file, dst=out_bvec_file)
                if not os.path.isfile(out_bval_file):
                    shutil.copy(src=bval_file, dst=out_bval_file)

                valid_inputs = True
                input_message = 'Inputs check finished successfully.\nDiffusion and morphological data available.'
            else:
                input_message = 'Error during inputs check.\nDiffusion bvec or bval files not available.'
        else:
            if self.global_conf.subject_session == '':
                input_message = 'Error during inputs check. No diffusion data available in folder ' + os.path.join(
                    self.base_directory, self.subject, 'dwi') + '!'
            else:
                input_message = 'Error during inputs check. No diffusion data available in folder ' + os.path.join(
                    self.base_directory, self.subject,
                    self.global_conf.subject_session, 'dwi') + '!'

        # A Check_Input_Notification dialog used to be raised here in GUI mode;
        # both modes now simply report the message and propagate the model choice.
        print(input_message)
        self.global_conf.diffusion_imaging_model = self.diffusion_imaging_model
        self.stages[
            'Registration'].config.diffusion_imaging_model = self.diffusion_imaging_model
        self.stages[
            'Diffusion'].config.diffusion_imaging_model = self.diffusion_imaging_model

        if diffusion_available and bvals_available and bvecs_available:
            valid_inputs = True
        else:
            print("Missing required inputs.")
            error(
                message=
                "Missing diffusion inputs. Please see documentation for more details.",
                title="Error",
                buttons=['OK', 'Cancel'],
                parent=None)

        return valid_inputs
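
The per-modality lookups above all follow one pybids pattern. A minimal standalone sketch of that pattern (a sketch only, assuming a dataset root `bids_dir`; it keeps the older `extensions` keyword used throughout these examples, which newer pybids spells `extension`):

from bids import BIDSLayout


def find_dwi_triplet(bids_dir, subjid, sessid=None):
    """Return (dwi, bval, bvec) paths for one subject, or None if any is missing."""
    layout = BIDSLayout(bids_dir)
    paths = []
    for ext in ['.nii.gz', '.bval', '.bvec']:
        query = dict(subject=subjid, suffix='dwi', extensions=ext,
                     return_type='file')
        if sessid is not None:
            query['session'] = sessid
        files = layout.get(**query)
        if not files:
            return None  # caller decides how to report the missing file
        paths.append(files[0])
    return tuple(paths)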
Code example #31
import os

import numpy as np
import pandas as pd
from bids import BIDSLayout

# NOTE: check_fs is assumed to be a project-local helper (defined elsewhere in
# this codebase) that checks the FreeSurfer results for a subject/session.


def main(**args):

    path = "/mnt/DATA_4Tera/Dati_Sherlock/bids/"

    derivatives = os.path.join(path, "derivatives", "afniproc")
    print("mkdir -p {}".format(derivatives))
    os.system("mkdir -p {}".format(derivatives))

    subj_deriv = os.path.join(derivatives, 'sub-{subject}', "ses-{session}")
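    # subj_deriv stays a str.format template here: it is filled in per
    # subject/session below and reused as the directory part of build_path patterns.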

    layout = BIDSLayout(path)

    subjects = layout.get_subjects()
    sessions = layout.get_sessions()

    for session in sessions:
        for subj in subjects:

            deriv_dir = subj_deriv.format(session=session, subject=subj)
            print("mkdir -p {}".format(deriv_dir))
            os.system("mkdir -p {}".format(deriv_dir))

            # Create anat and func
            anat_dir = os.path.join(subj_deriv,
                                    "{datatype}").format(session=session,
                                                         subject=subj,
                                                         datatype='anat')
            func_dir = os.path.join(subj_deriv,
                                    "{datatype}").format(session=session,
                                                         subject=subj,
                                                         datatype='func')

            print("mkdir -p {}".format(anat_dir))
            os.system("mkdir -p {}".format(anat_dir))

            print("mkdir -p {}".format(func_dir))
            os.system("mkdir -p {}".format(func_dir))

            # Check and convert T1 to send Freesurfer segmentation
            t1 = layout.get(subject=subj, session=session, suffix='T1w')[0]

            entities = t1.get_entities()
            pattern = os.path.join(
                subj_deriv, "{datatype}",
                "sub-{subject}[_ses-{session}][_desc-{desc}]_{suffix}.{extension}"
            )
            entities['desc'] = 'fsprep'
            t1_fs = layout.build_path(entities, pattern, validate=False)

            entities['extension'] = 'txt'
            t1_log = layout.build_path(entities, pattern, validate=False)

            command = "check_dset_for_fs.py -input %s -fix_all -fix_out_prefix %s -fix_out_vox_dim 1 -verb > %s"
            command = command % (t1.path, t1_fs, t1_log)
            print(command)
            os.system(command)

            check_fs(layout, subj, session, subj_deriv)

            ### T1 to MNI space coreg
            entities = t1.get_entities()
            entities['desc'] = 'fsprep'
            entities['space'] = 'MNI152'

            pattern = os.path.join(
                subj_deriv, "{datatype}",
                "sub-{subject}[_ses-{session}][_space-{space}][_desc-{desc}]_{suffix}.{extension}"
            )
            t1_mni = layout.build_path(entities, pattern, validate=False)

            command = "@auto_tlrc -base MNI152_2009_template.nii.gz -pad_base 35 -prefix {prefix} -input {input}"
            command = command.format(prefix=t1_mni, input=t1_fs)
            print(command)
            os.system(command)

            runs = layout.get_runs()
            ordered_bold = []
            for run in runs:
                fname = layout.get(session=session,
                                   subject=subj,
                                   run=run,
                                   suffix='bold')
                if len(fname) != 0:
                    ordered_bold.append(fname[0])

            # Slice time correction - motion correction - align EPI to Anat to MNI
            # (unordered query; bold[0] is only used for metadata and filenames)
            bold = layout.get(subject=subj,
                              session=session,
                              suffix='bold',
                              extension='nii.gz')

            # Write slice timing as a one-row 1D text file for AFNI's -tshift_opts
            slice_timing = np.array(bold[0].get_metadata()['SliceTiming'])
            slice_fname = os.path.join(path, "slice_timing.txt")
            np.savetxt(slice_fname,
                       slice_timing,
                       delimiter=' ',
                       newline=' ',
                       fmt='%.5f')

            epi = layout.get(subject=subj,
                             session=session,
                             suffix='bold',
                             extension='nii.gz',
                             run=1)[0].path
            child_epi = " ".join([b.path for b in ordered_bold])

            command = "align_epi_anat.py -anat {anat} -epi {epi} -child_epi {child_epi}"+\
                      " -epi_base 0 -tshift_opts -tpattern {tpattern} -epi2anat -giant_move"+\
                      " -tlrc_apar {tlrc_apar}"
            command = command.format(anat=t1_fs,
                                     epi=epi,
                                     child_epi=child_epi,
                                     tpattern=slice_fname,
                                     tlrc_apar=t1_mni)
            print(command)
            os.system(command)

            # Create mask
            automask = " ".join(
                [b.filename[:-7] + '_tlrc_al+tlrc.HEAD' for b in bold])
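            # filename[:-7] strips '.nii.gz'; AFNI wrote each aligned run as a
            # <stem>_tlrc_al+tlrc HEAD/BRIK pair in the working directory.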
            mean_mask_prefix = 'mean_mni.nii.gz'
            command = '3dTstat -prefix {prefix} {input}'.format(
                prefix=mean_mask_prefix, input=automask)
            print(command)
            os.system(command)

            entities = bold[0].get_entities()
            entities['suffix'] = 'mask'
            entities['space'] = 'MNI152'

            pattern = os.path.join(
                subj_deriv, "{datatype}",
                "sub-{subject}[_ses-{session}][_space-{space}][_desc-{desc}]_{suffix}.{extension}"
            )
            mask_prefix = layout.build_path(entities, pattern, validate=False)
            command = '3dAutomask -prefix {prefix} {input}'.format(
                input=mean_mask_prefix, prefix=mask_prefix)
            print(command)
            os.system(command)

            # Clean files
            removed = " ".join([b.filename[:-7] + '_al+orig.*' for b in bold])
            command = "rm " + removed
            print(command)
            os.system(command)

            command = "rm __tt_*.*"
            print(command)
            os.system(command)

            command = "rm malldump.*"
            print(command)
            os.system(command)

            # Put files in BIDS
            header = [
                'trans_x', 'trans_y', 'trans_z', 'rot_x', 'rot_y', 'rot_z'
            ]
            motion_ordered = list()
            # NOTE: `bold` is rebound below from the run list to a single run file
            for bold in ordered_bold:

                motion_fname = bold.filename[:-7] + "_vr_motion.1D"
                motion = np.genfromtxt(motion_fname)

                motion_ordered.append(motion_fname)

                entities = bold.get_entities()
                entities['suffix'] = 'motion'
                entities['desc'] = 'volreg'
                entities['extension'] = 'tsv'

                pattern = os.path.join(
                    subj_deriv, "{datatype}",
                    "sub-{subject}_ses-{session}_task-{task}_run-{run:02d}_desc-{desc}_{suffix}.{extension}"
                )
                motion_bids = pattern.format(**entities)

                motion = motion[:, [3, 4, 5, 0, 1, 2]]
                np.savetxt(motion_bids,
                           motion,
                           fmt="%f",
                           delimiter="\t",
                           header="\t".join(header))

                print("rm " + motion_fname)
                os.system("rm " + motion_fname)

                for desc in ['mat', 'reg_mat', 'tlrc_mat']:
                    affine_fname = bold.filename[:-7] + "_al_" + desc + ".aff12.1D"
                    entities = bold.get_entities()
                    entities['suffix'] = 'affine'
                    entities['desc'] = desc.replace("_", "")
                    entities['extension'] = 'tsv'

                    pattern = os.path.join(
                        subj_deriv, "{datatype}",
                        "sub-{subject}_ses-{session}_task-{task}_run-{run:02d}_desc-{desc}_{suffix}.{extension}"
                    )
                    affine_bids = pattern.format(**entities)

                    command = "mv {0} {1}".format(affine_fname, affine_bids)
                    print(command)
                    os.system(command)

                afni_bold = bold.filename[:-7] + "_tlrc_al+tlrc"
                entities = bold.get_entities()
                entities['desc'] = "afniproc"
                entities['extension'] = 'nii.gz'
                entities['space'] = 'MNI152'
                pattern = os.path.join(
                    subj_deriv, "{datatype}",
                    "sub-{subject}_ses-{session}_task-{task}_run-{run:02d}_space-{space}_desc-{desc}_{suffix}.{extension}"
                )

                afni_bids = pattern.format(**entities)
                command = "3dcopy {0} {1}".format(afni_bold, afni_bids)
                print(command)
                os.system(command)

                print("rm {}*".format(afni_bold))
                os.system("rm {}*".format(afni_bold))

            # Create confound regressors
            # Motion

            motion_files = list()
            for run in runs:
                # task label matches the session label in this dataset
                f = layout.get(subject=subj,
                               session=session,
                               task=session,
                               run=run,
                               suffix='motion')
                if len(f) != 0:
                    motion_files.append(f[0])

            motion_df = [
                pd.read_csv(m.path, delimiter="\t") for m in motion_files
            ]

            motion_demean = [m - m.mean(0) for m in motion_df]
            motion_demean = pd.concat(motion_demean)

            entities = motion_files[0].get_entities()
            entities['desc'] = 'demean'

            pattern = os.path.join(
                subj_deriv, "{datatype}",
                "sub-{subject}_ses-{session}_desc-{desc}_{suffix}.{extension}")
            demean_fname = pattern.format(**entities)
            motion_demean.to_csv(demean_fname,
                                 header=False,
                                 index=False,
                                 sep="\t")

            # First-difference ("derivative") motion regressors, demeaned as above
            motion_deriv = [m.diff() for m in motion_df]
            motion_deriv = [m.fillna(0) for m in motion_deriv]
            motion_deriv = [m - m.mean(0) for m in motion_deriv]
            motion_deriv = pd.concat(motion_deriv)

            entities['desc'] = 'deriv'
            deriv_fname = pattern.format(**entities)
            motion_deriv.to_csv(deriv_fname,
                                header=False,
                                index=False,
                                sep="\t")

            tr_counts = [m.shape[0] for m in motion_df]

            # 1dBport with -invert emits regressors for the frequencies to be
            # removed (outside the 0.01-999 Hz band, i.e. slow drift), per run.
            for j, t in enumerate(tr_counts):
                command = "1dBport -nodata {ntr} 1 -band 0.01 999 -invert -nozero > bpass.1D".format(
                    ntr=t)
                print(command)
                os.system(command)

                command = "1d_tool.py -infile bpass.1D -pad_into_many_runs {run:1d} {n_runs}"+\
                          " -set_run_lengths {tr_counts} -write bpass.{run:02d}.1D"
                command = command.format(run=j + 1,
                                         n_runs=str(len(tr_counts)),
                                         tr_counts=" ".join(
                                             [str(t) for t in tr_counts]))
                print(command)
                os.system(command)

            entities['desc'] = 'bpass'
            entities['suffix'] = 'timeseries'
            entities['extension'] = '1D'
            bpass_fname = pattern.format(**entities)

            command = "1dcat bpass.*.1D > {}".format(bpass_fname)
            print(command)
            os.system(command)

            command = "rm bpass*"
            print(command)
            os.system(command)
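
main ignores its keyword arguments and is driven entirely by the hard-coded dataset path, so a guarded call is all the entry point needs (a sketch; it assumes AFNI and the helper scripts are installed):

if __name__ == "__main__":
    main()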
Code example #32
import os

from bids import BIDSLayout

dataset_path = "data/friends"
derivatives_path = os.path.join(dataset_path, "derivatives/fmriprep-20.1.0/fmriprep")
out_path = "utils/s1_file_paths.txt"

layout = BIDSLayout(dataset_path, derivatives=derivatives_path)
layout.save("data/external/pybids_cache")
print(layout.get_subjects())
# With regex_search=True an empty string matches any subject/session, and
# "^bold$" pins the suffix exactly.
file_paths = layout.get(subject="", session="", task=["s01", "s02"], suffix="^bold$", extension="nii.gz",
                        scope="derivatives", regex_search=True, return_type="file")

print(file_paths)
with open(out_path, "w") as f:
    for path in file_paths:
        f.write(path+"\n")

print("Paths written to {}.".format(out_path))