Example #1
def get_files():
    with open('dset_config.json', 'r') as fo:
        CONFIG = json.load(fo)

    DATA_DIR = op.abspath('/home/data/nbc/external-datasets/ds001491/')

    all_info = {}
    for dset_name in list(CONFIG.keys())[:3]:
        layout = BIDSLayout(op.join(DATA_DIR, dset_name))
        cfg = CONFIG[dset_name]
        task = cfg['task']
        dset_info = {}
        for sub in layout.get_subjects():
            runs = layout.get_runs(subject=sub, task=task)
            sub_info = {}
            for run in runs:
                run_info = {}
                run_info['files'] = []
                run_info['echo_times'] = []
                for echo in sorted(
                        layout.get_echoes(subject=sub, task=task, run=run)):
                    raw_files = layout.get(subject=sub,
                                           task=task,
                                           run=run,
                                           echo=echo,
                                           extensions='.nii.gz')
                    preproc_files = layout.get(subject=sub,
                                               task=task,
                                               run=run,
                                               root='afni-step1',
                                               echo=echo,
                                               extensions='.nii.gz',
                                               desc='realign')
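                    # NOTE: the assignment below overrides the preprocessed
                    # query above and uses the raw files instead (kept as in
                    # the source; presumably a temporary substitution).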
                    preproc_files = raw_files[:]
                    if len(preproc_files) != 1:
                        print(preproc_files)
                        raise Exception('Expected exactly one preprocessed file per echo')

                    # Replace filename with path when using new version of bids
                    run_info['files'].append(preproc_files[0].filename)
                    metadata = layout.get_metadata(preproc_files[0].filename)
                    run_info['echo_times'].append(metadata['EchoTime'])
                sub_info[run] = run_info
            dset_info[sub] = sub_info
        all_info[dset_name] = dset_info

    with open('all_files.json', 'w') as fo:
        json.dump(all_info, fo, indent=4, sort_keys=True)
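This snippet assumes imports from its parent script; a minimal sketch of what it appears to need (an assumption, not part of the original excerpt):

# Imports the snippet above relies on (assumed; not shown in the excerpt)
import json
import os.path as op

from bids import BIDSLayout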
Example #2
        fmap_args = {
            "fmapmag": "NONE",
            "fmapphase": "NONE",
            "echodiff": "NONE",
            "t1samplespacing": "NONE",
            "t2samplespacing": "NONE",
            "unwarpdir": "NONE",
            "avgrdcmethod": "NONE",
            "SEPhaseNeg": "NONE",
            "SEPhasePos": "NONE",
            "echospacing": "NONE",
            "seunwarpdir": "NONE"
        }

        if fieldmap_set:
            t1_spacing = layout.get_metadata(t1ws[0])["DwellTime"]
            t2_spacing = layout.get_metadata(t2ws[0])["DwellTime"]

            # use an unwarpdir specified on the command line
            # this is different from the SE direction
            unwarpdir = args.anat_unwarpdir

            fmap_args.update({
                "t1samplespacing": "%.8f" % t1_spacing,
                "t2samplespacing": "%.8f" % t2_spacing,
                "unwarpdir": unwarpdir
            })

            if fieldmap_set[0]["suffix"] == "phasediff":
                merged_file = "%s/tmp/%s/magfile.nii.gz" % (args.output_dir,
                                                            subject_label)
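For context, layout.get_metadata() returns the merged JSON sidecar for a file as a plain dict, which is where fields such as "DwellTime" come from above. A small illustrative check, not part of the original script:

# Illustrative only: inspect the sidecar metadata pybids associates with a file
meta = layout.get_metadata(t1ws[0])   # dict of BIDS sidecar fields
dwell = meta.get("DwellTime")         # None if the field is missing
print(sorted(meta.keys()))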
Example #3
File: bids_.py  Project: szho42/banana
    def find_data(self, subject_ids=None, visit_ids=None):
        """
        Return subject and session information for a project in the local
        repository

        Parameters
        ----------
        subject_ids : list(str)
            List of subject IDs with which to filter the tree. If None, all
            are returned
        visit_ids : list(str)
            List of visit IDs with which to filter the tree. If None, all
            are returned

        Returns
        -------
        project : arcana.repository.Tree
            A hierarchical tree of subject, session and fileset information for
            the repository
        """
        filesets = []
        layout = BIDSLayout(self.root_dir)
        all_subjects = layout.get_subjects()
        all_visits = layout.get_sessions()
        if not all_visits:
            all_visits = [self.DEFAULT_VISIT_ID]
            self._depth = 1
        else:
            self._depth = 2
        for item in layout.get(return_type='object'):
            if item.path.startswith(self.derivatives_dir):
                # We handle derivatives using the BasicRepo base
                # class methods
                continue
            if not hasattr(item, 'entities') or not item.entities.get('suffix',
                                                                      False):
                logger.warning("Skipping unrecognised file '{}' in BIDS tree"
                               .format(op.join(item.dirname, item.filename)))
                continue  # Ignore unrecognised file
            # Use item-local ID lists so the subject_ids/visit_ids arguments
            # are not clobbered before they are passed to super().find_data()
            try:
                item_subject_ids = [item.entities['subject']]
            except KeyError:
                # If the item exists at the top level of the directory
                # structure it is inferred to exist for all subjects in the
                # tree
                item_subject_ids = all_subjects
            try:
                item_visit_ids = [item.entities['session']]
            except KeyError:
                # If the item exists at the top level of the directory
                # structure it is inferred to exist for all visits in the tree
                item_visit_ids = all_visits
            for subject_id in item_subject_ids:
                for visit_id in item_visit_ids:
                    aux_files = {}
                    metadata = layout.get_metadata(item.path)
                    if metadata and not item.path.endswith('.json'):
                        # Write out the combined JSON side cars to a temporary
                        # file to include in extended NIfTI filesets
                        metadata_path = op.join(
                            self.metadata_dir,
                            'sub-{}'.format(subject_id),
                            'ses-{}'.format(visit_id),
                            item.filename + '.json')
                        os.makedirs(op.dirname(metadata_path), exist_ok=True)
                        if not op.exists(metadata_path):
                            with open(metadata_path, 'w') as f:
                                json.dump(metadata, f)
                        aux_files['json'] = metadata_path
                    fileset = BidsFileset(
                        path=op.join(item.dirname, item.filename),
                        type=item.entities['suffix'],
                        subject_id=subject_id, visit_id=visit_id,
                        repository=self,
                        modality=item.entities.get('modality', None),
                        task=item.entities.get('task', None),
                        aux_files=aux_files)
                    filesets.append(fileset)
        # Get derived filesets, fields and records using the method in the
        # BasicRepo base class
        derived_filesets, fields, records = super().find_data(
            subject_ids=subject_ids, visit_ids=visit_ids)
        filesets.extend(derived_filesets)
        return filesets, fields, records
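The loop above relies on the BIDSFile objects returned by layout.get(return_type='object'); a minimal sketch of the attributes find_data() touches, written against recent pybids as an assumption:

# Minimal sketch of the BIDSFile attributes used above (recent pybids)
from bids import BIDSLayout

layout = BIDSLayout('/path/to/bids')              # placeholder path
for item in layout.get(return_type='object'):
    print(item.path)                              # absolute path to the file
    print(item.filename)                          # file name only
    print(item.dirname)                           # containing directory
    print(item.entities.get('subject'))           # parsed BIDS entities
    print(item.entities.get('suffix'))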
Example #4
def main():
    parser = makeParser()
    results = parser.parse_args()

    if results.boutiques:
        createDescriptor(parser, results)
        return 0

    verb = results.verbose
    fsldir = results.fsldir
    mni152 = op.join(fsldir, "data", "standard", "MNI152_T1_2mm_brain.nii.gz")
    mni152bn = op.basename(mni152).split(".")[0]

    outdir = results.output_dir
    partis = results.participant_label
    labels = results.parcellation

    if verb:
        print("BIDS Dir: {0}".format(results.bids_dir), flush=True)
        print("Output Dir: {0}".format(results.output_dir), flush=True)
        print("Analysis level: {0}".format(results.analysis_level), flush=True)

    # This preprocessing workflow is modified from the FSL recommendations here:
    #    https://fsl.fmrib.ox.ac.uk/fsl/fslwiki/FDT/UserGuide

    # Step 0, 1: Begin interrogation of BIDS dataset

    # Due to a current super-linear slowdown in BIDSLayout indexing, exclude
    # all but the participants of interest. Explored in the following GitHub
    # issue:
    #   https://github.com/bids-standard/pybids/issues/285
    if partis is not None:
        pattrn = 'sub-(?!{0})(.*)$'.format("|".join(partis))
    else:
        pattrn = ''

    dset = BIDSLayout(results.bids_dir, exclude=pattrn)
    subjects = dset.get_subjects()
    if results.participant_label is not None:
        subjects = [pl for pl in results.participant_label if pl in subjects]
        assert (len(subjects) > 0)
    if verb:
        print("Participants: {0}".format(", ".join(s for s in subjects)),
              flush=True)

    sessions = dset.get_sessions()
    if results.session_label is not None:
        sessions = [sl for sl in results.session_label if sl in sessions]
        assert (len(sessions) > 0)
    if verb:
        print("Sessions: {0}".format(", ".join(s for s in sessions)),
              flush=True)

    # Step 0, 2: Prune dataset to subjects/sessions that have necessary files
    ftypes = [".nii", ".bval", ".bvec"]
    collections = []
    for subj in subjects:
        for sess in sessions:
            tf_dwi = dset.get(subject=subj,
                              session=sess,
                              datatype="dwi",
                              suffix="dwi",
                              return_type="file")
            tf_anat = dset.get(subject=subj,
                               session=sess,
                               datatype="anat",
                               suffix="T1w",
                               return_type="file")
            if (all(any(ftype in fl for fl in tf_dwi) for ftype in ftypes)
                    and any(ftypes[0] in fl for fl in tf_anat)):

                collections += [{
                    "subject":
                    subj,
                    "session":
                    sess,
                    "anat": [t for t in tf_anat if ftypes[0] in t][0],
                    "bval": [t for t in tf_dwi if ftypes[1] in t][0],
                    "bvec": [t for t in tf_dwi if ftypes[2] in t][0],
                    "dwi": [t for t in tf_dwi if ftypes[0] in t][0]
                }]
            else:
                if verb:
                    print("Skipping sub-{0}".format(subj) +
                          " / ses-{0} due to missing data.".format(sess),
                          flush=True)

    complete_collection = []
    for col in collections:
        dwibn = op.basename(col["dwi"]).split('.')[0]
        anatbn = op.basename(col["anat"]).split('.')[0]
        subses = op.join('sub-{0}'.format(col['subject']),
                         'ses-{0}'.format(col['session']))

        derivdir_d = op.join(outdir, subses, "dwi")
        derivdir_a = op.join(outdir, subses, "anat")
        execute("mkdir -p {0}".format(derivdir_d),
                verbose=verb,
                skipif=op.isdir(derivdir_d))
        execute("mkdir -p {0}".format(derivdir_a),
                verbose=verb,
                skipif=op.isdir(derivdir_a))

        # Step 1: Extract B0 volumes

        # Make even number of spatial voxels? (req'd for eddy for some reason)
        # TODO : above, if actually needed - docs inconsistent

        # Get B0 locations
        with open(col["bval"]) as fhandle:
            bvals = fhandle.read().split(" ")
            bvals = [int(b) for b in bvals if b != '' and b != '\n']
            b0_loc = [i for i, b in enumerate(bvals) if b == np.min(bvals)]

        # Get B0 volumes
        col["b0_scans"] = []
        for idx, b0 in enumerate(b0_loc):
            b0ind = "b0_{0}".format(idx)
            col["b0_scans"] += [
                op.join(derivdir_d, dwibn + "_" + b0ind + ".nii.gz")
            ]
            execute(fsl.fslroi(col["dwi"], col["b0_scans"][-1], *[b0, 1]),
                    verbose=verb,
                    skipif=op.isfile(col["b0_scans"][-1]))

        # Merge B0 volumes
        col["b0s"] = op.join(derivdir_d, dwibn + "_b0s.nii.gz")
        execute(fsl.fslmerge(col["b0s"], *col["b0_scans"]),
                verbose=verb,
                skipif=op.isfile(col["b0s"]))

        # Create acquisition parameters file
        col["acqparams"] = op.join(derivdir_d, dwibn + "_acq.txt")
        acqs = {
            "i": "1 0 0",
            "i-": "-1 0 0",
            "j": "0 1 0",
            "j-": "0 -1 0",
            "k": "0 0 1",
            "k-": "0 0 -1"
        }
        with open(col["acqparams"], 'w') as fhandle:
            meta = dset.get_metadata(path=col["dwi"])
            pedir = meta["PhaseEncodingDirection"]
            trout = meta["TotalReadoutTime"]
            line = "{0} {1}".format(acqs[pedir], trout)
            fhandle.write("\n".join([line] * len(b0_loc)))

        # Step 1.5: Run Top-up on Diffusion data
        # TODO: remove; topup only applies with multiple PEs (rare in open data)
        # col["topup"] = op.join(derivdir_d, dwibn + "_topup")
        # col["hifi_b0"] = op.join(derivdir_d, dwibn + "_hifi_b0")
        # execute(fsl.topup(col["b0s"], col["acqparams"],
        #                   col["topup"], col["hifi_b0"]),
        #         verbose=verb)
        # execute(fsl.fslmaths(col["hifi_b0"], "-Tmean", col["hifi_b0"]),
        #         verbose=verb)

        # Step 2: Brain extraction
        # ... Diffusion:
        col["dwi_brain"] = op.join(derivdir_d, dwibn + "_brain.nii.gz")
        col["dwi_mask"] = op.join(derivdir_d, dwibn + "_brain_mask.nii.gz")
        execute(fsl.bet(col["dwi"], col["dwi_brain"], "-F", "-m"),
                verbose=verb,
                skipif=op.isfile(col["dwi_brain"]))

        # ... Structural:
        col["anat_brain"] = op.join(derivdir_a, anatbn + "_brain.nii.gz")
        col["anat_mask"] = op.join(derivdir_a, anatbn + "_brain.nii.gz")
        execute(fsl.bet(col["anat"], col["anat_brain"], "-m"),
                verbose=verb,
                skipif=op.isfile(col["anat_brain"]))

        # Step 3: Produce prelimary DTIfit QC figures
        col["dwi_qc_pre"] = op.join(derivdir_d, dwibn + "_dtifit_pre")
        execute(fsl.dtifit(col["dwi_brain"], col["dwi_qc_pre"],
                           col["dwi_mask"], col["bvec"], col["bval"]),
                verbose=verb,
                skipif=op.isfile(col["dwi_qc_pre"] + "_FA.nii.gz"))

        # Step 4: Perform eddy correction
        # ... Create index
        col["index"] = op.join(derivdir_d, dwibn + "_eddy_index.txt")
        with open(col["index"], 'w') as fhandle:
            fhandle.write(" ".join(["1"] * len(bvals)))

        # ... Run eddy
        col["eddy_dwi"] = op.join(derivdir_d, dwibn + "_eddy")
        if results.gpu:
            eddy_exe = "eddy_cuda8.0"
        else:
            eddy_exe = "eddy_openmp"
        execute(fsl.eddy(col["dwi_brain"],
                         col["dwi_mask"],
                         col["acqparams"],
                         col["index"],
                         col["bvec"],
                         col["bval"],
                         col["eddy_dwi"],
                         exe=eddy_exe),
                verbose=verb,
                skipif=op.isfile(col["eddy_dwi"] + ".nii.gz"))

        # Step 5: Registration to template
        # ... Compute transforms
        col["t1w2mni"] = op.join(derivdir_a, anatbn + "_to_mni_xfm.mat")
        execute(fsl.flirt(col["anat_brain"], omat=col["t1w2mni"], ref=mni152),
                verbose=verb,
                skipif=op.isfile(col["t1w2mni"]))

        col["dwi2t1w"] = op.join(derivdir_d, dwibn + "_to_t1w_xfm.mat")
        execute(fsl.flirt(col["eddy_dwi"],
                          ref=col["anat_brain"],
                          omat=col["dwi2t1w"]),
                verbose=verb,
                skipif=op.isfile(col["dwi2t1w"]))

        col["dwi2mni"] = op.join(derivdir_d, dwibn + "_to_mni_xfm.mat")
        execute(fsl.convert_xfm(concat=col["t1w2mni"],
                                inp=col["dwi2t1w"],
                                omat=col["dwi2mni"]),
                verbose=verb,
                skipif=op.isfile(col["dwi2mni"]))

        # ... Invert transforms towards diffusion space
        col["mni2dwi"] = op.join(derivdir_d, dwibn + "_from_mni_xfm.mat")
        execute(fsl.convert_xfm(inverse=col["dwi2mni"], omat=col["mni2dwi"]),
                verbose=verb,
                skipif=op.isfile(col["mni2dwi"]))

        col["t1w2dwi"] = op.join(derivdir_a, anatbn + "_dwi_xfm.mat")
        execute(fsl.convert_xfm(inverse=col["dwi2t1w"], omat=col["t1w2dwi"]),
                verbose=verb,
                skipif=op.isfile(col["t1w2dwi"]))

        # Step 6: Apply registrations to anatomical and template images
        col["anat_in_dwi"] = op.join(derivdir_a, anatbn + "_brain_dwi.nii.gz")
        execute(fsl.flirt(col["anat_brain"],
                          applyxfm=True,
                          out=col["anat_in_dwi"],
                          init=col["t1w2dwi"],
                          ref=col["eddy_dwi"]),
                verbose=verb,
                skipif=op.isfile(col["anat_in_dwi"]))

        col["mni_in_dwi"] = op.join(
            derivdir_d, ("atlas_" + dwibn + "_" + mni152bn + "_dwi.nii.gz"))
        execute(fsl.flirt(mni152,
                          applyxfm=True,
                          out=col["mni_in_dwi"],
                          init=col["mni2dwi"],
                          ref=col["eddy_dwi"]),
                verbose=verb,
                skipif=op.isfile(col["mni_in_dwi"]))

        # Step 7: Perform tissue segmentation on anatomical images in DWI space
        col["tissue_masks"] = op.join(derivdir_d, anatbn + "_fast")
        execute(fsl.fast(col["anat_in_dwi"],
                         col["tissue_masks"],
                         classes=3,
                         imtype=1),
                verbose=verb,
                skipif=op.isfile(col["tissue_masks"] + "_seg_2.nii.gz"))

        # Step 8: Transform parcellations into DWI space
        col["labels_in_dwi"] = []
        for label in labels:
            lbn = op.basename(label).split('.')[0]
            col["labels_in_dwi"] += [
                op.join(derivdir_d,
                        ("labels_" + dwibn + "_" + lbn + ".nii.gz"))
            ]
            execute(fsl.flirt(label,
                              applyxfm=True,
                              out=col["labels_in_dwi"][-1],
                              init=col["mni2dwi"],
                              ref=col["eddy_dwi"],
                              interp="nearestneighbour"),
                    verbose=verb,
                    skipif=op.isfile(col["labels_in_dwi"][-1]))

        if verb:
            print("Finished processing sub-{0}".format(col["subject"]) +
                  " / ses-{0} !".format(col["session"]),
                  flush=True)
        complete_collection += [col]
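execute() is a helper defined elsewhere in this project; judging from its call sites it runs a shell command unless skipif is true. A rough sketch under that assumption, not the project's actual implementation:

import subprocess

def execute(cmd, verbose=False, skipif=False):
    """Hypothetical reconstruction: run `cmd` in a shell unless `skipif` is set."""
    if skipif:
        if verbose:
            print("Skipping (output already exists): {0}".format(cmd))
        return
    if verbose:
        print(cmd, flush=True)
    subprocess.run(cmd, shell=True, check=True)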
Example #5
def run_rsHRF():
    parser = get_parser()
    args = parser.parse_args()
    arg_groups = {}
    for group in parser._action_groups:
        group_dict = {
            a.dest: getattr(args, a.dest, None)
            for a in group._group_actions
        }
        arg_groups[group.title] = group_dict
    para = arg_groups['Parameters']
    nargs = len(sys.argv)
    temporal_mask = []

    if (not args.GUI) and (args.output_dir is None):
        parser.error(
            '--output_dir is required when executing in command-line interface'
        )

    if (not args.GUI) and (args.estimation is None):
        parser.error(
            '--estimation rule is required when executing in command-line interface'
        )

    if (args.GUI):
        if (nargs == 2):
            try:
                from .rsHRF_GUI import run
                run.run()
            except ModuleNotFoundError:
                parser.error(
                    '--GUI should not be used inside a Docker container')
        else:
            parser.error('no other arguments should be supplied with --GUI')

    if (args.input_file is not None
            or args.ts is not None) and args.analysis_level:
        parser.error(
            'analysis_level cannot be used with --input_file or --ts, do not supply it'
        )

    if (args.input_file is not None
            or args.ts is not None) and args.participant_label:
        parser.error(
            'participant_labels are not to be used with --input_file or --ts, do not supply it'
        )

    if args.input_file is not None and args.brainmask:
        parser.error(
            '--brainmask cannot be used with --input_file, use --atlas instead'
        )

    if args.ts is not None and (args.brainmask or args.atlas):
        parser.error(
            '--atlas or --brainmask cannot be used with --ts, do not supply it'
        )

    if args.bids_dir is not None and not (args.brainmask or args.atlas):
        parser.error(
            '--atlas or --brainmask needs to be supplied with --bids_dir')

    if args.bids_dir is not None and not args.analysis_level:
        parser.error(
            'analysis_level needs to be supplied with bids_dir, choices=[participant]'
        )

    if args.input_file is not None and (not args.input_file.endswith(
        ('.nii', '.nii.gz', '.gii', '.gii.gz'))):
        parser.error(
            '--input_file should end with .gii, .gii.gz, .nii or .nii.gz')

    if args.atlas is not None and (not args.atlas.endswith(
        ('.nii', '.nii.gz', '.gii', '.gii.gz'))):
        parser.error('--atlas should end with .gii, .gii.gz, .nii or .nii.gz')

    if args.ts is not None and not args.ts.endswith('.txt'):
        parser.error('--ts file should end with .txt')

    if args.temporal_mask is not None and not args.temporal_mask.endswith(
            '.dat'):
        parser.error('--temporal_mask file should end with ".dat"')

    if args.temporal_mask is not None:
        with open(args.temporal_mask, 'r') as f:
            for line in f:
                for each in line:
                    if each in ['0', '1']:
                        temporal_mask.append(int(each))

    if args.estimation == 'sFIR' or args.estimation == 'FIR':
        para['T'] = 1

    if args.ts is not None:
        file_type = op.splitext(args.ts)
        if para['TR'] <= 0:
            parser.error('Please supply a valid TR using -TR argument')
        else:
            TR = para['TR']
        para['dt'] = para['TR'] / para['T']
        para['lag'] = np.arange(np.fix(para['min_onset_search'] / para['dt']),
                                np.fix(para['max_onset_search'] / para['dt']) +
                                1,
                                dtype='int')
        fourD_rsHRF.demo_rsHRF(args.ts,
                               None,
                               args.output_dir,
                               para,
                               args.n_jobs,
                               file_type,
                               mode='time-series',
                               temporal_mask=temporal_mask,
                               wiener=args.wiener)

    if args.input_file is not None:
        if args.atlas is not None:
            if (args.input_file.endswith(
                ('.nii', '.nii.gz')) and args.atlas.endswith(
                    ('.gii', '.gii.gz'))) or (args.input_file.endswith(
                        ('.gii', '.gii.gz')) and args.atlas.endswith(
                            ('.nii', '.nii.gz'))):
                parser.error(
                    '--atlas and input_file should be of the same type [NIfTI or GIfTI]'
                )

        # carry analysis with input_file and atlas
        file_type = op.splitext(args.input_file)
        if file_type[-1] == ".gz":
            file_type = op.splitext(file_type[-2])[-1] + file_type[-1]
        else:
            file_type = file_type[-1]
        if ".nii" in file_type:
            TR = (spm_dep.spm.spm_vol(args.input_file).header.get_zooms())[-1]
        else:
            if para['TR'] == -1:
                parser.error('Please supply a valid TR using -TR argument')
            else:
                TR = para['TR']
        if TR <= 0:
            if para['TR'] <= 0:
                parser.error('Please supply a valid TR using -TR argument')
        else:
            if para['TR'] == -1:
                para['TR'] = TR
            elif para['TR'] <= 0:
                print('Invalid TR supplied, using implicit TR: {0}'.format(TR))
                para['TR'] = TR
        para['dt'] = para['TR'] / para['T']
        para['lag'] = np.arange(np.fix(para['min_onset_search'] / para['dt']),
                                np.fix(para['max_onset_search'] / para['dt']) +
                                1,
                                dtype='int')
        fourD_rsHRF.demo_rsHRF(args.input_file,
                               args.atlas,
                               args.output_dir,
                               para,
                               args.n_jobs,
                               file_type,
                               mode='input',
                               temporal_mask=temporal_mask,
                               wiener=args.wiener)

    if args.bids_dir is not None:
        utils.bids.write_derivative_description(args.bids_dir, args.output_dir)
        bids_dir = Path(args.bids_dir)
        fname = bids_dir / 'dataset_description.json'

        if fname.exists():
            desc = json.loads(Path(fname).read_text())
            if 'DataType' in desc:
                if desc['DataType'] != 'derivative':
                    parser.error(
                        'Input data is not a derivative dataset'
                        ' (DataType in dataset_description.json is not equal to "derivative")'
                    )

            else:
                parser.error(
                    'DataType is not defined in the dataset_description.json file. Please make sure DataType is defined. '
                    'Information on the dataset_description.json file can be found online '
                    '(https://bids-specification.readthedocs.io/en/stable/03-modality-agnostic-files.html'
                    '#derived-dataset-and-pipeline-description)')
        else:
            parser.error(
                'Could not find dataset_description.json file. Please make sure the BIDS data '
                'structure is present and correct. Datasets can be validated online '
                'using the BIDS Validator (http://incf.github.io/bids-validator/).'
            )

    if args.bids_dir is not None and args.atlas is not None:
        # carry analysis with bids_dir and 1 atlas
        layout = BIDSLayout(args.bids_dir,
                            validate=False,
                            config=['bids', 'derivatives'])

        if args.participant_label:
            input_subjects = args.participant_label
            subjects_to_analyze = layout.get_subjects(subject=input_subjects)
        else:
            subjects_to_analyze = layout.get_subjects()

        if not subjects_to_analyze:
            parser.error(
                'Could not find participants. Please make sure the BIDS data '
                'structure is present and correct. Datasets can be validated online '
                'using the BIDS Validator (http://incf.github.io/bids-validator/).'
            )

        if not args.atlas.endswith(('.nii', '.nii.gz')):
            parser.error('--atlas should end with .nii or .nii.gz')

        if args.bids_filter_file is not None:
            filter_list = json.loads(Path(args.bids_filter_file).read_text())

            default_input = {
                'extension': 'nii.gz',
                'datatype': 'func',
                'desc': 'preproc',
                'task': 'rest',
                'suffix': 'bold'
            }
            default_input['subject'] = subjects_to_analyze
            default_input.update(filter_list['bold'])

            all_inputs = layout.get(return_type='filename', **default_input)

        else:
            all_inputs = layout.get(return_type='filename',
                                    datatype='func',
                                    subject=subjects_to_analyze,
                                    task='rest',
                                    desc='preproc',
                                    suffix='bold',
                                    extension=['nii', 'nii.gz'])

        if not all_inputs:
            parser.error(
                'There are no files of type *bold.nii / *bold.nii.gz '
                'Please make sure to have at least one file of the above type '
                'in the BIDS specification')
        else:
            num_errors = 0
            for file_count in range(len(all_inputs)):
                try:
                    TR = layout.get_metadata(
                        all_inputs[file_count])['RepetitionTime']
                except KeyError as e:
                    TR = spm_dep.spm.spm_vol(
                        all_inputs[file_count]).header.get_zooms()[-1]
                para['TR'] = TR
                para['dt'] = para['TR'] / para['T']
                para['lag'] = np.arange(
                    np.fix(para['min_onset_search'] / para['dt']),
                    np.fix(para['max_onset_search'] / para['dt']) + 1,
                    dtype='int')
                num_errors += 1
                try:
                    fourD_rsHRF.demo_rsHRF(all_inputs[file_count],
                                           args.atlas,
                                           args.output_dir,
                                           para,
                                           args.n_jobs,
                                           file_type,
                                           mode='bids w/ atlas',
                                           temporal_mask=temporal_mask,
                                           wiener=args.wiener)
                    num_errors -= 1
                except ValueError as err:
                    print(err.args[0])
                except:
                    print("Unexpected error:", sys.exc_info()[0])
            success = len(all_inputs) - num_errors
            if success == 0:
                raise RuntimeError(
                    'Dimensions were inconsistent for all input-mask pairs; \n'
                    'No inputs were processed!')

    if args.bids_dir is not None and args.brainmask:
        # carry analysis with bids_dir and brainmask
        layout = BIDSLayout(args.bids_dir,
                            validate=False,
                            config=['bids', 'derivatives'])

        if args.participant_label:
            input_subjects = args.participant_label
            subjects_to_analyze = layout.get_subjects(subject=input_subjects)
        else:
            subjects_to_analyze = layout.get_subjects()

        if not subjects_to_analyze:
            parser.error(
                'Could not find participants. Please make sure the BIDS data '
                'structure is present and correct. Datasets can be validated online '
                'using the BIDS Validator (http://incf.github.io/bids-validator/).'
            )

        if args.bids_filter_file is not None:
            filter_list = json.loads(Path(args.bids_filter_file).read_text())

            default_input = {
                'extension': 'nii.gz',
                'datatype': 'func',
                'desc': 'preproc',
                'task': 'rest',
                'suffix': 'bold'
            }
            default_input['subject'] = subjects_to_analyze
            default_input.update(filter_list['bold'])

            all_inputs = layout.get(return_type='filename', **default_input)

            default_mask = {
                'extension': 'nii.gz',
                'datatype': 'func',
                'desc': 'brain',
                'task': 'rest',
                'suffix': 'mask'
            }
            default_mask['subject'] = subjects_to_analyze
            default_mask.update(filter_list['mask'])

            all_masks = layout.get(return_type='filename', **default_mask)

        else:
            all_inputs = layout.get(return_type='filename',
                                    datatype='func',
                                    subject=subjects_to_analyze,
                                    task='rest',
                                    desc='preproc',
                                    suffix='bold',
                                    extension=['nii', 'nii.gz'])
            all_masks = layout.get(return_type='filename',
                                   datatype='func',
                                   subject=subjects_to_analyze,
                                   task='rest',
                                   desc='brain',
                                   suffix='mask',
                                   extension=['nii', 'nii.gz'])

        if not all_inputs:
            parser.error(
                'There are no files of type *bold.nii / *bold.nii.gz '
                'Please make sure to have at least one file of the above type '
                'in the BIDS specification')
        if not all_masks:
            parser.error(
                'There are no files of type *mask.nii / *mask.nii.gz '
                'Please make sure to have at least one file of the above type '
                'in the BIDS specification')
        if len(all_inputs) != len(all_masks):
            parser.error(
                'The number of *bold.nii / .nii.gz and the number of '
                '*mask.nii / .nii.gz are different. Please make sure that '
                'there is one mask for each input_file present')

        all_inputs.sort()
        all_masks.sort()

        all_prefix_match = False
        prefix_match_count = 0
        for i in range(len(all_inputs)):
            input_prefix = all_inputs[i].split('/')[-1].split('_desc')[0]
            mask_prefix = all_masks[i].split('/')[-1].split('_desc')[0]
            if input_prefix == mask_prefix:
                prefix_match_count += 1
            else:
                all_prefix_match = False
                break
        if prefix_match_count == len(all_inputs):
            all_prefix_match = True

        if not all_prefix_match:
            parser.error(
                'The mask and input files should have the same prefix for correspondence. '
                'Please consider renaming your files')
        else:
            num_errors = 0
            for file_count in range(len(all_inputs)):
                file_type = all_inputs[file_count].split('bold')[1]
                if file_type == ".nii" or file_type == ".nii.gz":
                    try:
                        TR = layout.get_metadata(
                            all_inputs[file_count])['RepetitionTime']
                    except KeyError as e:
                        TR = spm_dep.spm.spm_vol(
                            all_inputs[file_count]).header.get_zooms()[-1]
                    para['TR'] = TR
                else:
                    spm_dep.spm.spm_vol(all_inputs[file_count])
                    TR = spm_dep.spm.spm_vol(
                        all_inputs[file_count]).get_arrays_from_intent(
                            "NIFTI_INTENT_TIME_SERIES")[0].meta.get_metadata(
                            )["TimeStep"]
                    para['TR'] = float(TR) * 0.001

                para['dt'] = para['TR'] / para['T']
                para['lag'] = np.arange(
                    np.fix(para['min_onset_search'] / para['dt']),
                    np.fix(para['max_onset_search'] / para['dt']) + 1,
                    dtype='int')
                num_errors += 1
                try:
                    fourD_rsHRF.demo_rsHRF(all_inputs[file_count],
                                           all_masks[file_count],
                                           args.output_dir,
                                           para,
                                           args.n_jobs,
                                           mode='bids',
                                           temporal_mask=temporal_mask,
                                           wiener=args.wiener)
                    num_errors -= 1
                except ValueError as err:
                    print(err.args[0])
                except:
                    print("Unexpected error:", sys.exc_info()[0])
            success = len(all_inputs) - num_errors
            if success == 0:
                raise RuntimeError(
                    'Dimensions were inconsistent for all input-mask pairs; \n'
                    'No inputs were processed!')
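The --bids_filter_file read above is expected to be a JSON object with a 'bold' key (and a 'mask' key for the brainmask branch) whose entries override the default pybids query. A small sketch of writing such a file; the entity values are only examples:

import json

# Example filter file matching what run_rsHRF() reads
# (filter_list['bold'] and filter_list['mask']); values are illustrative.
bids_filters = {
    "bold": {"task": "rest", "desc": "preproc"},
    "mask": {"task": "rest", "desc": "brain"},
}
with open("bids_filter.json", "w") as f:
    json.dump(bids_filters, f, indent=2)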
Example #6
                               return_type='file')[0]
subjInfo = pd.read_csv(file_participants, delimiter='\t')




# IMAGE META DATA

# Images for sub-01
listImages_sub01 = layout.get(subject='01',
                              extension=['nii', 'nii.gz'],
                              return_type='file')


# metadata associated with the T1-weighted image
metaT1w = layout.get_metadata(listImages_sub01[0])

# metadata associated with the fMRI (run 1)
metafMRI = layout.get_metadata(listImages_sub01[1])


# locations of meta data files (T1w)
metalocT1w = layout.get(suffix='T1w',
                        extension='json',
                        return_type='file')



# TASK EVENTS

# Task events tsv file for sub-01, run-1
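The snippet is cut off here. Purely as an illustration (not the original code), one plausible pybids query for that events file would be:

# Hypothetical continuation: events.tsv for sub-01, run 1
events_sub01 = layout.get(subject='01',
                          run=1,
                          suffix='events',
                          extension='tsv',
                          return_type='file')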
Example #7
def get_niftis(datapath, subject_id=[]):
    """
    Loads BIDS formatted files.
    """

    from bids.layout import BIDSLayout
    # Get rid of "sub" from "sub-xx"
    subject_id = subject_id[-2:]
    layout = BIDSLayout(datapath)

    func = layout.get(subject=subject_id,
                      modality='func',
                      type='bold',
                      task='SPINN',
                      return_type='file',
                      extensions=['nii', 'nii.gz'])
    # Whole brain EPI
    wb = layout.get(subject=subject_id,
                    modality='func',
                    type='bold',
                    task='wholehead',
                    return_type='file',
                    extensions=['nii', 'nii.gz'])[0]

    func_des = layout.get(subject=subject_id,
                          modality='func',
                          type='bold',
                          return_type='file',
                          extensions=['json'])

    UNI = layout.get(subject=subject_id,
                     modality='anat',
                     type='UNI',
                     return_type='file',
                     extensions=['nii', 'nii.gz'])

    events = layout.get(subject=subject_id,
                        modality='func',
                        type='events',
                        return_type='file',
                        extensions=['txt', 'tsv'])

    physio = layout.get(subject=subject_id,
                        modality='func',
                        type='physio',
                        return_type='file',
                        extensions=['txt', 'tsv'])

    fmaps = layout.get(subject=subject_id,
                       modality='fmap',
                       return_type='file',
                       extensions=['nii', 'nii.gz'])

    subject_id = 'sub-' + subject_id

    fmapMag = [f for f in fmaps
               if layout.get_metadata(f)['ImageType'][2] == 'M']
    fmapPh = [f for f in fmaps
              if layout.get_metadata(f)['ImageType'][2] == 'P'][0]

    if fmapMag:
        fmapM1, fmapM2 = fmapMag
    else:
        fmapM1, fmapM2 = [], []

    return (func, wb, events, physio, UNI, fmapM1, fmapPh, subject_id)
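These queries use the older pybids keywords; in more recent releases the rough equivalents are datatype for modality, suffix for type, and extension for extensions. A hedged sketch of the same functional-bold query against the newer API:

# Approximate equivalent of the `func` query above for newer pybids
func = layout.get(subject=subject_id,
                  datatype='func',
                  suffix='bold',
                  task='SPINN',
                  return_type='file',
                  extension=['nii', 'nii.gz'])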
Example #8
def main(argv=sys.argv):
    parser = generate_parser()
    args = parser.parse_args()

    # Set environment variables for FSL dir based on CLI
    os.environ['FSL_DIR'] = args.fsl_dir
    os.environ['FSLDIR'] = args.fsl_dir
    # for this script's usage of FSL_DIR...
    fsl_dir = args.fsl_dir + '/bin'

    # Load the bids layout
    layout = BIDSLayout(args.bids_dir)
    subsess = read_bids_layout(layout, subject_list=args.subject_list, collect_on_subject=args.collect)

    for subject,sessions in subsess:
        # fmap directory = base dir
        fmap = layout.get(subject=subject, session=sessions, modality='fmap', extensions='.nii.gz')
        base_temp_dir = os.path.dirname(fmap[0].filename)
 
        # Check if fieldmaps are concatenated
        if layout.get(subject=subject, session=sessions, modality='fmap', extensions='.nii.gz', acq='func', dir='both'):
            print("Func fieldmaps are concatenated. Running seperate_concatenate_fm")
            seperate_concatenated_fm(layout, subject, sessions, fsl_dir)
            # recreate layout with the additional SEFMS
            layout = BIDSLayout(args.bids_dir)
        

        fmap = layout.get(subject=subject, session=sessions, modality='fmap', extensions='.nii.gz', acq='func')        
        # Check if there are func fieldmaps and return a list of each SEFM pos/neg pair
        if fmap:
            print("Running SEFM select")
            best_pos, best_neg = sefm_select(layout, subject, sessions,
                                             base_temp_dir, fsl_dir, args.mre_dir,
                                             args.debug)
            for sefm in [x.filename for x in fmap]:
                sefm_json = sefm.replace('.nii.gz', '.json')
                sefm_metadata = layout.get_metadata(sefm)

                if 'Philips' in sefm_metadata['Manufacturer']:
                    insert_edit_json(sefm_json, 'EffectiveEchoSpacing', 0.00062771)
                if 'GE' in sefm_metadata['Manufacturer']:
                    insert_edit_json(sefm_json, 'EffectiveEchoSpacing', 0.000536)
                if 'Siemens' in sefm_metadata['Manufacturer']:
                    insert_edit_json(sefm_json, 'EffectiveEchoSpacing', 0.000510012)

        # Check if there are dwi fieldmaps and insert IntendedFor field accordingly
        if layout.get(subject=subject, session=sessions, modality='fmap', extensions='.nii.gz', acq='dwi'):
            print("Editing DWI jsons")
            edit_dwi_jsons(layout, subject, sessions)
                    


        # Additional edits to the anat json sidecar
        anat = layout.get(subject=subject, session=sessions, modality='anat', extensions='.nii.gz')
        if anat:
            for TX in [x.filename for x in anat]:
                TX_json = TX.replace('.nii.gz', '.json') 
                TX_metadata = layout.get_metadata(TX)
                    #if 'T1' in TX_metadata['SeriesDescription']:

                if 'Philips' in TX_metadata['Manufacturer']:
                    insert_edit_json(TX_json, 'DwellTime', 0.00062771)
                if 'GE' in TX_metadata['Manufacturer']:
                    insert_edit_json(TX_json, 'DwellTime', 0.000536)
                if 'Siemens' in TX_metadata['Manufacturer']:
                    insert_edit_json(TX_json, 'DwellTime', 0.000510012)
        
        # add EffectiveEchoSpacing if it doesn't already exist

        # PE direction vs axis
        func = layout.get(subject=subject, session=sessions, modality='func', extensions='.nii.gz')
        if func:
            for task in [x.filename for x in func]:
                task_json = task.replace('.nii.gz', '.json')
                task_metadata = layout.get_metadata(task)
                if 'Philips' in task_metadata['Manufacturer']:
                    insert_edit_json(task_json, 'EffectiveEchoSpacing', 0.00062771)
                if 'GE' in task_metadata['Manufacturer']:
                    if 'DV25' in task_metadata['SoftwareVersions']:
                        insert_edit_json(task_json, 'EffectiveEchoSpacing', 0.000536)
                    if 'DV26' in task_metadata['SoftwareVersions']:
                        insert_edit_json(task_json, 'EffectiveEchoSpacing', 0.000556)
                if 'Siemens' in task_metadata['Manufacturer']:
                    insert_edit_json(task_json, 'EffectiveEchoSpacing', 0.000510012)                
                if "PhaseEncodingAxis" in task_metadata:
                    insert_edit_json(task_json, 'PhaseEncodingDirection', task_metadata['PhaseEncodingAxis'])
                elif "PhaseEncodingDirection" in task_metadata:
                    insert_edit_json(task_json, 'PhaseEncodingAxis', task_metadata['PhaseEncodingDirection'].strip('-'))
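insert_edit_json() is defined elsewhere in this project; from its call sites it reads a JSON sidecar, sets (or overwrites) a single field, and writes the file back. A rough reconstruction under that assumption:

import json

def insert_edit_json(json_path, field, value):
    """Hypothetical sketch: set `field` to `value` in a JSON sidecar."""
    with open(json_path, 'r') as f:
        data = json.load(f)
    data[field] = value
    with open(json_path, 'w') as f:
        json.dump(data, f, indent=4)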