Example #1
def anon_acqtimes(dset_dir):
    """
    Anonymize acquisition datetimes for a dataset. Works for both longitudinal
    and cross-sectional studies. The time of day is preserved, but the first
    scan is set to January 1st, 1800. In a longitudinal study, each session is
    anonymized relative to the first session, so that time between sessions is
    preserved.

    Overwrites scan tsv files in dataset. Only run this *after* data collection
    is complete for the study, especially if it's longitudinal.

    Parameters
    ----------
    dset_dir : str
        Path to BIDS dataset to be anonymized.
    """
    bl_dt = parser.parse('1800-01-01')

    layout = BIDSLayout(dset_dir)
    subjects = layout.get_subjects()
    sessions = sorted(layout.get_sessions())

    for sub in subjects:
        if not sessions:
            scans_file = op.join(dset_dir,
                                 'sub-{0}/sub-{0}_scans.tsv'.format(sub))
            df = pd.read_csv(scans_file, sep='\t')
            first_scan = df['acq_time'].min()
            first_dt = parser.parse(first_scan.split('T')[0])
            diff = first_dt - bl_dt
            acq_times = df['acq_time'].apply(parser.parse)
            acq_times = (acq_times - diff).astype(str)
            df['acq_time'] = acq_times
            # df.to_csv(scans_file, sep='\t', index=False)
        else:
            # Separated from dataset sessions in case subject missed some
            sub_ses = sorted(layout.get_sessions(subject=sub))
            for i, ses in enumerate(sub_ses):
                scans_file = op.join(dset_dir,
                                     'sub-{0}/ses-{1}/sub-{0}_ses-{1}_scans.'
                                     'tsv'.format(sub, ses))
                df = pd.read_csv(scans_file, sep='\t')
                if i == 0:
                    # Anonymize in terms of first scan for subject.
                    first_scan = df['acq_time'].min()
                    first_dt = parser.parse(first_scan.split('T')[0])
                    diff = first_dt - bl_dt

                acq_times = df['acq_time'].apply(parser.parse)
                acq_times = (acq_times - diff).astype(str)
                df['acq_time'] = acq_times
                # df.to_csv(scans_file, sep='\t', index=False)
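
A minimal usage sketch (not part of the original example): the dataset path is a placeholder, and the imports the excerpt relies on are spelled out explicitly.

import os.path as op

import pandas as pd
from dateutil import parser
from bids.grabbids import BIDSLayout  # on newer pybids: from bids import BIDSLayout

# Placeholder path; per the docstring, run only after data collection is complete.
anon_acqtimes('/data/my_bids_dataset')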
Example #2
def get_bids_surf_data_node(path_bids):
    layout = BIDSLayout(path_bids)
    subjects = layout.get_subjects()
    sessions = layout.get_sessions()
    print("Found {} subjects and {} sessions in the dataset".format(
        len(subjects), len(sessions)))
    bids_data_grabber = Node(Function(
        function=get_bids_surf_data,
        input_names=["path_bids", "subject", "session", "output_dir"],
        output_names=[
            "lh_surf", "rh_surf", "confounds", "outputDir", "prefix"
        ]),
                             name="SurfaceDataGrabber")
    bids_data_grabber.inputs.path_bids = path_bids
    bids_data_grabber.inputs.output_dir = opj(path_bids, "derivatives",
                                              "connectivityWorkflowSurface")
    bids_data_grabber.iterables = [("subject", subjects),
                                   ("session", sessions)]
    return bids_data_grabber
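
A sketch of how the returned grabber node might be dropped into a Nipype workflow; the workflow name, working directory, and core count are assumptions, not part of the original example.

from nipype import Workflow

grabber = get_bids_surf_data_node('/data/my_bids_dataset')  # placeholder path
wf = Workflow(name='surf_connectivity', base_dir='/tmp/work')
wf.add_nodes([grabber])
wf.run('MultiProc', plugin_args={'n_procs': 4})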
Example #3
def get_scan_duration(output_dir, modality="func", task="rest"):
    """

    """
    layout = BIDSLayout(output_dir)
    subjects_list = layout.get_subjects()

    scan_duration = pd.DataFrame([])

    #
    for sub_id in subjects_list:
        sub_dir = os.path.join(output_dir, "sub-" + sub_id)
        ses_id_list = layout.get_sessions(subject=sub_id)

        for ses_id in ses_id_list:
            sub_ses_path = os.path.join(sub_dir, "ses-" + ses_id)
            f = layout.get(subject=sub_id,
                           session=ses_id,
                           modality=modality,
                           task=task,
                           extensions='.nii.gz')
            if len(f) > 1:
                raise Exception(
                    "something went wrong, more than one %s %s file detected: %s"
                    % (modality, task, f))
            elif len(f) == 1:
                duration = (layout.get_metadata(
                    f[0].filename)["ScanDurationSec"])
                scan_duration_sub = pd.DataFrame(
                    OrderedDict([("subject_id", sub_id),
                                 ("sesssion_id", ses_id),
                                 ("scan_duration_s", [duration])]))
                scan_duration = scan_duration.append(scan_duration_sub)

    out_str = modality
    if task:
        out_str += "_" + task
    output_file = os.path.join(output_dir, "scan_duration_%s.tsv" % out_str)
    print("Writing scan duration to %s" % output_file)
    to_tsv(scan_duration, output_file)
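
A usage sketch (the directory is a placeholder). As the code above shows, the call writes scan_duration_func_rest.tsv into that directory via the project-specific to_tsv helper.

get_scan_duration('/data/my_bids_dataset', modality='func', task='rest')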
Example #4
def GetBidsDataGrabberNode(pathBids):
    layout = BIDSLayout(pathBids)
    subjects = layout.get_subjects()
    sessions = layout.get_sessions()
    print("Found {} subjects and {} sessions in the dataset".format(
        len(subjects), len(sessions)))
    #Initialize the dataGrabber node
    BIDSDataGrabber = Node(Function(
        function=get_BidsData,
        input_names=["pathBids", "subject", "session", "outputDir"],
        output_names=[
            "aparcaseg", "preproc", "confounds", "outputDir", "prefix"
        ]),
                           name="FunctionalDataGrabber")
    #Specify path to dataset
    BIDSDataGrabber.inputs.pathBids = pathBids
    BIDSDataGrabber.inputs.outputDir = opj(pathBids, "derivatives",
                                           "connectivityWorkflow")
    #Specify subjects and sessions to iterate over them
    #Stored in iterables for multiprocessing purpose
    BIDSDataGrabber.iterables = [("subject", subjects), ("session", sessions)]
    #Return the node
    return BIDSDataGrabber
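
A sketch wiring the grabber into a downstream node; the reporting node is purely hypothetical and only uses the 'preproc' and 'confounds' outputs declared above.

from nipype import Function, Node, Workflow

def report_inputs(preproc, confounds):
    # Hypothetical consumer that just prints what was grabbed
    print(preproc, confounds)

grabber = GetBidsDataGrabberNode('/data/my_bids_dataset')  # placeholder path
reporter = Node(Function(function=report_inputs,
                         input_names=['preproc', 'confounds'],
                         output_names=[]),
                name='Reporter')

wf = Workflow(name='connectivityWorkflow', base_dir='/tmp/work')
wf.connect([(grabber, reporter, [('preproc', 'preproc'),
                                 ('confounds', 'confounds')])])
wf.run()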
Example #5
import os
import ants
import glob
import shutil
from bids.grabbids import BIDSLayout
import time
from joblib import Parallel, delayed
import multiprocessing

derivatives = '/Volumes/data/prebiostress/data/derivatives'
layout = BIDSLayout(derivatives) # ignore the error here

# do T1
for subj in layout.get_subjects(): # loop over subjects
    print(subj)
    for ses in layout.get_sessions(): # loop over sessions
        targ = os.path.join(derivatives,"sub-"+subj,"ses-"+ses,"anat") # find path to anat folder for this subject and this session
        fpaths = glob.glob(targ+"/*T1w_dn.nii.gz") # find the T1
        fpaths = ''.join(fpaths)
        if len(fpaths) > 0:
            img = ants.image_read(fpaths)

####### TESTING ############
img = ants.image_read('/Volumes/data/prebiostress/data/derivatives/sub-01/ses-1/anat/sub-01_ses-1_T1w_dn.nii.gz',reorient='LSP')
img = ants.image_read('/Volumes/data/prebiostress/data/derivatives/sub-01/ses-1/anat/sub-01_ses-1_T1w_dn.nii.gz')
img = ants.reorient_image2(img,orientation='LSP')
ants.image_write(img,'output.nii')
img.get_sessions()  # note: an ANTsImage has no get_sessions(); this call raises AttributeError

img = ants.registration.reorient_image(img,orientation='LSP')
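
Judging from the testing lines above, the loop body presumably goes on to reorient each denoised T1 to LSP and write it back out; a sketch of that step (the output suffix is an assumption):

img = ants.reorient_image2(img, orientation='LSP')
ants.image_write(img, fpaths[0:-7] + '_lsp.nii.gz')  # hypothetical output name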
Example #6
def generateApine(bids_dir, dset=None):
    """generateApine takes a bids directory and optionally dataset name,
    and generates an Apine JSON object.

    Parameters
    ----------
    bids_dir : str
        The BIDS data directory.
    dset : str
        The dataset name. If none is provided, the directory will be used.

    Returns
    -------
    dict
        Apine dictionary object.
    """
    bids = BIDSLayout(bids_dir)
    apine = list()

    # For every entity...
    for subid in bids.get_subjects():
        current = OrderedDict()
        current["dataset"] = bids_dir if dset is None else dset
        current["participant"] = subid

        if not op.isdir(op.join(bids_dir, 'sub-{}'.format(subid))):
            print("sub-{} detected, but no directory found!!".format(subid))
            continue

        # And for every session...
        nosesh = len(bids.get_sessions()) == 0
        sesh_array = [None] if nosesh else bids.get_sessions()
        for sesid in sesh_array:
            if not nosesh:
                current["session"] = sesid

            # And for every modality...
            for mod in bids.get_modalities():
                current["modality"] = mod

                # Get corresponding data
                if nosesh:
                    data = bids.get(subject=subid,
                                    modality=mod,
                                    extensions="nii|nii.gz")
                else:
                    data = bids.get(subject=subid,
                                    session=sesid,
                                    modality=mod,
                                    extensions="nii|nii.gz")

                # Now, for every piece of data for this participant, session, and modality...
                for dat in data:
                    # Add the filename
                    current["filename"] = op.basename(dat.filename)
                    cleanname = op.basename(dat.filename).split('.')[0]
                    current["filename_keys"] = [
                        keyval for keyval in cleanname.split("_")
                        if "sub-" not in keyval and "ses-" not in keyval
                    ]
                    tmp = deepcopy(current)
                    apine += [tmp]

    return apine
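
A usage sketch dumping the returned list of dicts to JSON; the dataset path, dataset name, and output filename are placeholders.

import json

apine = generateApine('/data/my_bids_dataset', dset='my_study')
with open('apine.json', 'w') as fobj:
    json.dump(apine, fobj, indent=2)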
Example #7
import os
import ants
import glob
import shutil
from bids.grabbids import BIDSLayout
import time
from joblib import Parallel, delayed
import multiprocessing

derivatives = '/Volumes/data/prebiostress/data/derivatives'
layout = BIDSLayout(derivatives) # ignore the error here

# do T1
for subj in layout.get_subjects(): # loop over subjects
    print(subj)
    for ses in layout.get_sessions(): # loop over sessions
        targ = os.path.join(derivatives,"sub-"+subj,"ses-"+ses,"anat") # find path to anat folder for this subject and this session
        fpaths = glob.glob(targ+"/*T1w.nii.gz") # find the T1
        fpaths = ''.join(fpaths)
        if len(fpaths) > 0:
            img = ants.image_read(fpaths)
            mask  = ants.get_mask(img)
            dn = ants.denoise_image(img,mask,noise_model='Gaussian')
            ants.image_write(dn,fpaths[0:-7]+'_dn.nii.gz',)

# do T2
def runeachsess(subj,ses):
        targ = os.path.join(derivatives,"sub-"+subj,"ses-"+ses,"anat") # find path to anat folder for this subject and this session
        fpaths = glob.glob(targ+"/*T2w.nii.gz") # find the T2
        fpaths = ''.join(fpaths)
        if len(fpaths) > 0:
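
Given the joblib imports at the top of the script, the per-session helper is presumably driven over every subject/session pair in parallel; a sketch of that call (not shown in the excerpt):

num_cores = multiprocessing.cpu_count()
Parallel(n_jobs=num_cores)(
    delayed(runeachsess)(subj, ses)
    for subj in layout.get_subjects()
    for ses in layout.get_sessions())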
Example #8
def main():
    parser = argparse.ArgumentParser(
        description='Example BIDS App entrypoint script.')
    parser.add_argument('app_descriptor_file', help='app descriptor')
    parser.add_argument('invocation_file', help='invocation file')
    args = parser.parse_args()

    descriptor_dict = json.load(open(args.app_descriptor_file))
    levels = None
    session_support = False
    for input in descriptor_dict['inputs']:
        if input['id'] == 'analysis_level':
            levels = input['value-choices']
        elif input['id'] == 'session_label':
            session_support = True
    assert levels, "analysis_level must have value-choices"
    invocation_dict = json.load(open(args.invocation_file))
    bids_dir = invocation_dict['bids_dir']
    layout = BIDSLayout(bids_dir)

    if 'participant_label' in invocation_dict.keys():
        participants_to_analyze = invocation_dict['participant_label']
    # for all subjects
    else:
        participants_to_analyze = layout.get_subjects()

    if 'session_label' in invocation_dict.keys():
        sessions_to_analyze = invocation_dict['session_label']
    # for all sessions
    else:
        sessions_to_analyze = layout.get_sessions()

    if not session_support:
        sessions_to_analyze = None

    print(levels)
    print(participants_to_analyze)
    print(sessions_to_analyze)

    dep_ids = []
    for level in levels:
        id_sources = []
        if level.startswith('session') and session_support:
            for participant in participants_to_analyze:
                for session in sessions_to_analyze:
                    filename = "level-%s_sub-%s_ses-%s_subtask.json" % (
                        level, participant, session)
                    prepare_and_save_subtask(
                        tool_class=args.app_descriptor_file,
                        app_name=descriptor_dict['name'],
                        filename=filename,
                        invocation_dict=invocation_dict,
                        participant_label=participant,
                        session_label=session,
                        analysis_level=level,
                        dep_ids=dep_ids)
                    id_sources.append(filename.replace('.json', '.*bid'))

            dep_ids = get_dep_ids(id_sources)

        elif level.startswith('participant'):
            for participant in participants_to_analyze:
                filename = "level-%s_sub-%s_subtask.json" % (level,
                                                             participant)
                prepare_and_save_subtask(tool_class=args.app_descriptor_file,
                                         app_name=descriptor_dict['name'],
                                         filename=filename,
                                         invocation_dict=invocation_dict,
                                         participant_label=participant,
                                         session_label=sessions_to_analyze,
                                         analysis_level=level,
                                         dep_ids=dep_ids)
                id_sources.append(filename.replace('.json', '.*bid'))

            dep_ids = get_dep_ids(id_sources)

        elif level.startswith('group'):
            filename = "level-%s_subtask.json" % (level)
            prepare_and_save_subtask(tool_class=args.app_descriptor_file,
                                     app_name=descriptor_dict['name'],
                                     filename=filename,
                                     invocation_dict=invocation_dict,
                                     participant_label=participants_to_analyze,
                                     session_label=sessions_to_analyze,
                                     analysis_level=level,
                                     dep_ids=dep_ids)
            id_sources.append(filename.replace('.json', '.*bid'))
            dep_ids = get_dep_ids(id_sources)
Example #9
    if args.freesurfer_dir:
        freesurfer_dir = args.freesurfer_dir
    else:
        freesurfer_dir = os.path.join(args.out_dir, "freesurfer")
    out_dir = os.path.join(args.out_dir, "baracus")
    if not os.path.isdir(freesurfer_dir):
        os.makedirs(freesurfer_dir)
    if not os.path.isdir(out_dir):
        os.makedirs(out_dir)

    model_dir = resource_filename(Requirement.parse("baracus"), 'models')

    run("bids-validator " + args.bids_dir)
    layout = BIDSLayout(args.bids_dir)

    truly_longitudinal_study = len(layout.get_sessions()) > 1
    subjects_to_analyze, sessions_to_analyze, freesurfer_subjects_to_analyze = get_subjects_session(layout,
                                                                                                    args.participant_label,
                                                                                                    truly_longitudinal_study)

    if args.analysis_level == "participant":

        data_files = run_prepare_all(args.bids_dir, freesurfer_dir, out_dir, subjects_to_analyze,
                                     sessions_to_analyze, args.n_cpus, args.license_key, args.skip_missing)

        for subject, d in data_files.items():
            d["out_dir"] = out_dir
            d["model_dir"] = model_dir
            d["models"] = args.models
            d["subject_label"] = subject
            predict_brain_age_single_subject(**d)
Example #10
def wfmaker(project_dir,
            raw_dir,
            subject_id,
            task_name='',
            apply_trim=False,
            apply_dist_corr=False,
            apply_smooth=False,
            apply_filter=False,
            mni_template='2mm',
            apply_n4=True,
            ants_threads=8,
            readable_crash_files=False,
            write_logs=True):
    """
    This function returns a "standard" workflow based on requested settings. Assumes the data in raw_dir is organized in BIDS format.

    *Workflow steps*:

    1) EPI Distortion Correction (FSL; optional)
    2) Trimming (nipy)
    3) Realignment/Motion Correction (FSL)
    4) Artifact Detection (rapidART/python)
    5) Brain Extraction + N4 Bias Correction (ANTs)
    6) Coregistration (rigid) (ANTs)
    7) Normalization to MNI (non-linear) (ANTs)
    8) Low-pass filtering (nilearn; optional)
    9) Smoothing (FSL; optional)
    10) Downsampling to INT16 precision to save space (nibabel)

    If data contains multiple sessions, this returns a *list* of workflows each of which should be run independently.

    Args:
        project_dir (str): full path to the root of project folder, e.g. /my/data/myproject. All preprocessed data will be placed under this folder and the raw_dir folder will be searched for under this folder
        raw_dir (str): folder name for raw data, e.g. 'raw' which would be automatically converted to /my/data/myproject/raw
        subject_id (str/int): subject ID to process. Can be either a subject ID string e.g. 'sub-0001' or an integer to index the entire list of subjects in raw_dir, e.g. 0, which would process the first subject
        apply_trim (int/bool; optional): number of volumes to trim from the beginning of each functional run; default is False
        task_name (str; optional): which functional task runs to process; default is all runs
        apply_dist_corr (bool; optional): look for fmap files and perform distortion correction; default False
        apply_smooth (int/list; optional): smoothing to perform in FWHM mm; if a list is provided will create outputs for each smoothing kernel separately; default False
        apply_filter (float/list; optional): low-pass/high-freq filtering cut-offs in Hz; if a list is provided will create outputs for each filter cut-off separately. With high temporal resolution scans .25Hz is a decent value to capture respiratory artifacts; default None/False
        mni_template (str; optional): which mm resolution template to use, e.g. '3mm'; default '2mm'
        apply_n4 (bool; optional): perform N4 Bias Field correction on the anatomical image; default true
        ants_threads (int; optional): number of threads ANTs should use for its processes; default 8
        readable_crash_files (bool; optional): should nipype crash files be saved as txt? This makes them easily readable, but sometimes interferes with nipype's ability to use cached results of successfully run nodes (i.e. picking up where it left off after bugs are fixed); default False
        write_logs (bool; optional): should nipype write log files? convenient to see all steps in retrospect (can also inspect your cluster's log files) but sometimes will randomly fail due to portalocker issues on NFS systems (e.g. https://bit.ly/2L8Lm6N); default True

    Examples:

        >>> from cosanlab_preproc.wfmaker import wfmaker
        >>> # Create workflow that performs no distortion correction, trims first 5 TRs, no filtering, 6mm smoothing, and normalizes to 2mm MNI space. Run it with 16 cores.
        >>>
        >>> workflow = wfmaker(
                        project_dir = '/data/project',
                        raw_dir = 'raw',
                        apply_trim = 5)
        >>>
        >>> workflow.run('MultiProc',plugin_args = {'n_procs': 16})
        >>>
        >>> # Create workflow that performs distortion correction, trims first 25 TRs, no filtering and filtering .25hz, 6mm and 8mm smoothing, and normalizes to 3mm MNI space. Run it serially (will be super slow!).
        >>>
        >>> workflow = wfmaker(
                        project_dir = '/data/project',
                        raw_dir = 'raw',
                        apply_trim = 25,
                        apply_dist_corr = True,
                        apply_filter = [0, .25],
                        apply_smooth = [6.0, 8.0],
                        mni_template = '3mm')
        >>>
        >>> workflow.run()

    """

    ##################
    ### PATH SETUP ###
    ##################
    if mni_template not in ['1mm', '2mm', '3mm']:
        raise ValueError("MNI template must be: 1mm, 2mm, or 3mm")

    data_dir = os.path.join(project_dir, raw_dir)
    output_dir = os.path.join(project_dir, 'preprocessed')
    output_final_dir = os.path.join(output_dir, 'final')
    output_interm_dir = os.path.join(output_dir, 'intermediate')

    if not os.path.exists(output_final_dir):
        os.makedirs(output_final_dir)
    if not os.path.exists(output_interm_dir):
        os.makedirs(output_interm_dir)

    layout = BIDSLayout(data_dir)
    # Dartmouth subjects are named with the sub- prefix; handle whether we receive an integer index or the full subject id with the prefix
    if isinstance(subject_id, six.string_types):
        subId = subject_id[4:]
    elif isinstance(subject_id, int):
        subId = layout.get_subjects()[subject_id]
        subject_id = 'sub-' + subId
    else:
        raise TypeError("subject_id should be a string or integer")

    # For multi-session datasets return a list of workflows consisting of pipelines specific to all data within that session
    # Otherwise return a single workflow
    sessions = layout.get_sessions()
    if len(sessions) <= 1:
        anat, funcs, fmaps = file_getter(layout, subId, apply_dist_corr,
                                         task_name)
        workflow = builder(subject_id=subject_id,
                           subId=subId,
                           project_dir=project_dir,
                           data_dir=data_dir,
                           output_dir=output_dir,
                           output_final_dir=output_final_dir,
                           output_interm_dir=output_interm_dir,
                           layout=layout,
                           anat=anat,
                           funcs=funcs,
                           fmaps=fmaps,
                           task_name=task_name,
                           session=None,
                           apply_trim=apply_trim,
                           apply_dist_corr=apply_dist_corr,
                           apply_smooth=apply_smooth,
                           apply_filter=apply_filter,
                           mni_template=mni_template,
                           apply_n4=apply_n4,
                           ants_threads=ants_threads,
                           readable_crash_files=readable_crash_files,
                           write_logs=write_logs)
    else:
        workflow = []
        for s in sessions:
            try:
                # Generate a workflow if this subject and session are found
                anat, funcs, fmaps = file_getter(layout,
                                                 subId,
                                                 apply_dist_corr,
                                                 task_name,
                                                 session=s)
                w = builder(subject_id=subject_id,
                            subId=subId,
                            project_dir=project_dir,
                            data_dir=data_dir,
                            output_dir=output_dir,
                            output_final_dir=output_final_dir,
                            output_interm_dir=output_interm_dir,
                            layout=layout,
                            anat=anat,
                            funcs=funcs,
                            fmaps=fmaps,
                            task_name=task_name,
                            session=s,
                            apply_trim=apply_trim,
                            apply_dist_corr=apply_dist_corr,
                            apply_smooth=apply_smooth,
                            apply_filter=apply_filter,
                            mni_template=mni_template,
                            apply_n4=apply_n4,
                            ants_threads=ants_threads,
                            readable_crash_files=readable_crash_files,
                            write_logs=write_logs)
                workflow.append(w)
            except IndexError:
                # IndexError because file_getter indexes the list returned from layout.get()
                print(f"Subject: {subId} has no Session: {s}")

    return workflow
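
Because multi-session datasets yield a list of workflows, a caller can normalize both cases before running; a minimal sketch (paths, subject index, and core count are placeholders):

result = wfmaker(project_dir='/data/project', raw_dir='raw', subject_id=0)
workflows = result if isinstance(result, list) else [result]
for wf in workflows:
    wf.run('MultiProc', plugin_args={'n_procs': 8})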
Example #11
def create_pipeline_SS_TV(bids_dir, work_dir, out_dir, subjects, sessions,
                          mag_match_pattern, phase_match_pattern,
                          keep_unnecessary_outputs, FAST_bias_iters,
                          FAST_bias_lowpass, FAST_num_classes, BET_frac,
                          freq_weights__snr_window_sz, truncate_echo,
                          SS_TV_lagrange_parameter, B0_dir,
                          scnd_diff_reliability_thresh_trim,
                          scnd_diff_reliability_thresh_noise):
    layout = BIDSLayout(bids_dir)

    #can we do this more elegantly?
    first_echo_files = []
    for subject in subjects:
        if layout.get_sessions(subject=subject) == []:
            if sessions == ['.*']:
                first_echo_files = first_echo_files + layout.get(
                    subject=subject,
                    modality='anat',
                    extensions='.*part-phase.*echo-0*1.*.nii.*',
                )
            else:
                print(
                    "Warning: Session filter applied, but subject " + subject +
                    " has no bids session information. This subject has been ignored."
                )
        else:
            for session in sessions:
                first_echo_files = first_echo_files + layout.get(
                    subject=subject,
                    session=session,
                    modality='anat',
                    extensions='.*part-phase.*echo-0*1.*.nii.*',
                )
    anat_folders = []
    for img in first_echo_files:
        full_dirname = os.path.dirname(img.filename)
        remove_base_dir = full_dirname.replace(bids_dir, '')
        remove_leading_slash = remove_base_dir.lstrip(os.sep)
        anat_folders.append(remove_leading_slash)
    anat_folders = sorted(set(anat_folders))  # de-duplicate and sort

    #IdentityInterface is useful for passing subject directory structure to datasink
    infosource = pe.Node(niu.IdentityInterface(fields=['subject_id']),
                         name="infosource")
    infosource.iterables = ('subject_id', anat_folders)

    ### NODES AND PARAMETERS
    datasource = pe.Node(nio.DataGrabber(
        infields=['subject_id'],
        outfields=['phase_images', 'mag_images', 'phase_jsons', 'mag_jsons']),
                         name='datasource')
    datasource.inputs.field_template = dict(
        phase_images='%s/' + phase_match_pattern + '.nii*',
        phase_jsons='%s/' + phase_match_pattern + '.json',
        mag_images='%s/' + mag_match_pattern + '.nii*',
        mag_jsons='%s/' + mag_match_pattern + '.json',
    )
    datasource.inputs.sort_filelist = True
    datasource.inputs.template = "*"
    datasource.inputs.base_directory = bids_dir

    #this node must change depending on the scanner vendor
    susc_phase_preprocess = pe.Node(SiemensPhasePreprocess(),
                                    name='susc_phase_preprocess')

    avg_and_freq_estimate_weights = pe.Node(
        GetAvgAndWeightsFromMag(), name='avg_and_freq_estimate_weights')
    avg_and_freq_estimate_weights.inputs.snr_window_sz = freq_weights__snr_window_sz
    avg_and_freq_estimate_weights.inputs.avg_out_filename = "avg.nii.gz"
    avg_and_freq_estimate_weights.inputs.weight_out_filename = "weights.nii.gz"
    """
    #spm worked better for varian 7T data
    #if using spm, these parameters are needed
    bias_regularization=.001
    sampling_distance=2.0
    bias_fwhm=30
    
    nonuniformityCorrect_spm=pe.Node(spm.preprocess.Segment(),name='nonuniformityCorrect_spm')
    nonuniformityCorrect_spm.inputs.bias_regularization=bias_regularization
    nonuniformityCorrect_spm.inputs.sampling_distance=sampling_distance
    nonuniformityCorrect_spm.inputs.bias_fwhm=bias_fwhm
    nonuniformityCorrect_spm.inputs.save_bias_corrected=True
    """

    nonuniformity_correct_fsl = pe.Node(fsl.FAST(),
                                        name='nonuniformity_correct_fsl')
    nonuniformity_correct_fsl.inputs.img_type = 2  #1 for t1, 2 for t2
    nonuniformity_correct_fsl.inputs.bias_iters = FAST_bias_iters  #higher for larger nonuniformity
    nonuniformity_correct_fsl.inputs.bias_lowpass = FAST_bias_lowpass  #spm uses 30
    nonuniformity_correct_fsl.inputs.number_classes = FAST_num_classes  #spm uses 5
    nonuniformity_correct_fsl.inputs.output_biasfield = True
    nonuniformity_correct_fsl.inputs.output_biascorrected = True
    nonuniformity_correct_fsl.interface.estimated_memory_gb = 10

    brain_extract = pe.Node(fsl.BET(), name='brain_extract')
    brain_extract.inputs.frac = BET_frac
    brain_extract.inputs.mask = True
    brain_extract.inputs.robust = True

    freq_est = pe.Node(EstimateFrequncyFromWrappedPhase(), 'freq_est')
    freq_est.inputs.truncate_echo = truncate_echo
    freq_est.inputs.freq_filename = "freq_est.nii.gz"
    freq_est.interface.estimated_memory_gb = 4

    R2Star = pe.Node(CalcR2Star_cmd(), 'R2Star')
    R2Star.inputs.R2star = 'R2star.nii.gz'
    R2Star.inputs.neg_mask = 'negMask.nii.gz'
    R2Star.inputs.nan_mask = 'nanMask.nii.gz'
    #R2Star.interface.estimated_memory_gb = 5

    trim_mask = pe.Node(TrimMaskUsingReliability(), name='trim_mask')
    trim_mask.inputs.erosion_sz = 15.0  #in mm
    trim_mask.inputs.threshold = scnd_diff_reliability_thresh_trim
    trim_mask.inputs.trimmed_mask_filename = "trim_mask.nii.gz"
    trim_mask.inputs.reliability_filename = "unreliableMap.nii.gz"
    trim_mask.interface.estimated_memory_gb = 25

    unreliable_fieldmap_voxels = pe.Node(CalculatReliabilityMask(),
                                         name='unreliable_fieldmap_voxels')
    unreliable_fieldmap_voxels.inputs.threshold = scnd_diff_reliability_thresh_noise
    unreliable_fieldmap_voxels.inputs.reliability_mask_filename = "unreliableMask.nii.gz"
    unreliable_fieldmap_voxels.inputs.reliability_filename = "unreliableMap.nii.gz"

    CF_value = pe.Node(GetCFFromJson, name='CFValue')

    susceptibility = pe.Node(SS_TV_mcr(), name='susceptibility')
    susceptibility.inputs.quit_matlab = ''  #use this line when using mcr, comment when using matlab
    susceptibility.inputs.alpha = SS_TV_lagrange_parameter
    susceptibility.inputs.B0_dir = B0_dir
    susceptibility.inputs.susceptibility_filename = 'susceptibilityMap.nii.gz'
    susceptibility.interface.estimated_memory_gb = 10

    fieldmap_reorient = pe.Node(fsl.Reorient2Std(), name='fieldmap_reorient')
    QSM_reorient = pe.Node(fsl.Reorient2Std(), name='QSM_reorient')
    QSM_brain_mask_reorient = pe.Node(fsl.Reorient2Std(),
                                      name='QSM_brain_mask_reorient')
    QSM_noise_mask_reorient = pe.Node(fsl.Reorient2Std(),
                                      name='QSM_noise_mask_reorient')
    R2star_reorient = pe.Node(fsl.Reorient2Std(), name='R2star_reorient')
    R2star_fit_reorient = pe.Node(fsl.Reorient2Std(),
                                  name='R2star_fit_reorient')
    R2star_neg_mask_reorient = pe.Node(fsl.Reorient2Std(),
                                       name='R2star_neg_mask_reorient')

    datasink = pe.Node(nio.DataSink(), name="datasink")
    datasink.inputs.base_directory = out_dir + '/qsm_sstv/'
    datasink.inputs.parameterization = False

    rename_infosource = pe.Node(replace_slash, "rename_infosource")
    rename_fieldmap = pe.Node(
        niu.Rename(format_string="%(subject_id)s-fieldmap", keep_ext=True),
        "rename_fieldmap")
    rename_QSM = pe.Node(
        niu.Rename(format_string="%(subject_id)s-QSM", keep_ext=True),
        "rename_QSM")
    rename_QSM_brain_mask = pe.Node(
        niu.Rename(format_string="%(subject_id)s-QSM_brainMask",
                   keep_ext=True), "rename_QSM_brain_mask")
    rename_QSM_noise_mask = pe.Node(
        niu.Rename(format_string="%(subject_id)s-QSM_noiseMask",
                   keep_ext=True), "rename_QSM_noise_mask")

    rename_R2star = pe.Node(
        niu.Rename(format_string="%(subject_id)s-R2star", keep_ext=True),
        "rename_R2star")
    rename_R2star_fit = pe.Node(
        niu.Rename(format_string="%(subject_id)s-R2star_fit", keep_ext=True),
        "rename_R2star_fit")
    rename_R2star_neg_mask = pe.Node(
        niu.Rename(format_string="%(subject_id)s-R2star_negMask",
                   keep_ext=True), "rename_R2star_neg_mask")

    ### PIPELINE CONNECTION
    pipelineDir = work_dir
    wf = pe.Workflow(name="SS_TV")
    wf.base_dir = pipelineDir
    wf.config['execution'][
        'remove_unnecessary_outputs'] = False  #useful for debugging
    wf.connect([
        (infosource, datasource, [('subject_id', 'subject_id')]),
        (datasource, avg_and_freq_estimate_weights, [('mag_images', 'mag')]),
        (datasource, susc_phase_preprocess, [('phase_images', 'infiles')]),
        #spm requires matlab
        #(avg_and_freq_estimate_weights, nonuniformityCorrect_spm, [('avgOutFilename', 'data')]),
        #(nonuniformityCorrect_spm, brain_extract, [('bias_corrected_image', 'in_file')]),
        (avg_and_freq_estimate_weights, nonuniformity_correct_fsl,
         [('avg_out_filename', 'in_files')]),
        (nonuniformity_correct_fsl, brain_extract, [('restored_image',
                                                     'in_file')]),
        (susc_phase_preprocess, freq_est, [('outfiles', 'phase')]),
        (datasource, freq_est, [('phase_jsons', 'json')]),
        (brain_extract, freq_est, [('mask_file', 'mask')]),
        (avg_and_freq_estimate_weights, freq_est, [('weight_out_filename',
                                                    'weight')]),
        (freq_est, trim_mask, [('freq_filename', 'phase')]),
        (datasource, R2Star, [('mag_images', 'mag')]),
        (susc_phase_preprocess, R2Star, [('outfiles', 'phase')]),
        (freq_est, R2Star, [('freq_filename', 'freq_loc')]),
        (trim_mask, R2Star, [('trimmed_mask_filename', 'mask')]),
        (datasource, R2Star, [('mag_jsons', 'json')]),
        (brain_extract, trim_mask, [('mask_file', 'mask')]),
        (freq_est, unreliable_fieldmap_voxels, [('freq_filename', 'phase')]),
        (brain_extract, unreliable_fieldmap_voxels, [('mask_file', 'mask')]),
        (freq_est, susceptibility, [('freq_filename', 'freq_loc')]),
        (datasource, CF_value, [('mag_jsons', 'filename')]),
        (unreliable_fieldmap_voxels, susceptibility,
         [('reliability_mask_filename', 'reliability_mask_loc')]),
        (trim_mask, susceptibility, [('trimmed_mask_filename', 'mask_loc')]),
        (CF_value, susceptibility, [('cf', 'CF')]),
        (freq_est, fieldmap_reorient, [('freq_filename', 'in_file')]),
        (susceptibility, QSM_reorient, [('susceptibility_filename', 'in_file')
                                        ]),
        (trim_mask, QSM_brain_mask_reorient, [('trimmed_mask_filename',
                                               'in_file')]),
        (unreliable_fieldmap_voxels, QSM_noise_mask_reorient,
         [('reliability_mask_filename', 'in_file')]),
        (R2Star, R2star_reorient, [('R2star', 'in_file')]),
        (R2Star, R2star_fit_reorient, [('R2star_fit', 'in_file')]),
        (R2Star, R2star_neg_mask_reorient, [('neg_mask', 'in_file')]),

        #rename files and data sink
        (infosource, rename_infosource, [('subject_id', 'filename')]),
        #fieldmap
        (rename_infosource, rename_fieldmap, [('renamed', 'subject_id')]),
        (fieldmap_reorient, rename_fieldmap, [('out_file', 'in_file')]),
        (rename_fieldmap, datasink, [('out_file', '@')]),
        #qsm
        (rename_infosource, rename_QSM, [('renamed', 'subject_id')]),
        (QSM_reorient, rename_QSM, [('out_file', 'in_file')]),
        (rename_QSM, datasink, [('out_file', '@.@qsm')]),
        #qsm brain mask
        (rename_infosource, rename_QSM_brain_mask, [('renamed', 'subject_id')]
         ),
        (QSM_brain_mask_reorient, rename_QSM_brain_mask, [('out_file',
                                                           'in_file')]),
        (rename_QSM_brain_mask, datasink, [('out_file', '@.@qsm_brain')]),
        #qsm noisy voxels in fieldmap
        (rename_infosource, rename_QSM_noise_mask, [('renamed', 'subject_id')]
         ),
        (QSM_noise_mask_reorient, rename_QSM_noise_mask, [('out_file',
                                                           'in_file')]),
        (rename_QSM_noise_mask, datasink, [('out_file', '@.@qsm_noise')]),
        #r2star
        (rename_infosource, rename_R2star, [('renamed', 'subject_id')]),
        (R2star_reorient, rename_R2star, [('out_file', 'in_file')]),
        (rename_R2star, datasink, [('out_file', '@.@r2star')]),
        #r2star fit map
        (rename_infosource, rename_R2star_fit, [('renamed', 'subject_id')]),
        (R2star_fit_reorient, rename_R2star_fit, [('out_file', 'in_file')]),
        (rename_R2star_fit, datasink, [('out_file', '@.@r2starfit')]),
        #r2star negative values that were set to 0
        (rename_infosource, rename_R2star_neg_mask, [('renamed', 'subject_id')]
         ),
        (R2star_neg_mask_reorient, rename_R2star_neg_mask, [('out_file',
                                                             'in_file')]),
        (rename_R2star_neg_mask, datasink, [('out_file', '@.@r2starneg')]),
        (infosource, datasink, [('subject_id', 'container')]),
    ])
    return wf
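
A usage sketch of the pipeline builder; every argument value below is a placeholder chosen only to illustrate the call shape, not a recommended setting.

wf = create_pipeline_SS_TV(
    bids_dir='/data/my_bids_dataset', work_dir='/tmp/work', out_dir='/data/out',
    subjects=['01'], sessions=['.*'],
    mag_match_pattern='*part-mag*echo*', phase_match_pattern='*part-phase*echo*',
    keep_unnecessary_outputs=False, FAST_bias_iters=5, FAST_bias_lowpass=20,
    FAST_num_classes=3, BET_frac=0.4, freq_weights__snr_window_sz=11,
    truncate_echo=1, SS_TV_lagrange_parameter=0.35, B0_dir=3,
    scnd_diff_reliability_thresh_trim=0.5, scnd_diff_reliability_thresh_noise=1.0)
wf.run(plugin='MultiProc', plugin_args={'n_procs': 4})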
Example #12
"""
fix various metadata issues

"""

import os, sys, glob
import json

from bids.grabbids import BIDSLayout

#project_root = '/Users/poldrack/data_unsynced/multitask/sc1/BIDS'
project_root = '/scratch/01329/poldrack/MultiTask/sc2/BIDS'
layout = BIDSLayout(project_root)

for sub in layout.get_subjects():
    for sess in layout.get_sessions(subject=sub):
        print(sub, sess)
        # first fix fieldmap metadata
        # need to add echo times to phasediff

        m1 = layout.get(subject=sub,
                        session=sess,
                        type='magnitude1',
                        extensions='.json')
        assert len(m1) <= 1
        if len(m1) == 0:
            print('no fieldmap - skipping')
        else:
            m2 = layout.get(subject=sub,
                            session=sess,
                            type='magnitude2',