Code example #1
def T1_template_preproc(subject_list, base_directory, out_directory):
    #==============================================================
    # Loading required packages
    import nipype.interfaces.io as nio
    import nipype.pipeline.engine as pe
    import nipype.interfaces.utility as util
    from nipype import SelectFiles
    from nipype.interfaces import fsl
    import os

    #====================================
    # Defining the nodes for the workflow

    # Getting the subject ID
    infosource = pe.Node(
        interface=util.IdentityInterface(fields=['subject_id']),
        name='infosource')
    infosource.iterables = ('subject_id', subject_list)

    # Getting the relevant anatomical (T1-weighted) data
    templates = dict(T1='{subject_id}/anat/{subject_id}_T1w.nii.gz')

    selectfiles = pe.Node(SelectFiles(templates), name="selectfiles")
    selectfiles.inputs.base_directory = os.path.abspath(base_directory)

    btr = pe.Node(interface=fsl.BET(), name='betr')
    btr.inputs.robust = True

    flt = pe.Node(interface=fsl.FLIRT(dof=6, cost_func='corratio'), name='flt')
    flt.inputs.reference = os.environ[
        'FSLDIR'] + '/data/standard/MNI152_T1_1mm_brain.nii.gz'

    robustfov = pe.Node(interface=fsl.RobustFOV(), name='robustfov')

    #====================================
    # Setting up the workflow
    templ_preproc = pe.Workflow(name='templ_preproc')

    templ_preproc.connect(infosource, 'subject_id', selectfiles, 'subject_id')
    templ_preproc.connect(selectfiles, 'T1', btr, 'in_file')
    templ_preproc.connect(btr, 'out_file', flt, 'in_file')
    templ_preproc.connect(flt, 'out_file', robustfov, 'in_file')

    #====================================
    # Running the workflow
    templ_preproc.base_dir = os.path.abspath(out_directory)
    templ_preproc.write_graph()
    templ_preproc.run('PBSGraph')
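A minimal invocation sketch for the function above. The subject IDs and directories are hypothetical and assume a BIDS-style layout matching the '{subject_id}/anat/{subject_id}_T1w.nii.gz' template; note that the workflow is run with the PBS-based 'PBSGraph' plugin, so it expects a PBS cluster.

T1_template_preproc(subject_list=['sub-01', 'sub-02'],
                    base_directory='/data/bids',
                    out_directory='/data/derivatives/templ_preproc')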
Code example #2
def ANTs_Apply_Transform(subject_list, base_directory, reference):
    #==============================================================
    # Loading required packages
    import nipype.interfaces.io as nio
    import nipype.pipeline.engine as pe
    import nipype.interfaces.utility as util
    from nipype import SelectFiles
    from nipype.interfaces.ants import ApplyTransforms
    import os

    #====================================
    # Defining the nodes for the workflow

    # Getting the subject ID
    infosource = pe.Node(
        interface=util.IdentityInterface(fields=['subject_id']),
        name='infosource')
    infosource.iterables = ('subject_id', subject_list)

    # Getting the relevant ANTs template-building outputs (repaired image, warp field, affine)
    templates = dict(in_file='antsTMPL_{subject_id}repaired.nii.gz',
                     warp_field='antsTMPL_{subject_id}Warp.nii.gz',
                     transformation_matrix='antsTMPL_{subject_id}Affine.txt')

    selectfiles = pe.Node(SelectFiles(templates), name="selectfiles")
    selectfiles.inputs.base_directory = os.path.abspath(base_directory)

    at = pe.Node(interface=ApplyTransforms(), name='at')
    at.inputs.dimension = 3
    at.inputs.reference_image = reference
    at.inputs.interpolation = 'Linear'
    at.inputs.default_value = 0
    at.inputs.invert_transform_flags = False

    #====================================
    # Setting up the workflow
    apply_ants_transform = pe.Workflow(name='apply_ants_transform')

    apply_ants_transform.connect(infosource, 'subject_id', selectfiles,
                                 'subject_id')
    apply_ants_transform.connect(selectfiles, 'in_file', at, 'input_image')
    apply_ants_transform.connect(selectfiles, 'warp_field', at, 'transforms')

    #====================================
    # Running the workflow
    apply_ants_transform.base_dir = os.path.abspath(base_directory)
    apply_ants_transform.write_graph()
    apply_ants_transform.run('PBSGraph')
Code example #3
def selectFile(rootPath=r'D:\其他\老舅财务\allData'):
    templates = {'path': '*\\*\\*.txt'}

    # Create SelectFiles node
    sf = Node(SelectFiles(templates), name='selectfiles')

    # Location of the dataset folder
    sf.inputs.base_directory = rootPath

    # Feed {}-based placeholder strings with values
    #    sf.inputs.subject_id1 = '00[1,2]'
    #    sf.inputs.subject_id2 = '01'
    #    sf.inputs.ses_name = "retest"
    #    sf.inputs.task_name = 'covertverb'
    path = sf.run().outputs.__dict__['path']
    return path
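For comparison, a minimal sketch of feeding a {}-based placeholder (as the commented-out lines above hint at) and reading the result by attribute access, which is equivalent to the __dict__ lookup used here; the template and paths are hypothetical.

from nipype import Node, SelectFiles

templates = {'path': '{subject_id}\\*.txt'}    # hypothetical template with a named placeholder
sf = Node(SelectFiles(templates), name='selectfiles')
sf.inputs.base_directory = r'D:\data'          # hypothetical dataset folder
sf.inputs.subject_id = 'sub001'                # fills the {subject_id} placeholder
path = sf.run().outputs.path                   # same value as outputs.__dict__['path']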
Code example #4
def selectFile(
        rootPath=r'I:\Data_Code\insomnia\workstation_MVPA_2018_05\FunImgARW1'):
    templates = {'path': '*\\sw*.nii'}

    # Create SelectFiles node
    sf = Node(SelectFiles(templates), name='selectfiles')

    # Location of the dataset folder
    sf.inputs.base_directory = rootPath

    # Feed {}-based placeholder strings with values
    #    sf.inputs.subject_id1 = '00[1,2]'
    #    sf.inputs.subject_id2 = '01'
    #    sf.inputs.ses_name = "retest"
    #    sf.inputs.task_name = 'covertverb'
    path = sf.run().outputs.__dict__['path']
    return path
Code example #5
File: lc_selectFile.py  Project: wdxbb/easylearn_dev
def selectFile(rootPath=r'I:\Data_Code\Doctor\RealignParameter'):
    templates = {'path': '{folder}\\{id}'}

    # Create SelectFiles node
    sf = Node(SelectFiles(templates), name='selectfiles')

    # Location of the dataset folder
    sf.inputs.base_directory = rootPath

    # Feed {}-based placeholder strings with values
    sf.inputs.folder = '*_*'
    sf.inputs.id = 'FD_Jenkinson_*'
    #    sf.inputs.subject_id2 = '01'
    #    sf.inputs.ses_name = "retest"
    #    sf.inputs.task_name = 'covertverb'
    path = sf.run().outputs.__dict__['path']
    return path
Code example #6
def FreeSurfer_Reconall(subject_list, base_directory, out_directory):
    #==============================================================
    # Loading required packages
    import nipype.interfaces.io as nio
    import nipype.pipeline.engine as pe
    import nipype.interfaces.utility as util
    from nipype.interfaces.freesurfer import ReconAll
    from nipype import SelectFiles
    import os
    nodes = list()

    #====================================
    # Defining the nodes for the workflow

    # Getting the subject ID
    infosource = pe.Node(
        interface=util.IdentityInterface(fields=['subject_id']),
        name='infosource')
    infosource.iterables = ('subject_id', subject_list)

    # Getting the relevant anatomical (T1-weighted) data
    templates = dict(T1='{subject_id}/anat/{subject_id}_T1w.nii.gz')

    selectfiles = pe.Node(SelectFiles(templates), name="selectfiles")
    selectfiles.inputs.base_directory = os.path.abspath(base_directory)
    nodes.append(selectfiles)

    reconall = pe.Node(interface=ReconAll(), name='reconall')
    reconall.inputs.directive = 'autorecon2'
    reconall.inputs.subjects_dir = out_directory
    reconall.inputs.flags = '-no-isrunning'
    reconall.inputs.ignore_exception = True

    # Setting up the workflow
    fs_reconall = pe.Workflow(name='fs_reconall')

    # Reading in files
    fs_reconall.connect(infosource, 'subject_id', selectfiles, 'subject_id')
    fs_reconall.connect(selectfiles, 'T1', reconall, 'T1_files')
    fs_reconall.connect(infosource, 'subject_id', reconall, 'subject_id')

    # Running the workflow
    fs_reconall.base_dir = os.path.abspath(out_directory)
    fs_reconall.write_graph()
    fs_reconall.run('PBSGraph')
Code example #7
def coreg_with_FLIRT(subject_list, base_directory):
    #==============================================================
    # Loading required packages
    import nipype.interfaces.io as nio
    import nipype.pipeline.engine as pe
    import nipype.interfaces.utility as util
    from nipype import SelectFiles
    from nipype.interfaces import fsl
    import os

    #====================================
    # Defining the nodes for the workflow

    # Getting the subject ID
    infosource = pe.Node(
        interface=util.IdentityInterface(fields=['subject_id']),
        name='infosource')
    infosource.iterables = ('subject_id', subject_list)

    # Getting the relevant image for each subject
    templates = dict(in_file='{subject_id}.nii.gz')

    selectfiles = pe.Node(SelectFiles(templates), name="selectfiles")
    selectfiles.inputs.base_directory = os.path.abspath(base_directory)

    flt = pe.Node(interface=fsl.FLIRT(dof=6, cost_func='corratio'), name='flt')
    flt.inputs.reference = os.environ[
        'FSLDIR'] + '/data/standard/FMRIB58_FA_1mm.nii.gz'

    #====================================
    # Setting up the workflow
    flt_coreg = pe.Workflow(name='flt_coreg')

    flt_coreg.connect(infosource, 'subject_id', selectfiles, 'subject_id')
    flt_coreg.connect(selectfiles, 'in_file', flt, 'in_file')

    #====================================
    # Running the workflow
    flt_coreg.base_dir = os.path.abspath(base_directory)
    flt_coreg.write_graph()
    flt_coreg.run('PBSGraph')
Code example #8
    def analysis_steps(self):
        self.analysis = type('', (), {})()
        # Get files
        subj_list = [
            '_'.join(subj.split('_')[:-1])
            for subj in next(os.walk(self.proj_dir))[1]
        ]
        # TODO limit the subj_list to those without sw processed files.

        # for parallelization by subject, use IdentityInterface
        self.analysis.infosource = Node(
            IdentityInterface(fields=['subj_id', 'task']), name="infosource")
        self.analysis.infosource.iterables = [('subj_id', subj_list),
                                              ('task', self.task_names)]

        templates = {
            'anat': '{subj_id}/t1/{subj_id}_t1*.nii',
            'func': '{subj_id}/{task}*/{subj_id}_{task}*.nii'
        }
        self.analysis.sf = Node(SelectFiles(templates), name='selectfiles')
        self.analysis.sf.inputs.base_directory = self.proj_dir

        # Realign
        self.analysis.realign = Node(spm.Realign(register_to_mean=True,
                                                 fwhm=self.opts.fwhm),
                                     name='realign')

        # Coregister
        self.analysis.coreg = Node(spm.Coregister(), name='coregistration')
        # Normalize
        self.analysis.norm12 = Node(spm.Normalize12(
            bias_regularization=1e-05, affine_regularization_type='mni'),
                                    name='normalize')

        #Smooth
        self.analysis.smooth = Node(spm.Smooth(), name='smooth')
        #smooth.inputs.in_files = 'functional.nii'
        self.analysis.smooth.inputs.fwhm = self.opts.smooth_fwhm
Code example #9
                                       run=run_id)

    write_raw_bids(raw,
                   bids_basename,
                   bids_root,
                   event_id=event_id,
                   events_data=events,
                   overwrite=True)


# setup workflow nodes

# Create SelectFiles node
templates = {'eeg_raw': 'sub-{subject_id}.bdf'}

selectfiles = Node(SelectFiles(templates, base_directory=source_data_path),
                   name='selectfiles')

# Create DataSink node
datasink = Node(DataSink(base_directory=data_dir, container=bids_root),
                name="datasink")

# Infosource - a function free node to iterate over the list of subject names
infosource = Node(IdentityInterface(fields=['subject_id', 'run_id']),
                  name="infosource")
infosource.iterables = [('subject_id', subject_list), ('run_id', run_list)]

# Create split run node
split_single_file = Node(Function(
    input_names=['source_data_file', 'source_data_path', 'rb_trig'],
    output_names=['run_files', 'run_ids'],
Code example #10
    infosource.iterables = [("subj_id", subjs[-20:]), ("task", tasks),
                            ('timept', timepts), ('t1', t1_timepts)]
    # Tip 15: iterables run full combinations. Be sneaky with using a dictionary of limited iterables if you are only trying to select specific combinations. For example, having the T1 be iterable with timepoints [1,1,3,3] and the fmri scans be iterable with [1,2,3,4] will create 16 results, instead of the 4 intended pairings (where t1=1 went with fmri=1, t1=1 went with fmri=2, t1=3 went with fmri=3, etc.; see the short sketch after this example).

    # Create the templates for the scan locations
    # The values from the template dictionary are added onto the end of the base_directory when the templates key is invoked by the Workflow connection
    templates = {
        'struct': '{subj_id}*{t1}*/t1/{subj_id}*t1*.nii',
        'func': '{subj_id}*_{timept}_*/{task}/{subj_id}*_????????.nii'
    }

    # Set up the options for the SelectFiles command. SelectFiles seems to be a bit more straightforward than DataGrabber and still invokes glob...
    sf = Node(
        SelectFiles(
            templates,
            base_directory=op.abspath(data_dir),
            # Don't worry about potential filepath matches built off the templates that don't actually exist
            raise_on_empty=False,
            sort_filelist=True),
        # Use this option to get around accidentally having too many processes access the same working directory b/c bigkahuna is too fast/good
        run_without_submitting=True,
        name='selectfiles')

    # Input the iterables for the experiment(s) to be analyzed.
    # N.B. usually these fields are set up in the sf definition as "sf.inputs.task", "sf.inputs.subj_id", etc. depending what you defined as your fields in the templates.
    # However, if you are using iterables to maximize the pipeline potential, the values are set in the sf.iterables structure.
    # sf.run().outputs.get() will show the {} fields from the templates dictionary as undefined, as iterables are only incorporated through MapNode (and iterfield) or during a Workflow process

    # #### Tip 2: Create the output folder, so that nipype will not error out on that technicality.
    #
    # #### Tip 3: The I/O (and pipeline/iterable) arguments are basically setting up structures in the object. Therefore, "name" is a required argument, b/c it creates the {variable}.name field. Other arguments must also be named, but not necessarily by specific keyword. e.g., "timepts" is not a kw, but the {variable}.iterable will create {variable}.timept with the specified values.
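As a follow-up to Tip 15 above, a minimal sketch of synchronized iterables, which pair the listed values element-wise (4 expansions for the example values) instead of taking the full 16-way cross-product; the node and field names here are illustrative only.

import nipype.pipeline.engine as pe
import nipype.interfaces.utility as util

info = pe.Node(util.IdentityInterface(fields=['t1', 'timept']), name='info')
info.iterables = [('t1', [1, 1, 3, 3]), ('timept', [1, 2, 3, 4])]
info.synchronize = True  # pair the lists element-wise: 4 expansions instead of 16 combinations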
Code example #11
def create_fsl_workflow(data_dir=None, subjects=None, name="fslwarp"):
    """Set up the anatomical normalzation workflow using FNIRT.

    Your anatomical data must have been processed in Freesurfer.
    Unlike most lyman workflows, the SelectFiles and DataSink
    nodes are hardwired within the returned workflow, as this
    tightly integrates with the Freesurfer subjects directory
    structure.

    Parameters
    ----------
    data_dir : path
        top level of data hierarchy/FS subjects directory
    subjects : list of strings
        list of subject IDs
    name : alphanumeric string, optional
        workflow name

    """
    if data_dir is None:
        data_dir = os.environ["SUBJECTS_DIR"]
    if subjects is None:
        subjects = []

    # Get target images
    target_brain = fsl.Info.standard_image("avg152T1_brain.nii.gz")
    target_head = fsl.Info.standard_image("avg152T1.nii.gz")
    hires_head = fsl.Info.standard_image("MNI152_T1_1mm.nii.gz")
    target_mask = fsl.Info.standard_image(
        "MNI152_T1_2mm_brain_mask_dil.nii.gz")
    fnirt_cfg = os.path.join(os.environ["FSLDIR"],
                             "etc/flirtsch/T1_2_MNI152_2mm.cnf")

    # Subject source node
    subjectsource = Node(IdentityInterface(fields=["subject_id"]),
                         iterables=("subject_id", subjects),
                         name="subjectsource")

    # Grab recon-all outputs
    head_image = "T1"
    templates = dict(aseg="{subject_id}/mri/aparc+aseg.mgz",
                     head="{subject_id}/mri/" + head_image + ".mgz")
    datasource = Node(SelectFiles(templates, base_directory=data_dir),
                      "datasource")

    # Convert images to nifti storage and float representation
    cvtaseg = Node(fs.MRIConvert(out_type="niigz"), "convertaseg")

    cvthead = Node(fs.MRIConvert(out_type="niigz", out_datatype="float"),
                   "converthead")

    # Turn the aparc+aseg into a brainmask
    makemask = Node(fs.Binarize(dilate=1, min=0.5), "makemask")

    # Extract the brain from the orig.mgz using the mask
    skullstrip = Node(fsl.ApplyMask(), "skullstrip")

    # FLIRT brain to MNI152_brain
    flirt = Node(fsl.FLIRT(reference=target_brain), "flirt")

    sw = [-180, 180]
    for dim in ["x", "y", "z"]:
        setattr(flirt.inputs, "searchr_%s" % dim, sw)

    # FNIRT head to MNI152
    fnirt = Node(
        fsl.FNIRT(ref_file=target_head,
                  refmask_file=target_mask,
                  config_file=fnirt_cfg,
                  fieldcoeff_file=True), "fnirt")

    # Warp and rename the images
    warpbrain = Node(
        fsl.ApplyWarp(ref_file=target_head,
                      interp="spline",
                      out_file="brain_warp.nii.gz"), "warpbrain")

    warpbrainhr = Node(
        fsl.ApplyWarp(ref_file=hires_head,
                      interp="spline",
                      out_file="brain_warp_hires.nii.gz"), "warpbrainhr")

    # Generate a png summarizing the registration
    warpreport = Node(WarpReport(), "warpreport")

    # Save relevant files to the data directory
    fnirt_subs = [(head_image + "_out_masked_flirt.mat", "affine.mat"),
                  (head_image + "_out_fieldwarp", "warpfield"),
                  (head_image + "_out_masked", "brain"),
                  (head_image + "_out", "T1")]
    datasink = Node(
        DataSink(base_directory=data_dir,
                 parameterization=False,
                 substitutions=fnirt_subs), "datasink")

    # Define and connect the workflow
    # -------------------------------

    normalize = Workflow(name=name)

    normalize.connect([
        (subjectsource, datasource, [("subject_id", "subject_id")]),
        (datasource, cvtaseg, [("aseg", "in_file")]),
        (datasource, cvthead, [("head", "in_file")]),
        (cvtaseg, makemask, [("out_file", "in_file")]),
        (cvthead, skullstrip, [("out_file", "in_file")]),
        (makemask, skullstrip, [("binary_file", "mask_file")]),
        (skullstrip, flirt, [("out_file", "in_file")]),
        (flirt, fnirt, [("out_matrix_file", "affine_file")]),
        (cvthead, fnirt, [("out_file", "in_file")]),
        (skullstrip, warpbrain, [("out_file", "in_file")]),
        (fnirt, warpbrain, [("fieldcoeff_file", "field_file")]),
        (skullstrip, warpbrainhr, [("out_file", "in_file")]),
        (fnirt, warpbrainhr, [("fieldcoeff_file", "field_file")]),
        (warpbrain, warpreport, [("out_file", "in_file")]),
        (subjectsource, datasink, [("subject_id", "container")]),
        (skullstrip, datasink, [("out_file", "normalization.@brain")]),
        (cvthead, datasink, [("out_file", "normalization.@t1")]),
        (flirt, datasink, [("out_file", "normalization.@brain_flirted")]),
        (flirt, datasink, [("out_matrix_file", "normalization.@affine")]),
        (warpbrain, datasink, [("out_file", "normalization.@brain_warped")]),
        (warpbrainhr, datasink, [("out_file", "normalization.@brain_hires")]),
        (fnirt, datasink, [("fieldcoeff_file", "normalization.@warpfield")]),
        (warpreport, datasink, [("out_file", "normalization.@report")]),
    ])

    return normalize
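A usage sketch for the factory above (paths and subject IDs are hypothetical; WarpReport is an interface supplied by the surrounding lyman project). The returned workflow still needs a base_dir before it is run.

wf = create_fsl_workflow(data_dir='/data/freesurfer_subjects',
                         subjects=['subj01', 'subj02'])
wf.base_dir = '/scratch/work'
wf.run(plugin='MultiProc')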
Code example #12
def create_ants_workflow(data_dir=None, subjects=None, name="antswarp"):
    """Set up the anatomical normalzation workflow using ANTS.

    Your anatomical data must have been processed in Freesurfer.
    Unlike most lyman workflows, the SelectFiles and DataSink
    nodes are hardwired within the returned workflow, as this
    tightly integrates with the Freesurfer subjects directory
    structure.

    Parameters
    ----------
    data_dir : path
        top level of data hierarchy/FS subjects directory
    subjects : list of strings
        list of subject IDs
    name : alphanumeric string, optional
        workflow name

    """
    if data_dir is None:
        data_dir = os.environ["SUBJECTS_DIR"]
    if subjects is None:
        subjects = []

    # Subject source node
    subjectsource = Node(IdentityInterface(fields=["subject_id"]),
                         iterables=("subject_id", subjects),
                         name="subjectsource")

    # Grab recon-all outputs
    templates = dict(aseg="{subject_id}/mri/aparc+aseg.mgz",
                     head="{subject_id}/mri/brain.mgz")
    datasource = Node(SelectFiles(templates, base_directory=data_dir),
                      "datasource")

    # Convert images to nifti storage and float representation
    cvtaseg = Node(fs.MRIConvert(out_type="niigz"), "convertaseg")

    cvtbrain = Node(fs.MRIConvert(out_type="niigz", out_datatype="float"),
                    "convertbrain")

    # Turn the aparc+aseg into a brainmask
    makemask = Node(fs.Binarize(dilate=4, erode=3, min=0.5), "makemask")

    # Extract the brain from the orig.mgz using the mask
    skullstrip = Node(fsl.ApplyMask(), "skullstrip")

    # Normalize using ANTS
    antswarp = Node(ANTSIntroduction(), "antswarp")

    # Generate a png summarizing the registration
    warpreport = Node(WarpReport(), "warpreport")

    # Save relevant files to the data directory
    datasink = Node(DataSink(base_directory=data_dir, parameterization=False),
                    name="datasink")

    # Define and connect the workflow
    # -------------------------------

    normalize = Workflow(name=name)

    normalize.connect([
        (subjectsource, datasource, [("subject_id", "subject_id")]),
        (datasource, cvtaseg, [("aseg", "in_file")]),
        (datasource, cvtbrain, [("head", "in_file")]),
        (cvtaseg, makemask, [("out_file", "in_file")]),
        (cvtbrain, skullstrip, [("out_file", "in_file")]),
        (makemask, skullstrip, [("binary_file", "mask_file")]),
        (skullstrip, antswarp, [("out_file", "in_file")]),
        (antswarp, warpreport, [("brain_file", "in_file")]),
        (subjectsource, datasink, [("subject_id", "container")]),
        (antswarp, datasink, [("warp_file", "normalization.@warpfield"),
                              ("inv_warp_file",
                               "normalization.@inverse_warpfield"),
                              ("affine_file", "normalization.@affine"),
                              ("brain_file", "normalization.@brain")]),
        (warpreport, datasink, [("out_file", "normalization.@report")]),
    ])

    return normalize
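As with the FSL variant above, a brief invocation sketch (hypothetical paths; ANTSIntroduction and WarpReport are interfaces provided by the surrounding project):

wf = create_ants_workflow(data_dir='/data/freesurfer_subjects', subjects=['subj01'])
wf.base_dir = '/scratch/work'
wf.run(plugin='MultiProc', plugin_args={'n_procs': 2})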
Code example #13
File: main.py  Project: davidmeunier79/sctva
def create_main_pipeline(subject_list=SUBJECTS):
    RADIUS = 5  # selection sphere radius (mm)

    # create node that contains meta-variables about data
    inputnode = Node(
        IdentityInterface(fields=[
            "subject_id", "center", "modality", "acquisition", "correction"
        ]),
        name="inputnode",
    )
    inputnode.inputs.center = CENTER
    inputnode.inputs.modality = MODALITY
    inputnode.inputs.acquisition = ACQUISITION
    inputnode.inputs.correction = CORRECTION
    inputnode.iterables = [("subject_id", subject_list)]

    # Templates for grabbing the relevant data files
    templates = {
        "diffusion_volume":
        "DTI/{center}/{subject_id}/{modality}/{acquisition}/{"
        "correction}/corrected_dwi_{subject_id}.nii.gz",
        "bvals":
        "DTI/{center}/{subject_id}/{modality}/{acquisition}/raw_bvals_{subject_id}.txt",
        "bvecs":
        "DTI/{center}/{subject_id}/{modality}/{acquisition}/{correction}/corrected_bvecs_{subject_id}.txt",
        "t1_volume":
        "analysis_{subject_id}/anat/{"
        "subject_id}_ses-01_T1w_denoised_debiased_in-MNI152.nii.gz",
        "func_contrast_volume":
        "analysis_{subject_id}/spm_realign/results_8WM_9CSF_0mvt/In-MNI152_{subject_id}_res-8WM_9CSF_0mvt_human_vs_all_t.nii.gz",
    }
    datagrabber = pe.Node(SelectFiles(templates), name="datagrabber")
    datagrabber.inputs.base_directory = PRIMAVOICE

    study_pipeline = create_study_pipeline(radius=RADIUS)

    main_pipeline = pe.Workflow(name="main_pipeline")

    main_pipeline.connect([(
        inputnode,
        datagrabber,
        [
            ("subject_id", "subject_id"),
            ("center", "center"),
            ("modality", "modality"),
            ("acquisition", "acquisition"),
            ("correction", "correction"),
        ],
    )])
    main_pipeline.connect([(
        datagrabber,
        study_pipeline,
        [
            ("diffusion_volume", "inputnode.diffusion_volume"),
            ("bvals", "inputnode.bvals"),
            ("bvecs", "inputnode.bvecs"),
            ("t1_volume", "inputnode.t1_volume"),
            ("func_contrast_volume", "inputnode.func_contrast_volume"),
        ],
    )])
    return main_pipeline
Code example #14
def main(arglist):
    """Main function for workflow setup and execution."""
    args = parse_args(arglist)

    # Get and process specific information
    project = lyman.gather_project_info()
    exp = lyman.gather_experiment_info(args.experiment, args.altmodel)

    if args.experiment is None:
        args.experiment = project["default_exp"]

    if args.altmodel:
        exp_name = "-".join([args.experiment, args.altmodel])
    else:
        exp_name = args.experiment

    # Make sure some paths are set properly
    os.environ["SUBJECTS_DIR"] = project["data_dir"]

    # Set roots of output storage
    anal_dir_base = op.join(project["analysis_dir"], exp_name)
    work_dir_base = op.join(project["working_dir"], exp_name)
    nipype.config.set("execution", "crashdump_dir", project["crash_dir"])

    ### Set up group info
    ## Regular design
    # Subject source (no iterables here)
    subject_list = lyman.determine_subjects(args.subjects)
    print(subject_list)

    subj_source = Node(IdentityInterface(fields=["subject_id"]),
                       name="subj_source")
    subj_source.inputs.subject_id = subject_list

    # load in covariate for source accuracy analysis
    #     cov = pd.read_csv('/Volumes/group/awagner/sgagnon/AP/results/df_sourceAcc.csv')
    #     cov_col = 'mean_acc'

    # load in covariate for cort analysis
    cov = pd.read_csv(
        '/Volumes/group/awagner/sgagnon/AP/data/cortisol/cort_percentchange_testbaseline_controlassay.csv'
    )
    cov_col = 'cort_controlassay'

    cov = cov.loc[cov.subid.isin(
        subject_list)]  # prune for those in this analysis
    cov[cov_col] = (cov[cov_col] -
                    cov[cov_col].mean()) / cov[cov_col].std()  # zscore
    print(cov.describe())

    cov_reg = [
        cov[cov.subid == x].reset_index().at[0, cov_col] for x in subject_list
    ]

    # Set up the regressors and contrasts
    regressors = dict(group_mean=[int(1) for sub in subject_list],
                      z_covariate=cov_reg)
    print(regressors)

    contrasts = [["cov", "T", ["group_mean", "z_covariate"], [0, 1]]]

    # Subject level contrast source
    contrast_source = Node(IdentityInterface(fields=["l1_contrast"]),
                           iterables=("l1_contrast", exp["contrast_names"]),
                           name="contrast_source")

    # Group workflow
    space = args.regspace
    wf_name = "_".join([space, args.output])
    if space == "mni":
        mfx, mfx_input, mfx_output = wf.create_volume_mixedfx_workflow(
            wf_name, subject_list, regressors, contrasts, exp)
    else:
        print('run mni!')

    # Mixed effects inputs
    ffxspace = "mni" if space == "mni" else "epi"
    ffxsmooth = "unsmoothed" if args.unsmoothed else "smoothed"
    mfx_base = op.join("{subject_id}/ffx/%s/%s/{l1_contrast}" %
                       (ffxspace, ffxsmooth))
    templates = dict(copes=op.join(mfx_base, "cope1.nii.gz"))
    if space == "mni":
        templates.update(
            dict(varcopes=op.join(mfx_base, "varcope1.nii.gz"),
                 dofs=op.join(mfx_base, "tdof_t1.nii.gz")))
    else:
        templates.update(
            dict(reg_file=op.join(anal_dir_base, "{subject_id}/preproc/run_1",
                                  "func2anat_tkreg.dat")))

    # Workflow source node
    mfx_source = MapNode(
        SelectFiles(templates,
                    base_directory=anal_dir_base,
                    sort_filelist=True), "subject_id", "mfx_source")

    # Workflow input connections
    mfx.connect([
        (contrast_source, mfx_source, [("l1_contrast", "l1_contrast")]),
        (contrast_source, mfx_input, [("l1_contrast", "l1_contrast")]),
        (subj_source, mfx_source, [("subject_id", "subject_id")]),
        (mfx_source, mfx_input, [("copes", "copes")])
    ])
    if space == "mni":
        mfx.connect([
            (mfx_source, mfx_input, [("varcopes", "varcopes"),
                                     ("dofs", "dofs")]),
        ])
    else:
        mfx.connect([(mfx_source, mfx_input, [("reg_file", "reg_file")]),
                     (subj_source, mfx_input, [("subject_id", "subject_id")])])

    # Mixed effects outputs
    mfx_sink = Node(DataSink(base_directory="/".join(
        [anal_dir_base, args.output, space]),
                             substitutions=[("/stats", "/"), ("/_hemi_", "/"),
                                            ("_glm_results", "")],
                             parameterization=True),
                    name="mfx_sink")

    mfx_outwrap = tools.OutputWrapper(mfx, subj_source, mfx_sink, mfx_output)
    mfx_outwrap.sink_outputs()
    mfx_outwrap.set_mapnode_substitutions(1)
    mfx_outwrap.add_regexp_substitutions([(r"_l1_contrast_[-\w]*/", "/"),
                                          (r"_mni_hemi_[lr]h", "")])
    mfx.connect(contrast_source, "l1_contrast", mfx_sink, "container")

    # Set a few last things
    mfx.base_dir = work_dir_base

    # Execute
    lyman.run_workflow(mfx, args=args)

    # Clean up
    if project["rm_working_dir"]:
        shutil.rmtree(project["working_dir"])
Code example #15
infosource = Node(util.IdentityInterface(fields=['contrast_id', 'subject_id']),
                  name="infosource")

infosource.iterables = [('contrast_id', contrast_list)]
infosource.inputs.subject_id = subject_list

# SelectFiles - to grab the data (alternative to DataGrabber)
templates = {
    'cons':
    os.path.join(
        '/home/rj299/scratch60/mdm_analysis/output/imaging/Sink_resp_mon_sv/1stLevel/_subject_id_{subject_id}/',
        '{contrast_id}.nii')
}

selectfiles = MapNode(SelectFiles(
    templates,
    base_directory='/home/rj299/scratch60/mdm_analysis/work/',
    sort_filelist=True),
                      name="selectfiles",
                      iterfield=['subject_id'])

datasink = Node(nio.DataSink(
    base_directory=
    '/home/rj299/scratch60/mdm_analysis/output/imaging/Sink_resp_mon_sv/'),
                name="datasink")

l2analysis = Workflow(name='l2spm_mon_sv_glm_heightp05')

l2analysis.base_dir = '/home/rj299/scratch60/mdm_analysis/work/'

l2analysis.connect([
    (infosource, selectfiles, [('contrast_id', 'contrast_id'),
Code example #16
def FA_connectome(subject_list,base_directory,out_directory):

	#==============================================================
	# Loading required packages
	import nipype.interfaces.io as nio
	import nipype.pipeline.engine as pe
	import nipype.interfaces.utility as util
	import nipype.interfaces.fsl as fsl
	import nipype.interfaces.dipy as dipy
	import nipype.interfaces.mrtrix as mrt
	from own_nipype import DipyDenoise as denoise
	from own_nipype import trk_Coreg as trkcoreg
	from own_nipype import TXT2PCK as txt2pck
	from own_nipype import FAconnectome as connectome
	from own_nipype import Extractb0 as extract_b0
	import nipype.interfaces.cmtk as cmtk
	import nipype.interfaces.diffusion_toolkit as dtk
	import nipype.algorithms.misc as misc

	from nipype import SelectFiles
	import os
	registration_reference = os.environ['FSLDIR'] + '/data/standard/FMRIB58_FA_1mm.nii.gz'
	nodes = list()

	#====================================
	# Defining the nodes for the workflow

	# Utility nodes
	gunzip = pe.Node(interface=misc.Gunzip(), name='gunzip')
	gunzip2 = pe.Node(interface=misc.Gunzip(), name='gunzip2')
	fsl2mrtrix = pe.Node(interface=mrt.FSL2MRTrix(invert_x=True),name='fsl2mrtrix')

	# Getting the subject ID
	infosource  = pe.Node(interface=util.IdentityInterface(fields=['subject_id']),name='infosource')
	infosource.iterables = ('subject_id', subject_list)

	# Getting the relevant diffusion-weighted data
	templates = dict(dwi='{subject_id}/dwi/{subject_id}_dwi.nii.gz',
		bvec='{subject_id}/dwi/{subject_id}_dwi.bvec',
		bval='{subject_id}/dwi/{subject_id}_dwi.bval')

	selectfiles = pe.Node(SelectFiles(templates),
	                   name='selectfiles')
	selectfiles.inputs.base_directory = os.path.abspath(base_directory)

	# Denoising
	denoise = pe.Node(interface=denoise(), name='denoise')

	# Eddy-current and motion correction
	eddycorrect = pe.Node(interface=fsl.epi.EddyCorrect(), name='eddycorrect')
	eddycorrect.inputs.ref_num = 0

	# Upsampling
	resample = pe.Node(interface=dipy.Resample(interp=3,vox_size=(1.,1.,1.)), name='resample')

	# Extract b0 image
	extract_b0 = pe.Node(interface=extract_b0(),name='extract_b0')

	# Fitting the diffusion tensor model
	dwi2tensor = pe.Node(interface=mrt.DWI2Tensor(), name='dwi2tensor')
	tensor2vector = pe.Node(interface=mrt.Tensor2Vector(), name='tensor2vector')
	tensor2adc = pe.Node(interface=mrt.Tensor2ApparentDiffusion(), name='tensor2adc')
	tensor2fa = pe.Node(interface=mrt.Tensor2FractionalAnisotropy(), name='tensor2fa')

	# Create a brain mask
	bet = pe.Node(interface=fsl.BET(frac=0.3,robust=False,mask=True),name='bet')

	# Eroding the brain mask
	erode_mask_firstpass = pe.Node(interface=mrt.Erode(), name='erode_mask_firstpass')
	erode_mask_secondpass = pe.Node(interface=mrt.Erode(), name='erode_mask_secondpass')
	MRmultiply = pe.Node(interface=mrt.MRMultiply(), name='MRmultiply')
	MRmult_merge = pe.Node(interface=util.Merge(2), name='MRmultiply_merge')
	threshold_FA = pe.Node(interface=mrt.Threshold(absolute_threshold_value = 0.7), name='threshold_FA')

	# White matter mask
	gen_WM_mask = pe.Node(interface=mrt.GenerateWhiteMatterMask(), name='gen_WM_mask')
	threshold_wmmask = pe.Node(interface=mrt.Threshold(absolute_threshold_value = 0.4), name='threshold_wmmask')

	# CSD probabilistic tractography 
	estimateresponse = pe.Node(interface=mrt.EstimateResponseForSH(maximum_harmonic_order = 8), name='estimateresponse')
	csdeconv = pe.Node(interface=mrt.ConstrainedSphericalDeconvolution(maximum_harmonic_order = 8), name='csdeconv')

	# Tracking 
	probCSDstreamtrack = pe.Node(interface=mrt.ProbabilisticSphericallyDeconvolutedStreamlineTrack(), name='probCSDstreamtrack')
	probCSDstreamtrack.inputs.inputmodel = 'SD_PROB'
	probCSDstreamtrack.inputs.desired_number_of_tracks = 150000
	tck2trk = pe.Node(interface=mrt.MRTrix2TrackVis(), name='tck2trk')

	# smoothing the tracts 
	smooth = pe.Node(interface=dtk.SplineFilter(step_length=0.5), name='smooth')

	# Co-registration with MNI space
	mrconvert = pe.Node(mrt.MRConvert(extension='nii'), name='mrconvert')
	flt = pe.Node(interface=fsl.FLIRT(reference=registration_reference, dof=12, cost_func='corratio'), name='flt')

	# Moving tracts to common space
	trkcoreg = pe.Node(interface=trkcoreg(reference=registration_reference),name='trkcoreg')

	# calculating the connectome matrix
	calc_matrix = pe.Node(interface=connectome(ROI_file='/home/jb07/Desktop/aal.nii.gz'),name='calc_matrix')

	# Converting the adjacency matrix from txt to pck format
	txt2pck = pe.Node(interface=txt2pck(), name='txt2pck')

	# Calculate graph theory measures with NetworkX and CMTK
	nxmetrics = pe.Node(interface=cmtk.NetworkXMetrics(treat_as_weighted_graph = True), name='nxmetrics')

	#====================================
	# Setting up the workflow
	fa_connectome = pe.Workflow(name='FA_connectome')

	# Reading in files
	fa_connectome.connect(infosource, 'subject_id', selectfiles, 'subject_id')

	# Denoising
	fa_connectome.connect(selectfiles, 'dwi', denoise, 'in_file')

	# Eddy current and motion correction
	fa_connectome.connect(denoise, 'out_file',eddycorrect, 'in_file')
	fa_connectome.connect(eddycorrect, 'eddy_corrected', resample, 'in_file')
	fa_connectome.connect(resample, 'out_file', extract_b0, 'in_file')
	fa_connectome.connect(resample, 'out_file', gunzip,'in_file')

	# Brain extraction
	fa_connectome.connect(extract_b0, 'out_file', bet, 'in_file')

	# Creating tensor maps
	fa_connectome.connect(selectfiles,'bval',fsl2mrtrix,'bval_file')
	fa_connectome.connect(selectfiles,'bvec',fsl2mrtrix,'bvec_file')
	fa_connectome.connect(gunzip,'out_file',dwi2tensor,'in_file')
	fa_connectome.connect(fsl2mrtrix,'encoding_file',dwi2tensor,'encoding_file')
	fa_connectome.connect(dwi2tensor,'tensor',tensor2vector,'in_file')
	fa_connectome.connect(dwi2tensor,'tensor',tensor2adc,'in_file')
	fa_connectome.connect(dwi2tensor,'tensor',tensor2fa,'in_file')
	fa_connectome.connect(tensor2fa,'FA', MRmult_merge, 'in1')

	# Thresholding to create a mask of single fibre voxels
	fa_connectome.connect(gunzip2, 'out_file', erode_mask_firstpass, 'in_file')
	fa_connectome.connect(erode_mask_firstpass, 'out_file', erode_mask_secondpass, 'in_file')
	fa_connectome.connect(erode_mask_secondpass,'out_file', MRmult_merge, 'in2')
	fa_connectome.connect(MRmult_merge, 'out', MRmultiply,  'in_files')
	fa_connectome.connect(MRmultiply, 'out_file', threshold_FA, 'in_file')

	# Create seed mask
	fa_connectome.connect(gunzip, 'out_file', gen_WM_mask, 'in_file')
	fa_connectome.connect(bet, 'mask_file', gunzip2, 'in_file')
	fa_connectome.connect(gunzip2, 'out_file', gen_WM_mask, 'binary_mask')
	fa_connectome.connect(fsl2mrtrix, 'encoding_file', gen_WM_mask, 'encoding_file')
	fa_connectome.connect(gen_WM_mask, 'WMprobabilitymap', threshold_wmmask, 'in_file')

	# Estimate response
	fa_connectome.connect(gunzip, 'out_file', estimateresponse, 'in_file')
	fa_connectome.connect(fsl2mrtrix, 'encoding_file', estimateresponse, 'encoding_file')
	fa_connectome.connect(threshold_FA, 'out_file', estimateresponse, 'mask_image')

	# CSD calculation
	fa_connectome.connect(gunzip, 'out_file', csdeconv, 'in_file')
	fa_connectome.connect(gen_WM_mask, 'WMprobabilitymap', csdeconv, 'mask_image')
	fa_connectome.connect(estimateresponse, 'response', csdeconv, 'response_file')
	fa_connectome.connect(fsl2mrtrix, 'encoding_file', csdeconv, 'encoding_file')

	# Running the tractography
	fa_connectome.connect(threshold_wmmask, "out_file", probCSDstreamtrack, "seed_file")
	fa_connectome.connect(csdeconv, "spherical_harmonics_image", probCSDstreamtrack, "in_file")
	fa_connectome.connect(gunzip, "out_file", tck2trk, "image_file")
	fa_connectome.connect(probCSDstreamtrack, "tracked", tck2trk, "in_file")

	# Smoothing the trackfile
	fa_connectome.connect(tck2trk, 'out_file',smooth,'track_file')

	# Co-registering FA with FMRIB58_FA_1mm standard space 
	fa_connectome.connect(MRmultiply,'out_file',mrconvert,'in_file')
	fa_connectome.connect(mrconvert,'converted',flt,'in_file')
	fa_connectome.connect(smooth,'smoothed_track_file',trkcoreg,'in_file')
	fa_connectome.connect(mrconvert,'converted',trkcoreg,'FA_file')
	fa_connectome.connect(flt,'out_matrix_file',trkcoreg,'transfomation_matrix')

	# Calculating the FA connectome
	fa_connectome.connect(trkcoreg,'transformed_track_file',calc_matrix,'trackfile')
	fa_connectome.connect(flt,'out_file',calc_matrix,'FA_file')

	# Calculating graph measures 
	fa_connectome.connect(calc_matrix,'out_file',txt2pck,'in_file')
	fa_connectome.connect(txt2pck,'out_file',nxmetrics,'in_file')

	#====================================
	# Running the workflow
	fa_connectome.base_dir = os.path.abspath(out_directory)
	fa_connectome.write_graph()
	fa_connectome.run('PBSGraph')
Code example #17
def smoothing_skullstrip(
    fmriprep_dir,
    output_dir,
    work_dir,
    subject_list,
    task,
    run,
    fwhm=6.0,
    name="smoothing_skullstrip",
):
    """
    FSL smooth fMRIprep output
    """
    workflow = pe.Workflow(name=name)
    workflow.base_dir = work_dir

    template = {
        "bolds": "sub-{subject}/func/sub-{subject}_task-{task}_run-{run}_space-MNI152NLin2009cAsym_desc-preproc_bold.nii.gz",
        "mask": "sub-{subject}/func/sub-{subject}_task-{task}_run-{run}_space-MNI152NLin2009cAsym_desc-brain_mask.nii.gz",
    }

    bg = pe.Node(SelectFiles(template, base_directory=fmriprep_dir), name="datagrabber")
    bg.iterables = [("subject", subject_list), ("task", task), ("run", run)]

    # Create DataSink object
    sinker = pe.Node(DataSink(), name="sinker")
    sinker.inputs.base_directory = output_dir
    sinker.inputs.substitutions = [
        ("_run_1_subject_", "sub-"),
        ("_skip0", "func"),
        ("desc-preproc_bold_smooth_masked_roi", f"desc-preproc-fwhm{int(fwhm)}mm_bold"),
    ]

    # Smoothing
    susan = create_susan_smooth()
    susan.inputs.inputnode.fwhm = fwhm

    # masking the smoothed output
    # note: the susan workflow returns a list, but ApplyMask only accepts a single path string
    mask_results = pe.MapNode(
        ApplyMask(), name="mask_results", iterfield=["in_file", "mask_file"]
    )

    # remove first five volumes
    skip = pe.MapNode(fsl.ExtractROI(), name="skip", iterfield=["in_file"])
    skip.inputs.t_min = 5
    skip.inputs.t_size = -1

    workflow.connect(
        [
            (
                bg,
                susan,
                [("bolds", "inputnode.in_files"), ("mask", "inputnode.mask_file")],
            ),
            (bg, mask_results, [("mask", "mask_file")]),
            (susan, mask_results, [("outputnode.smoothed_files", "in_file")]),
            (mask_results, skip, [("out_file", "in_file")]),
            (skip, sinker, [("roi_file", f"func_smooth-{int(fwhm)}mm.@out_file")]),
        ]
    )
    return workflow
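A hedged invocation sketch; the directories, subject labels, task and run values below are hypothetical and must match the fMRIPrep-style filename template used above (task and run are passed as lists because they are iterated over).

wf = smoothing_skullstrip(fmriprep_dir='/data/fmriprep',
                          output_dir='/data/derivatives/smoothed',
                          work_dir='/scratch/work',
                          subject_list=['001', '002'],
                          task=['rest'],
                          run=['1'],
                          fwhm=6.0)
wf.run(plugin='MultiProc')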
Code example #18
        height_threshold=0.001,
        extent_fdr_p_threshold=0.05,
        height_threshold_type='p-value'),
    iterfield=['stat_image'],
    name="level2thresh")
#Infosource - a function free node to iterate over the list of subject names
infosource = Node(util.IdentityInterface(fields=['contrast_id']),
                  name="infosource")
infosource.iterables = [('contrast_id', contrast_list)]

# SelectFiles - to grab the data (alternative to DataGrabber)

templates = {
    'cons': opj(input_dir, '_subject_id_{subject_id}', '{contrast_id}.nii')
}
selectfilesKet = MapNode(SelectFiles(templates),
                         iterfield=['subject_id'],
                         name="selectfilesKet")
selectfilesKet.inputs.subject_id = ket_list

selectfilesMid = MapNode(SelectFiles(templates),
                         iterfield=['subject_id'],
                         name="selectfilesMid")
selectfilesMid.inputs.subject_id = mid_list

l2analysis = Workflow(name='spm_l2analysisGroup')
l2analysis.base_dir = '/media/Data/work/KPE_SPM_ses2'

l2analysis.connect([
    (infosource, selectfilesKet, [
        ('contrast_id', 'contrast_id'),
Code example #19
    def dwi_preproc(acqparams, base_directory, index_file, out_directory,
                    subject_list):

        # Loading required packages
        from BrainTypes_additional_interfaces import ants_QuickSyN
        import nipype.interfaces.fsl as fsl
        import nipype.interfaces.io as nio
        import nipype.pipeline.engine as pe
        from nipype import SelectFiles
        import nipype.interfaces.utility as util
        import os

        # ==============================================================
        # Processing of diffusion-weighted data
        infosource = pe.Node(
            interface=util.IdentityInterface(fields=['subject_id']),
            name='infosource')
        infosource.iterables = ('subject_id', subject_list)

        # Getting the relevant data
        templates = dict(dwi='{subject_id}/dwi/{subject_id}_dwi.nii.gz',
                         bvec='{subject_id}/dwi/{subject_id}_dwi.bvec',
                         bval='{subject_id}/dwi/{subject_id}_dwi.bval')

        selectfiles = pe.Node(SelectFiles(templates), name="selectfiles")
        selectfiles.inputs.base_directory = os.path.abspath(base_directory)

        # Extract b0 image
        fslroi = pe.Node(interface=fsl.ExtractROI(), name='fslroi')
        fslroi.inputs.t_min = 0
        fslroi.inputs.t_size = 1

        # Create a brain mask
        bet = pe.Node(interface=fsl.BET(frac=0.3,
                                        robust=False,
                                        mask=True,
                                        no_output=False),
                      name='bet')

        # Eddy-current and motion correction
        eddy = pe.Node(interface=fsl.epi.Eddy(args='-v'), name='eddy')
        eddy.inputs.in_acqp = acqparams
        eddy.inputs.in_index = index_file

        # Fitting the diffusion tensor model
        dtifit = pe.Node(interface=fsl.DTIFit(), name='dtifit')

        # Moving files to MNI space
        reg = pe.Node(interface=ants_QuickSyN(), name='reg')
        reg.inputs.fixed_image = os.environ[
            'FSLDIR'] + '/data/standard/FMRIB58_FA_1mm.nii.gz'
        reg.inputs.image_dimensions = 3
        reg.inputs.transform_type = 's'

        # ==============================================================
        # Setting up the workflow
        dwi_preproc = pe.Workflow(name='dwi_preproc')
        dwi_preproc.connect(infosource, 'subject_id', selectfiles,
                            'subject_id')

        # Diffusion data
        # Preprocessing
        dwi_preproc.connect(selectfiles, 'dwi', fslroi, 'in_file')
        dwi_preproc.connect(fslroi, 'roi_file', bet, 'in_file')
        dwi_preproc.connect(bet, 'mask_file', eddy, 'in_mask')
        dwi_preproc.connect(selectfiles, 'dwi', eddy, 'in_file')
        dwi_preproc.connect(selectfiles, 'bvec', eddy, 'in_bvec')
        dwi_preproc.connect(selectfiles, 'bval', eddy, 'in_bval')

        # Calculate diffusion measures
        dwi_preproc.connect(eddy, 'out_corrected', dtifit, 'dwi')
        dwi_preproc.connect(bet, 'mask_file', dtifit, 'mask')
        dwi_preproc.connect(infosource, 'subject_id', dtifit, 'base_name')
        dwi_preproc.connect(selectfiles, 'bvec', dtifit, 'bvecs')
        dwi_preproc.connect(selectfiles, 'bval', dtifit, 'bvals')
        dwi_preproc.connect(dtifit, 'FA', reg, 'moving_image')
        dwi_preproc.connect(infosource, 'subject_id', reg, 'output_prefix')

        # ==============================================================
        # Running the workflow
        dwi_preproc.base_dir = os.path.abspath(out_directory)
        dwi_preproc.write_graph()
        dwi_preproc.run()
Code example #20
def preprocess_loc(experiment_dir,
                   subject_id=None,
                   run_id=None,
                   fwhm=4.0,
                   run_num=4,
                   hpcutoff=100.,
                   session_id=None,
                   task_id=None):
    """
    create a workflow for preprocessing and saving the visual localizer data
    """
    preproc = create_featreg_preproc(whichvol=None)
    preproc.inputs.inputspec.fwhm = fwhm
    preproc.inputs.inputspec.highpass = hpcutoff

    if subject_id:
        subject_list = [subject_id]
    else:
        subject_list = sorted([
            path.split(os.path.sep)[-1]
            for path in glob(os.path.join(experiment_dir, 'sub*'))
        ])

    if run_id:
        run_list = [run_id]
    else:
        run_list = []
        for i in range(1, run_num + 1):
            run_list.append('run-' + str(i))

    if session_id:
        session_list = [session_id]
    else:
        session_list = ['ses-localizer']

    if task_id:
        task_list = [task_id]
    else:
        task_list = ['task_objectcategories']

    info_source = pe.Node(util.IdentityInterface(
        fields=['subject_id', 'session_id', 'task_id', 'run_id'],
        mandatory_inputs=False),
                          name='infosource')

    info_source.iterables = [('subject_id', subject_list),
                             ('session_id', session_list),
                             ('task_id', task_list), ('run_id', run_list)]

    templates = {  #'anat':'inputs/tnt/{subject_id}/t1w/brain.nii.gz',
        'func':
        'sourcedata/aligned/{subject_id}/in_bold3Tp2/sub-*_task-objectcategories_{run_id}_bold.nii.gz'
    }

    sf = pe.Node(SelectFiles(templates), name='selectfiles')
    sf.inputs.base_directory = experiment_dir

    datasink = pe.Node(DataSink(), name='datasink')
    datasink.inputs.base_directory = experiment_dir

    # we currently do not want to slice time correct anymore,
    # regard it as superfluous
    #Slicer = pe.Node(fsl.SliceTimer(),
    #                name='Slicer')

    def get_preproc_subs(subject_id, session_id, task_id, run_id):
        subs = [('_subject_id_{}_'.format(subject_id), '')]
        subs.append(('task_id_{}'.format(task_id), ''))
        subs.append(('_run_id_{}'.format(run_id), ''))
        subs.append(('_session_id_{}'.format(session_id), ''))
        subs.append(('_addmean0', ''))
        subs.append(('_dilatemask0', ''))
        subs.append(('_maskfunc30', ''))
        subs.append(('_meanfunc30', ''))
        subs.append(('bold_dtype_bet_thresh_dil',
                     'space-custom-subject_type-brain_mask'))
        subs.append(('bold_dtype_mask_smooth_mask_gms',
                     'space-custom-subject_desc-mean'))
        subs.append(('bold_dtype_mask_smooth_mask',
                     'space-custom-subject_desc-smooth'))
        subs.append(('bold_dtype_mask_smooth_mask_gms_tempfilt_maths',
                     'space-custom-subject_desc-highpass_bold'))
        subs.append(('_mean', ''))
        subs.append(('mean_tempfilt_maths', 'highpass_bold'))
        return subs

    subsgenpreproc = pe.Node(util.Function(
        input_names=['subject_id', 'session_id', 'task_id', 'run_id'],
        output_names=['substitutions'],
        function=get_preproc_subs),
                             name='subsgenpreproc')

    def ds_container_gen(subject_id):
        """
        Generate container for DataSink
        """
        from os.path import join as opj
        container = opj(subject_id, 'ses-localizer')
        return container

    ds_container = pe.Node(util.Function(input_names=['subject_id'],
                                         output_names=['container'],
                                         function=ds_container_gen),
                           name='ds_container')

    preprocwf = pe.Workflow(name='preprocessing')
    preprocwf.connect([
        (info_source, sf, [('subject_id', 'subject_id'), ('run_id', 'run_id'),
                           ('session_id', 'session_id')]),
        (info_source, ds_container, [('subject_id', 'subject_id')]),
        (ds_container, datasink, [('container', 'container')]),
        (info_source, subsgenpreproc, [('subject_id', 'subject_id'),
                                       ('session_id', 'session_id'),
                                       ('task_id', 'task_id'),
                                       ('run_id', 'run_id')]),
        #                        (sf, Slicer, [('func', 'in_file')]),
        #                        (Slicer, preproc,
        #                        [('slice_time_corrected_file', 'inputspec.func')]),
        (sf, preproc, [('func', 'inputspec.func')]),
        (subsgenpreproc, datasink, [('substitutions', 'substitutions')]),
        (preproc, datasink, [('outputspec.smoothed_files', 'func.@smooth')]),
        (preproc, datasink, [('outputspec.mask', 'func.@mask')]),
        (preproc, datasink, [('outputspec.mean', 'func.@mean')]),
        (preproc, datasink, [('outputspec.highpassed_files', 'func.@highpass')
                             ])
    ])
    return preprocwf
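A sketch of how the factory above might be driven (the experiment directory and parameters are hypothetical); the returned workflow needs a base_dir set before running.

wf = preprocess_loc(experiment_dir='/data/experiment', fwhm=4.0)
wf.base_dir = '/scratch/work'
wf.run(plugin='MultiProc', plugin_args={'n_procs': 4})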
Code example #21
working_dir = os.path.abspath('working_dir')

script_dir = os.path.dirname(inspect.getfile(inspect.currentframe()))

demo_container = os.path.abspath(
    os.path.join(script_dir, 'containers/demo_container.img'))

data_dir = os.path.abspath(os.path.join(script_dir, 'data'))

maps = [data_dir + ':/input', working_dir + ':/working', '.:/output']

node1 = Node(demo.DemoTask_1(container=demo_container, map_dirs_list=maps),
             name="Node1")
node2 = Node(demo.DemoTask_2(container=demo_container, map_dirs_list=maps),
             name="Node2")

templates = {'input': '{subject_id}.txt'}

sf = Node(SelectFiles(templates), name="SelectFiles")

sf.inputs.base_directory = data_dir
sf.inputs.subject_id = 'Sub0001'

wf = Workflow(name="DemoWorkflow", base_dir=working_dir)

wf.connect([(sf, node1, [("input", "in_file")]),
            (node1, node2, [("out_file", "in_file")])])

wf.run()
Code example #22
                                  height_threshold=0.005,
                                  extent_fdr_p_threshold=0.1,
                                  height_threshold_type='p-value'),
                    name="level2thresh")
#Infosource - a function free node to iterate over the list of subject names
infosource = Node(util.IdentityInterface(fields=['contrast_id']),
                  name="infosource")
infosource.iterables = [('contrast_id', contrast_list)]

# SelectFiles - to grab the data (alternative to DataGrabber)
templates = {
    'cons': opj('/media/Data/work/datasink/1stLevel/_sub*/',
                '{contrast_id}.nii')
}
selectfiles = Node(SelectFiles(templates,
                               base_directory='/media/Data/work',
                               sort_filelist=True),
                   name="selectfiles")

l2analysis = Workflow(name='spm_l2analysis')
l2analysis.base_dir = opj(data_dir, '/media/Data/work/')

l2analysis.connect([
    (infosource, selectfiles, [
        ('contrast_id', 'contrast_id'),
    ]),
    (selectfiles, onesamplettestdes, [('cons', 'in_files')]),
    (onesamplettestdes, level2estimate, [('spm_mat_file', 'spm_mat_file')]),
    (level2estimate, level2conestimate, [('spm_mat_file', 'spm_mat_file'),
                                         ('beta_images', 'beta_images'),
                                         ('residual_image', 'residual_image')
Code example #23
    def connectome(subject_list, base_directory, out_directory):

        # ==================================================================
        # Loading required packages
        import nipype.pipeline.engine as pe
        import nipype.interfaces.utility as util
        from nipype.interfaces.freesurfer import ApplyVolTransform
        from nipype.interfaces.freesurfer import BBRegister
        import nipype.interfaces.fsl as fsl
        import nipype.interfaces.diffusion_toolkit as dtk
        from nipype.interfaces.utility import Merge
        import numpy as np
        from additional_interfaces import AtlasValues
        from additional_interfaces import AparcStats
        from additional_interfaces import CalcMatrix
        from additional_interfaces import FreeSurferValues
        from additional_interfaces import Tractography
        from additional_pipelines import DWIPreproc
        from additional_pipelines import SubjectSpaceParcellation
        from additional_pipelines import T1Preproc

        from nipype import SelectFiles
        import os

        # ==================================================================
        # Defining the nodes for the workflow

        # Getting the subject ID
        infosource = pe.Node(
            interface=util.IdentityInterface(fields=['subject_id']),
            name='infosource')
        infosource.iterables = ('subject_id', subject_list)

        # Getting the relevant anatomical and diffusion-weighted data
        templates = dict(T1='{subject_id}/anat/{subject_id}_T1w.nii.gz',
                         dwi='{subject_id}/dwi/{subject_id}_dwi.nii.gz',
                         bvec='{subject_id}/dwi/{subject_id}_dwi.bvec',
                         bval='{subject_id}/dwi/{subject_id}_dwi.bval')

        selectfiles = pe.Node(SelectFiles(templates), name='selectfiles')
        selectfiles.inputs.base_directory = os.path.abspath(base_directory)

        # ==============================================================
        # T1 processing
        t1_preproc = pe.Node(interface=T1Preproc(), name='t1_preproc')
        t1_preproc.inputs.out_directory = out_directory + '/connectome/'
        t1_preproc.inputs.template_directory = template_directory

        # DWI processing
        dwi_preproc = pe.Node(interface=DWIPreproc(), name='dwi_preproc')
        dwi_preproc.inputs.out_directory = out_directory + '/connectome/'
        dwi_preproc.inputs.acqparams = acquisition_parameters
        dwi_preproc.inputs.index_file = index_file
        dwi_preproc.inputs.out_directory = out_directory + '/connectome/'

        # Eroding the brain mask
        erode_mask = pe.Node(interface=fsl.maths.ErodeImage(),
                             name='erode_mask')

        # Reconstruction and tractography
        tractography = pe.Node(interface=Tractography(), name='tractography')
        tractography.iterables = ('model', ['CSA', 'CSD'])

        # smoothing the tracts
        smooth = pe.Node(interface=dtk.SplineFilter(step_length=0.5),
                         name='smooth')

        # Moving to subject space
        subject_parcellation = pe.Node(interface=SubjectSpaceParcellation(),
                                       name='subject_parcellation')
        subject_parcellation.inputs.source_subject = 'fsaverage'
        subject_parcellation.inputs.source_annot_file = 'aparc'
        subject_parcellation.inputs.out_directory = out_directory + '/connectome/'
        subject_parcellation.inputs.parcellation_directory = parcellation_directory

        # Co-registering T1 and dwi
        bbreg = pe.Node(interface=BBRegister(), name='bbreg')
        bbreg.inputs.init = 'fsl'
        bbreg.inputs.contrast_type = 't2'

        applyreg = pe.Node(interface=ApplyVolTransform(), name='applyreg')
        applyreg.inputs.interp = 'nearest'
        applyreg.inputs.inverse = True

        # Merge outputs to pass on to CalcMatrix
        merge = pe.Node(interface=Merge(3), name='merge')

        # Calculating the connectome matrix
        calc_matrix = pe.MapNode(interface=CalcMatrix(),
                                 name='calc_matrix',
                                 iterfield=['scalar_file'])
        calc_matrix.iterables = ('threshold', np.arange(0, 100, 10))

        # Getting values of diffusion measures
        FA_values = pe.Node(interface=AtlasValues(), name='FA_values')
        RD_values = pe.Node(interface=AtlasValues(), name='RD_values')
        AD_values = pe.Node(interface=AtlasValues(), name='AD_values')
        MD_values = pe.Node(interface=AtlasValues(), name='MD_values')

        # Getting additional surface measures
        aparcstats = pe.Node(interface=AparcStats(), name='aparcstats')
        aparcstats.inputs.parcellation_name = 'aparc'

        freesurfer_values = pe.Node(interface=FreeSurferValues(),
                                    name='freesurfer_values')
        freesurfer_values.inputs.parcellation_name = 'aparc'

        # ==================================================================
        # Setting up the workflow
        connectome = pe.Workflow(name='connectome')

        # Reading in files
        connectome.connect(infosource, 'subject_id', selectfiles, 'subject_id')

        # DWI preprocessing
        connectome.connect(infosource, 'subject_id', dwi_preproc, 'subject_id')
        connectome.connect(selectfiles, 'dwi', dwi_preproc, 'dwi')
        connectome.connect(selectfiles, 'bval', dwi_preproc, 'bvals')
        connectome.connect(selectfiles, 'bvec', dwi_preproc, 'bvecs')

        # CSD model and streamline tracking
        connectome.connect(dwi_preproc, 'mask', erode_mask, 'in_file')

        connectome.connect(selectfiles, 'bvec', tractography, 'bvec')
        connectome.connect(selectfiles, 'bval', tractography, 'bval')
        connectome.connect(dwi_preproc, 'dwi', tractography, 'in_file')
        connectome.connect(dwi_preproc, 'FA', tractography, 'FA')
        connectome.connect(erode_mask, 'out_file', tractography, 'brain_mask')

        # Smoothing the trackfile
        connectome.connect(tractography, 'out_track', smooth, 'track_file')

        # Preprocessing the T1-weighted file
        connectome.connect(infosource, 'subject_id', t1_preproc, 'subject_id')
        connectome.connect(selectfiles, 'T1', t1_preproc, 'T1')
        connectome.connect(t1_preproc, 'wm', subject_parcellation, 'wm')
        connectome.connect(t1_preproc, 'subjects_dir', subject_parcellation,
                           'subjects_dir')
        connectome.connect(t1_preproc, 'subject_id', subject_parcellation,
                           'subject_id')

        # Getting the parcellation into diffusion space
        connectome.connect(t1_preproc, 'subject_id', bbreg, 'subject_id')
        connectome.connect(t1_preproc, 'subjects_dir', bbreg, 'subjects_dir')
        connectome.connect(dwi_preproc, 'b0', bbreg, 'source_file')

        connectome.connect(dwi_preproc, 'b0', applyreg, 'source_file')
        connectome.connect(bbreg, 'out_reg_file', applyreg, 'reg_file')
        connectome.connect(subject_parcellation, 'renum_expanded', applyreg,
                           'target_file')

        # Calculating the FA connectome
        connectome.connect(tractography, 'out_file', calc_matrix, 'track_file')
        connectome.connect(dwi_preproc, 'FA', merge, 'in1')
        connectome.connect(dwi_preproc, 'RD', merge, 'in2')
        connectome.connect(tractography, 'GFA', merge, 'in3')
        connectome.connect(merge, 'out', calc_matrix, 'scalar_file')
        connectome.connect(applyreg, 'transformed_file', calc_matrix,
                           'ROI_file')

        # Getting values for additional measures
        connectome.connect(dwi_preproc, 'FA', FA_values, 'morpho_filename')
        connectome.connect(dwi_preproc, 'RD', RD_values, 'morpho_filename')
        connectome.connect(dwi_preproc, 'AD', AD_values, 'morpho_filename')
        connectome.connect(dwi_preproc, 'MD', MD_values, 'morpho_filename')
        connectome.connect(applyreg, 'transformed_file', FA_values,
                           'atlas_filename')
        connectome.connect(applyreg, 'transformed_file', RD_values,
                           'atlas_filename')
        connectome.connect(applyreg, 'transformed_file', AD_values,
                           'atlas_filename')
        connectome.connect(applyreg, 'transformed_file', MD_values,
                           'atlas_filename')

        # Getting FreeSurfer morphological values
        connectome.connect(t1_preproc, 'subject_id', aparcstats, 'subject_id')
        connectome.connect(t1_preproc, 'subjects_dir', aparcstats,
                           'subjects_dir')
        connectome.connect(aparcstats, 'lh_stats', freesurfer_values,
                           'lh_filename')
        connectome.connect(aparcstats, 'rh_stats', freesurfer_values,
                           'rh_filename')

        # ==================================================================
        # Running the workflow
        connectome.base_dir = os.path.abspath(out_directory)
        connectome.write_graph()
        connectome.run()
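
A hedged sketch of how the connectome pipeline defined above might be driven for a BIDS-style dataset, assuming the function is available at module scope (the subject IDs and directory paths are hypothetical placeholders, not taken from the original):

# Hypothetical invocation; all paths and IDs below are placeholders.
subjects = ['sub-01', 'sub-02']
connectome(subject_list=subjects,
           base_directory='/data/bids_dataset',
           out_directory='/data/derivatives')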
Code example #24
                                  ('sub-{subject_id}' +
                                   '_task-' + taskName +
                                   '_run-{run_id}' +
                                   '_space-MNI152NLin2009cAsym_desc-brain_mask.nii.gz')),
             'events': os.path.join(dataDir,
                                     'sub-{subject_id}',
                                     'func',
                                     ('sub-{subject_id}' +
                                      '_task-' + taskName +
                                      '_run-{run_id}' +
                                      '_events.tsv'))
             }

# Create SelectFiles node
sf = Node(SelectFiles(templates,
                      raise_on_empty=False,
                      sort_filelist=True),
          name='sf')
sf.iterables = [('subject_id', subject_list),
                ('run_id', run_list)]



###########
#
# FMRI PREPROCESSING NODES
#
###########

# smoothing with SUSAN
susan = Node(fsl.SUSAN(brightness_threshold = 2000.0,
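
The snippet is cut off inside the SUSAN call. A self-contained sketch of how such a smoothing node is typically completed (the 6 mm fwhm and the node name are assumptions, not taken from the original):

from nipype import Node
from nipype.interfaces import fsl

# Hypothetical completion of the truncated node above; brightness_threshold
# comes from the original, the fwhm value is a placeholder assumption.
susan = Node(fsl.SUSAN(brightness_threshold=2000.0, fwhm=6.0), name='susan')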
Code example #25
                                  ('sub-{subject_id}' +
                                   '_ses-{subsession_id}' +
                                   '_task-' + task_name +
                                   '_space-MNI152NLin2009cAsym_desc-preproc_bold.nii.gz')),
             'mask': os.path.join(baseDir,
                                  'sub-{subject_id}',
                                  'ses-{subsession_id}',
                                  'func',
                                  ('sub-{subject_id}' +
                                   '_ses-{subsession_id}' +
                                   '_task-' + task_name +
                                   '_space-MNI152NLin2009cAsym_desc-brain_mask.nii.gz'))
             }

# Create SelectFiles node
sf = Node(SelectFiles(templates, sort_filelist=True),
          name='sf')
sf.iterables = [('subject_id', subject_list),
                ('subsession_id', session_list)]



###########
#
# FMRI PREPROCESSING NODES
#
###########

# skip dummy scans
extract = Node(fsl.ExtractROI(t_min=nDelfMRI,    # first volumes to be deleted
                              t_size=-1),
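
This example is also truncated mid-call. A minimal sketch of the completed dummy-scan-removal node (the node name and the nDelfMRI value are placeholders; in the original, nDelfMRI is presumably defined earlier in the script):

from nipype import Node
from nipype.interfaces import fsl

nDelfMRI = 4  # placeholder: number of initial dummy volumes to drop

# Hypothetical completion of the truncated ExtractROI node above.
extract = Node(fsl.ExtractROI(t_min=nDelfMRI, t_size=-1), name='extract')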
Code example #26
      Change work_dir to the appropriate folder as needed
      (e.g. 'EpilepsyDatabase/')
"""
########
# Import
########
import getpass  # Import getpass library to pull current user
import nipype.pipeline as pe
from nipype import SelectFiles

# Pathing
user = getpass.getuser()  # Grabs current user name
base_dir = '/home/ROBARTS/' + user + '/Desktop/'
work_dir = '/home/ROBARTS/' + user + '/Desktop/Test/'

############
# DWIGrabber
############
# Template
templates = {
    'dwi': work_dir + "{subject_id}/dti/uncorrected/dwi.nii.gz",
    'bvec': work_dir + "{subject_id}/dti/uncorrected/dwi.bvec",
    'bval': work_dir + "{subject_id}/dti/uncorrected/dwi.bval",
}

# Node
node_dwiSelect = pe.Node(SelectFiles(templates), name="SelectFiles")
node_dwiSelect.base_dir = base_dir
node_dwiSelect.inputs.base_directory = work_dir
node_dwiSelect.inputs.sort_filelist = False
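
The templates above contain a {subject_id} placeholder that nothing in this excerpt fills in. A hedged sketch of how it is usually supplied, following the infosource pattern used in the other examples (the workflow name and subject IDs are placeholders):

import nipype.interfaces.utility as util

# Hypothetical infosource feeding subject IDs into the SelectFiles node.
node_infosource = pe.Node(util.IdentityInterface(fields=['subject_id']),
                          name='infosource')
node_infosource.iterables = ('subject_id', ['EPI_P001', 'EPI_P002'])  # placeholders

wf = pe.Workflow(name='dwi_grab', base_dir=work_dir)
wf.connect(node_infosource, 'subject_id', node_dwiSelect, 'subject_id')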
Code example #27
def main(arglist):
    """Main function for workflow setup and execution."""
    args = parse_args(arglist)

    # Get and process specific information
    project = lyman.gather_project_info()
    exp = lyman.gather_experiment_info(args.experiment, args.altmodel, args)

    if args.experiment is None:
        args.experiment = project["default_exp"]

    if args.altmodel:
        exp_name = "-".join([args.experiment, args.altmodel])
    else:
        exp_name = args.experiment

    # Make sure some paths are set properly
    os.environ["SUBJECTS_DIR"] = project["data_dir"]

    # Set roots of output storage
    anal_dir_base = op.join(project["analysis_dir"], exp_name)
    work_dir_base = op.join(project["working_dir"], exp_name)
    nipype.config.set("execution", "crashdump_dir", project["crash_dir"])

    # Subject source (no iterables here)
    subject_list = lyman.determine_subjects(args.subjects)
    subj_source = Node(IdentityInterface(fields=["subject_id"]),
                       name="subj_source")
    subj_source.inputs.subject_id = subject_list

    # Set up the regressors and contrasts
    regressors = dict(group_mean=[1] * len(subject_list))
    contrasts = [["group_mean", "T", ["group_mean"], [1]]]

    # Subject level contrast source
    contrast_source = Node(IdentityInterface(fields=["l1_contrast"]),
                           iterables=("l1_contrast", exp["contrast_names"]),
                           name="contrast_source")

    # Group workflow
    space = args.regspace
    wf_name = "_".join([space, args.output])
    if space == "mni":
        mfx, mfx_input, mfx_output = wf.create_volume_mixedfx_workflow(
            wf_name, subject_list, regressors, contrasts, exp)
    else:
        mfx, mfx_input, mfx_output = wf.create_surface_ols_workflow(
            wf_name, subject_list, exp)

    # Mixed effects inputs
    ffxspace = "mni" if space == "mni" else "epi"
    ffxsmooth = "unsmoothed" if args.unsmoothed else "smoothed"
    mfx_base = op.join("{subject_id}/ffx/%s/%s/{l1_contrast}" %
                       (ffxspace, ffxsmooth))
    templates = dict(copes=op.join(mfx_base, "cope1.nii.gz"))
    if space == "mni":
        templates.update(
            dict(varcopes=op.join(mfx_base, "varcope1.nii.gz"),
                 dofs=op.join(mfx_base, "tdof_t1.nii.gz")))
    else:
        templates.update(
            dict(reg_file=op.join(anal_dir_base, "{subject_id}/reg/epi/",
                                  ffxsmooth, "run_1/func2anat_tkreg.dat")))

    # Workflow source node
    mfx_source = MapNode(
        SelectFiles(templates,
                    base_directory=anal_dir_base,
                    sort_filelist=True), "subject_id", "mfx_source")

    # Workflow input connections
    mfx.connect([
        (contrast_source, mfx_source, [("l1_contrast", "l1_contrast")]),
        (contrast_source, mfx_input, [("l1_contrast", "l1_contrast")]),
        (subj_source, mfx_source, [("subject_id", "subject_id")]),
        (mfx_source, mfx_input, [("copes", "copes")])
    ])
    if space == "mni":
        mfx.connect([
            (mfx_source, mfx_input, [("varcopes", "varcopes"),
                                     ("dofs", "dofs")]),
        ])
    else:
        mfx.connect([(mfx_source, mfx_input, [("reg_file", "reg_file")]),
                     (subj_source, mfx_input, [("subject_id", "subject_id")])])

    # Mixed effects outputs
    mfx_sink = Node(DataSink(base_directory="/".join(
        [anal_dir_base, args.output, space]),
                             substitutions=[("/stats", "/"), ("/_hemi_", "/"),
                                            ("_glm_results", "")],
                             parameterization=True),
                    name="mfx_sink")

    mfx_outwrap = tools.OutputWrapper(mfx, subj_source, mfx_sink, mfx_output)
    mfx_outwrap.sink_outputs()
    mfx_outwrap.set_mapnode_substitutions(1)
    mfx_outwrap.add_regexp_substitutions([(r"_l1_contrast_[-\w]*/", "/"),
                                          (r"_mni_hemi_[lr]h", "")])
    mfx.connect(contrast_source, "l1_contrast", mfx_sink, "container")

    # Set a few last things
    mfx.base_dir = work_dir_base

    # Execute
    lyman.run_workflow(mfx, args=args)

    # Clean up
    if project["rm_working_dir"]:
        shutil.rmtree(project["working_dir"])
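
A hedged sketch of the usual command-line entry point for a script like this (the original argument parser and top-level imports are not shown in this excerpt):

# Hypothetical entry point; main() takes the raw argument list, as used above.
if __name__ == "__main__":
    import sys
    main(sys.argv[1:])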
Code example #28
    def preprocessing_for_Melodic(subject_list, base_directory, out_directory):

        from BrainTypes_additional_interfaces import DipyDenoiseT1
        import nipype.interfaces.fsl as fsl
        import nipype.interfaces.io as nio
        import nipype.pipeline.engine as pe
        import nipype.interfaces.utility as util
        from nipype import SelectFiles
        import os

        # ==================================================================
        # Defining the nodes for the workflow

        # Getting the subject ID
        infosource = pe.Node(
            interface=util.IdentityInterface(fields=['subject_id']),
            name='infosource')
        infosource.iterables = ('subject_id', subject_list)

        # Getting the relevant diffusion-weighted data
        templates = dict(
            T1='{subject_id}/anat/{subject_id}_T1w.nii.gz',
            func='{subject_id}/func/{subject_id}_task-rest.nii.gz')

        selectfiles = pe.Node(interface=SelectFiles(templates),
                              name='selectfiles')
        selectfiles.inputs.base_directory = os.path.abspath(base_directory)

        ### Preprocessing of the structural images
        # Getting a better field of view
        robustfov = pe.Node(interface=fsl.RobustFOV(), name='robustfov')

        # Denoising
        T1_denoise = pe.Node(interface=DipyDenoiseT1(), name='T1_denoise')

        # Brain extraction
        brainextraction = pe.Node(interface=fsl.BET(), name='brainextraction')

        ### Running motion correction on the functional images
        mcflirt = pe.Node(interface=fsl.MCFLIRT(), name='mcflirt')
        mcflirt.inputs.cost = 'mutualinfo'
        mcflirt.inputs.save_plots = True
        mcflirt.inputs.save_rms = True

        ### Getting the motion parameters
        motion_outliers = pe.Node(interface=fsl.utils.MotionOutliers(),
                                  name='motion_outliers')
        motion_outliers.inputs.metric = 'fd'

        ### Creating the folder structure
        datasink = pe.Node(interface=nio.DataSink(), name='sinker')
        datasink.inputs.base_directory = out_directory + '/preprocessed/'
        datasink.inputs.container = out_directory + '/preprocessed/'
        datasink.inputs.substitutions = [('_subject_id_', '')]

        functional_rename = pe.Node(interface=util.Rename(
            format_string="%(subject_id)s_functional.nii.gz"),
                                    name='functional_rename')
        structural_rename = pe.Node(interface=util.Rename(
            format_string="%(subject_id)s_structural.nii.gz"),
                                    name='structural_rename')
        brain_rename = pe.Node(interface=util.Rename(
            format_string="%(subject_id)s_structural_brain.nii.gz"),
                               name='brain_rename')

        # ==================================================================
        # Connecting the pipeline
        MelodicPreproc = pe.Workflow(name='MelodicPreproc')

        MelodicPreproc.connect(infosource, 'subject_id', selectfiles,
                               'subject_id')

        # T1 preprocessing
        MelodicPreproc.connect(selectfiles, 'T1', robustfov, 'in_file')
        MelodicPreproc.connect(robustfov, 'out_roi', T1_denoise, 'in_file')
        MelodicPreproc.connect(T1_denoise, 'out_file', structural_rename,
                               'in_file')
        MelodicPreproc.connect(infosource, 'subject_id', structural_rename,
                               'subject_id')

        MelodicPreproc.connect(T1_denoise, 'out_file', brainextraction,
                               'in_file')
        MelodicPreproc.connect(brainextraction, 'out_file', brain_rename,
                               'in_file')
        MelodicPreproc.connect(infosource, 'subject_id', brain_rename,
                               'subject_id')

        # Functional preprocessing
        MelodicPreproc.connect(selectfiles, 'func', mcflirt, 'in_file')
        MelodicPreproc.connect(mcflirt, 'out_file', functional_rename,
                               'in_file')
        MelodicPreproc.connect(infosource, 'subject_id', functional_rename,
                               'subject_id')

        # Getting the motion parameters
        MelodicPreproc.connect(selectfiles, 'func', motion_outliers, 'in_file')

        # Moving everything to a folder structure that is compatible with FSL Melodic
        MelodicPreproc.connect(functional_rename, 'out_file', datasink,
                               '@functional')
        MelodicPreproc.connect(structural_rename, 'out_file', datasink,
                               '@structural')
        MelodicPreproc.connect(brain_rename, 'out_file', datasink,
                               '@brain_structural')

        # ==================================================================
        # Running the workflow
        MelodicPreproc.base_dir = os.path.abspath(out_directory)
        MelodicPreproc.write_graph()
        MelodicPreproc.run()
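
The folder layout produced above is meant to feed FSL MELODIC. A hedged sketch of a follow-up group-ICA node on the renamed functional images (the file list, TR and output directory are placeholders, not taken from the original):

import nipype.pipeline.engine as pe
import nipype.interfaces.fsl as fsl

# Hypothetical group-ICA node; inputs below are placeholder assumptions.
melodic = pe.Node(interface=fsl.MELODIC(), name='melodic')
melodic.inputs.in_files = ['/out/preprocessed/sub-01_functional.nii.gz']
melodic.inputs.approach = 'concat'
melodic.inputs.tr_sec = 2.0
melodic.inputs.no_bet = True
melodic.inputs.out_dir = '/out/melodic'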
Code example #29
def main(arglist):
    """Main function for workflow setup and execution."""
    args = parse_args(arglist)

    # Get and process specific information
    project = lyman.gather_project_info()
    exp = lyman.gather_experiment_info(args.experiment, args.altmodel)

    if args.experiment is None:
        args.experiment = project["default_exp"]

    if args.altmodel:
        exp_name = "-".join([args.experiment, args.altmodel])
    else:
        exp_name = args.experiment

    # Make sure some paths are set properly
    os.environ["SUBJECTS_DIR"] = project["data_dir"]

    # Set roots of output storage
    anal_dir_base = op.join(project["analysis_dir"], exp_name)
    work_dir_base = op.join(project["working_dir"], exp_name)
    nipype.config.set("execution", "crashdump_dir", project["crash_dir"])

    ### Set up group info
    ## Regular design
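    # NOTE: group_filepath, contrast_name and contrast_vals are used below but
    # are not defined in this excerpt; they are assumed to be set at module level.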
    group_info = pd.read_csv(group_filepath)

    # Subject source (no iterables here)
    subject_list = lyman.determine_subjects(args.subjects)
    # Additional code (deletion caught by Dan Dillon)
    subj_source = Node(IdentityInterface(fields=["subject_id"]),
                       name="subj_source")
    subj_source.inputs.subject_id = subject_list

    print(group_info)
    print(subject_list)

    groups = [
        group_info[group_info.subid == x].reset_index().at[0, 'group']
        for x in subject_list
    ]
    group_vector = [1 if sub == "group1" else 2
                    for sub in groups]  # 1 for group1, 2 for group2

    # Set up the regressors and contrasts
    regressors = dict(group1_mean=[int(sub == 'group1') for sub in groups],
                      group2_mean=[int(sub == 'group2') for sub in groups])
    print(regressors)

    # DECIDE WHICH CONTRAST YOU WANT HERE:
    contrasts = [[
        contrast_name, "T", ["group1_mean", "group2_mean"], contrast_vals
    ]]

    print('Using this contrast:')
    print(contrast_name)
    print(contrast_vals)

    # Subject level contrast source
    contrast_source = Node(IdentityInterface(fields=["l1_contrast"]),
                           iterables=("l1_contrast", exp["contrast_names"]),
                           name="contrast_source")

    # Group workflow
    space = args.regspace
    wf_name = "_".join([space, args.output])
    if space == "mni":
        mfx, mfx_input, mfx_output = wf.create_volume_mixedfx_workflow_groups(
            wf_name, subject_list, regressors, contrasts, exp, group_vector)
    else:
        mfx, mfx_input, mfx_output = wf.create_surface_ols_workflow(
            wf_name, subject_list, exp)

    # Mixed effects inputs
    ffxspace = "mni" if space == "mni" else "epi"
    ffxsmooth = "unsmoothed" if args.unsmoothed else "smoothed"
    mfx_base = op.join("{subject_id}/ffx/%s/%s/{l1_contrast}" %
                       (ffxspace, ffxsmooth))
    templates = dict(copes=op.join(mfx_base, "cope1.nii.gz"))
    if space == "mni":
        templates.update(
            dict(varcopes=op.join(mfx_base, "varcope1.nii.gz"),
                 dofs=op.join(mfx_base, "tdof_t1.nii.gz")))
    else:
        templates.update(
            dict(reg_file=op.join(anal_dir_base, "{subject_id}/preproc/run_1",
                                  "func2anat_tkreg.dat")))

    # Workflow source node
    mfx_source = MapNode(
        SelectFiles(templates,
                    base_directory=anal_dir_base,
                    sort_filelist=True), "subject_id", "mfx_source")

    # Workflow input connections
    mfx.connect([
        (contrast_source, mfx_source, [("l1_contrast", "l1_contrast")]),
        (contrast_source, mfx_input, [("l1_contrast", "l1_contrast")]),
        (subj_source, mfx_source, [("subject_id", "subject_id")]),
        (mfx_source, mfx_input, [("copes", "copes")])
    ])
    if space == "mni":
        mfx.connect([
            (mfx_source, mfx_input, [("varcopes", "varcopes"),
                                     ("dofs", "dofs")]),
        ])
    else:
        mfx.connect([(mfx_source, mfx_input, [("reg_file", "reg_file")]),
                     (subj_source, mfx_input, [("subject_id", "subject_id")])])

    # Mixed effects outputs
    mfx_sink = Node(DataSink(base_directory="/".join(
        [anal_dir_base, args.output, space]),
                             substitutions=[("/stats", "/"), ("/_hemi_", "/"),
                                            ("_glm_results", "")],
                             parameterization=True),
                    name="mfx_sink")

    mfx_outwrap = tools.OutputWrapper(mfx, subj_source, mfx_sink, mfx_output)
    mfx_outwrap.sink_outputs()
    mfx_outwrap.set_mapnode_substitutions(1)
    mfx_outwrap.add_regexp_substitutions([(r"_l1_contrast_[-\w]*/", "/"),
                                          (r"_mni_hemi_[lr]h", "")])
    mfx.connect(contrast_source, "l1_contrast", mfx_sink, "container")

    # Set a few last things
    mfx.base_dir = work_dir_base

    # Execute
    lyman.run_workflow(mfx, args=args)

    # Clean up
    if project["rm_working_dir"]:
        shutil.rmtree(project["working_dir"])
Code example #30
    def whole_brain_tractography(subject_list, base_directory, out_directory):

        # ==================================================================
        # Loading required packages
        import nipype.pipeline.engine as pe
        import nipype.interfaces.utility as util
        from nipype.interfaces.freesurfer import ApplyVolTransform
        from nipype.interfaces.freesurfer import BBRegister
        import nipype.interfaces.fsl as fsl
        import nipype.interfaces.dipy as dipy
        import nipype.interfaces.diffusion_toolkit as dtk
        import nipype.algorithms.misc as misc
        from nipype.interfaces.utility import Merge
        from BrainTypes_additional_interfaces import Tractography
        from BrainTypes_additional_interfaces import DipyDenoise
        from BrainTypes_additional_pipelines import DWIPreproc
        from BrainTypes_additional_interfaces import CalcMatrix
        from BrainTypes_additional_pipelines import T1Preproc
        from BrainTypes_additional_pipelines import SubjectSpaceParcellation
        from BrainTypes_additional_interfaces import Extractb0 as extract_b0

        from nipype import SelectFiles
        import os

        # ==================================================================
        # Defining the nodes for the workflow

        # Getting the subject ID
        infosource = pe.Node(
            interface=util.IdentityInterface(fields=['subject_id']),
            name='infosource')
        infosource.iterables = ('subject_id', subject_list)

        # Getting the relevant diffusion-weighted data
        templates = dict(T1='{subject_id}/anat/{subject_id}_T1w.nii.gz',
                         dwi='{subject_id}/dwi/{subject_id}_dwi.nii.gz',
                         bvec='{subject_id}/dwi/{subject_id}_dwi.bvec',
                         bval='{subject_id}/dwi/{subject_id}_dwi.bval')

        selectfiles = pe.Node(SelectFiles(templates), name='selectfiles')
        selectfiles.inputs.base_directory = os.path.abspath(base_directory)

        # ==============================================================
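        # NOTE: as in the connectome example, template_directory,
        # acquisition_parameters, index_file and parcellation_directory are
        # assumed to be defined at module level; they are not part of this excerpt.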
        # T1 processing
        t1_preproc = pe.Node(interface=T1Preproc(), name='t1_preproc')
        t1_preproc.inputs.out_directory = out_directory + '/whole_brain_tractography/'
        t1_preproc.inputs.template_directory = template_directory

        # DWI processing
        dwi_preproc = pe.Node(interface=DWIPreproc(), name='dwi_preproc')
        dwi_preproc.inputs.out_directory = out_directory + '/whole_brain_tractography/'
        dwi_preproc.inputs.acqparams = acquisition_parameters
        dwi_preproc.inputs.index_file = index_file

        # Eroding the brain mask
        erode_mask = pe.Node(interface=fsl.maths.ErodeImage(),
                             name='erode_mask')

        # Reconstruction and tractography
        tractography = pe.Node(interface=Tractography(), name='tractography')
        tractography.iterables = ('model', ['CSA', 'CSD'])

        # smoothing the tracts
        smooth = pe.Node(interface=dtk.SplineFilter(step_length=0.5),
                         name='smooth')

        # Moving to subject space
        subject_parcellation = pe.Node(interface=SubjectSpaceParcellation(),
                                       name='subject_parcellation')
        subject_parcellation.inputs.source_subject = 'fsaverage'
        subject_parcellation.inputs.source_annot_file = 'aparc'
        subject_parcellation.inputs.out_directory = out_directory + '/connectome/'
        subject_parcellation.inputs.parcellation_directory = parcellation_directory

        # Co-registering T1 and dwi
        bbreg = pe.Node(interface=BBRegister(), name='bbreg')
        bbreg.inputs.init = 'fsl'
        bbreg.inputs.contrast_type = 't2'

        applyreg = pe.Node(interface=ApplyVolTransform(), name='applyreg')
        applyreg.inputs.interp = 'nearest'
        applyreg.inputs.inverse = True

        # ==================================================================
        # Setting up the workflow
        whole_brain_tractography = pe.Workflow(name='whole_brain_tractography')

        # Reading in files
        whole_brain_tractography.connect(infosource, 'subject_id', selectfiles,
                                         'subject_id')

        # DWI preprocessing
        whole_brain_tractography.connect(infosource, 'subject_id', dwi_preproc,
                                         'subject_id')
        whole_brain_tractography.connect(selectfiles, 'dwi', dwi_preproc,
                                         'dwi')
        whole_brain_tractography.connect(selectfiles, 'bval', dwi_preproc,
                                         'bvals')
        whole_brain_tractography.connect(selectfiles, 'bvec', dwi_preproc,
                                         'bvecs')

        # CSD model and streamline tracking
        whole_brain_tractography.connect(dwi_preproc, 'mask', erode_mask,
                                         'in_file')

        whole_brain_tractography.connect(selectfiles, 'bvec', tractography,
                                         'bvec')
        whole_brain_tractography.connect(selectfiles, 'bval', tractography,
                                         'bval')
        whole_brain_tractography.connect(dwi_preproc, 'dwi', tractography,
                                         'in_file')
        whole_brain_tractography.connect(dwi_preproc, 'FA', tractography, 'FA')
        whole_brain_tractography.connect(erode_mask, 'out_file', tractography,
                                         'brain_mask')

        # Smoothing the trackfile
        whole_brain_tractography.connect(tractography, 'out_track', smooth,
                                         'track_file')

        # Preprocessing the T1-weighted file
        whole_brain_tractography.connect(infosource, 'subject_id', t1_preproc,
                                         'subject_id')
        whole_brain_tractography.connect(selectfiles, 'T1', t1_preproc, 'T1')
        whole_brain_tractography.connect(t1_preproc, 'wm',
                                         subject_parcellation, 'wm')
        whole_brain_tractography.connect(t1_preproc, 'subjects_dir',
                                         subject_parcellation, 'subjects_dir')
        whole_brain_tractography.connect(t1_preproc, 'subject_id',
                                         subject_parcellation, 'subject_id')

        # Getting the parcellation into diffusion space
        whole_brain_tractography.connect(t1_preproc, 'subject_id', bbreg,
                                         'subject_id')
        whole_brain_tractography.connect(t1_preproc, 'subjects_dir', bbreg,
                                         'subjects_dir')
        whole_brain_tractography.connect(dwi_preproc, 'b0', bbreg,
                                         'source_file')

        whole_brain_tractography.connect(dwi_preproc, 'b0', applyreg,
                                         'source_file')
        whole_brain_tractography.connect(bbreg, 'out_reg_file', applyreg,
                                         'reg_file')
        whole_brain_tractography.connect(subject_parcellation,
                                         'renum_expanded', applyreg,
                                         'target_file')

        # ==================================================================
        # Running the workflow
        whole_brain_tractography.base_dir = os.path.abspath(out_directory)
        whole_brain_tractography.write_graph()
        whole_brain_tractography.run()