Code Example #1
def vol2png(qcname, tag="", overlay=True, overlayiterated=True):
    # Assumes module-level imports: os, nipype.pipeline.engine as pe,
    # nipype.interfaces.utility/fsl/io, and PUMI's globals module.
    import PUMI.func_preproc.Onevol as onevol

    QCDir = os.path.abspath(globals._SinkDir_ + "/" + globals._QCDir_)
    if not os.path.exists(QCDir):
        os.makedirs(QCDir)

    if tag:
        tag = "_" + tag

    inputspec = pe.Node(
        utility.IdentityInterface(fields=['bg_image', 'overlay_image']),
        name='inputspec')

    analysisflow = pe.Workflow(name=qcname + tag + '_qc')

    myonevol_bg = onevol.onevol_workflow(wf_name="onebg")
    analysisflow.connect(inputspec, 'bg_image', myonevol_bg, 'inputspec.func')

    if overlay and not overlayiterated:
        #myonevol_ol = onevol.onevol_workflow(wf_name="oneol")
        #analysisflow.connect(inputspec, 'overlay_image', myonevol_ol, 'inputspec.func')
        slicer = pe.MapNode(interface=fsl.Slicer(),
                            iterfield=['in_file'],
                            name='slicer')

    # Create png images for quality check
    if overlay and overlayiterated:
        myonevol_ol = onevol.onevol_workflow(wf_name="oneol")
        analysisflow.connect(inputspec, 'overlay_image', myonevol_ol,
                             'inputspec.func')
        slicer = pe.MapNode(interface=fsl.Slicer(),
                            iterfield=['in_file', 'image_edges'],
                            name='slicer')
    if not overlay:
        slicer = pe.MapNode(interface=fsl.Slicer(),
                            iterfield=['in_file'],
                            name='slicer')

    slicer.inputs.image_width = 2000
    slicer.inputs.out_file = qcname
    # sample every 5th axial slice into a single montage
    slicer.inputs.sample_axial = 5
    #slicer.inputs.middle_slices = True

    # Save outputs which are important
    ds_qc = pe.Node(interface=io.DataSink(), name='ds_qc')
    ds_qc.inputs.base_directory = QCDir
    ds_qc.inputs.regexp_substitutions = [(r"(\/)[^\/]*$", tag + ".ppm")]

    analysisflow.connect(myonevol_bg, 'outputspec.func1vol', slicer, 'in_file')
    if overlay and not overlayiterated:
        analysisflow.connect(inputspec, 'overlay_image', slicer, 'image_edges')
    if overlay and overlayiterated:
        analysisflow.connect(myonevol_ol, 'outputspec.func1vol', slicer,
                             'image_edges')
    analysisflow.connect(slicer, 'out_file', ds_qc, qcname)

    return analysisflow
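A minimal wiring sketch for nesting this QC sub-workflow in a parent pipeline (hypothetical parent workflow, node and file names; assumes PUMI's globals module is configured and the files exist; lists are shown because the internal Slicer node is a MapNode, but whether single files or per-subject lists are expected depends on the PUMI onevol sub-workflow):

import nipype.pipeline.engine as pe
import nipype.interfaces.utility as utility

# hypothetical parent workflow with an identity node feeding the QC inputs
parent = pe.Workflow(name='parent', base_dir='/tmp/wd')
source = pe.Node(utility.IdentityInterface(fields=['func', 'edges']),
                 name='source')
source.inputs.func = ['sub-01_func.nii.gz']    # hypothetical background image(s)
source.inputs.edges = ['sub-01_mask.nii.gz']   # hypothetical overlay/edge image(s)

qc = vol2png('fmri_qc', tag='example')
parent.connect(source, 'func', qc, 'inputspec.bg_image')
parent.connect(source, 'edges', qc, 'inputspec.overlay_image')
# parent.run()  # would execute the QC sub-workflow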
Code Example #2
def create_activation_pics(sbref_brain, thresh_zstat1, thresh_zstat2,
                           thresh_zfstat1):
    import nipype.interfaces.fsl as fsl

    Overlay_t1_Contrast = fsl.Overlay()
    Overlay_t1_Contrast.inputs.background_image = sbref_brain
    Overlay_t1_Contrast.inputs.stat_image = thresh_zstat1
    Overlay_t1_Contrast.inputs.auto_thresh_bg = True
    Overlay_t1_Contrast.inputs.stat_thresh = (2.300302, 12)
    Overlay_t1_Contrast.inputs.transparency = True
    Overlay_t1_Contrast.inputs.out_file = 'rendered_thresh_zstat1.nii.gz'

    Overlay_t1_Contrast.run()

    Slicer_t1_Contrast = fsl.Slicer()
    Slicer_t1_Contrast.inputs.in_file = 'rendered_thresh_zstat1.nii.gz'
    Slicer_t1_Contrast.inputs.all_axial = True
    Slicer_t1_Contrast.inputs.image_width = 750
    Slicer_t1_Contrast.inputs.out_file = 'rendered_thresh_zstat1.png'

    Slicer_t1_Contrast.run()
    #===============================================================================

    Overlay_t2_Contrast = fsl.Overlay()
    Overlay_t2_Contrast.inputs.background_image = sbref_brain
    Overlay_t2_Contrast.inputs.stat_image = thresh_zstat2
    Overlay_t2_Contrast.inputs.auto_thresh_bg = True
    Overlay_t2_Contrast.inputs.stat_thresh = (2.300302, 12)
    Overlay_t2_Contrast.inputs.transparency = True
    Overlay_t2_Contrast.inputs.out_file = 'rendered_thresh_zstat2.nii.gz'

    Overlay_t2_Contrast.run()

    Slicer_t2_Contrast = fsl.Slicer()
    Slicer_t2_Contrast.inputs.in_file = 'rendered_thresh_zstat2.nii.gz'
    Slicer_t2_Contrast.inputs.all_axial = True
    Slicer_t2_Contrast.inputs.image_width = 750
    Slicer_t2_Contrast.inputs.out_file = 'rendered_thresh_zstat2.png'

    Slicer_t2_Contrast.run()
    #===============================================================================

    Overlay_f_Contrast = fsl.Overlay()
    Overlay_f_Contrast.inputs.background_image = sbref_brain
    Overlay_f_Contrast.inputs.stat_image = thresh_zfstat1
    Overlay_f_Contrast.inputs.auto_thresh_bg = True
    Overlay_f_Contrast.inputs.stat_thresh = (2.300302, 12)
    Overlay_f_Contrast.inputs.transparency = True
    Overlay_f_Contrast.inputs.out_file = 'rendered_thresh_zfstat1.nii.gz'

    Overlay_f_Contrast.run()

    Slicer_f_Contrast = fsl.Slicer()
    Slicer_f_Contrast.inputs.in_file = 'rendered_thresh_zfstat1.nii.gz'
    Slicer_f_Contrast.inputs.all_axial = True
    Slicer_f_Contrast.inputs.image_width = 750
    Slicer_f_Contrast.inputs.out_file = 'rendered_thresh_zfstat1.png'

    Slicer_f_Contrast.run()
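The three Overlay/Slicer blocks above differ only in their stat image and output names; below is a hedged sketch of an equivalent loop-based helper (same thresholds assumed, nipype and FSL installed, input files assumed to exist):

import nipype.interfaces.fsl as fsl

def render_stat_images(sbref_brain, stat_images, z_range=(2.300302, 12), width=750):
    """Overlay each thresholded stat map on sbref_brain and slice it to a PNG."""
    for name, stat_image in stat_images.items():
        rendered = f'rendered_{name}.nii.gz'
        fsl.Overlay(background_image=sbref_brain, stat_image=stat_image,
                    auto_thresh_bg=True, stat_thresh=z_range,
                    transparency=True, out_file=rendered).run()
        fsl.Slicer(in_file=rendered, all_axial=True, image_width=width,
                   out_file=f'rendered_{name}.png').run()

# hypothetical call mirroring create_activation_pics()
# render_stat_images('sbref_brain.nii.gz',
#                    {'thresh_zstat1': 'thresh_zstat1.nii.gz',
#                     'thresh_zstat2': 'thresh_zstat2.nii.gz',
#                     'thresh_zfstat1': 'thresh_zfstat1.nii.gz'})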
Code Example #3
File: xnat_bet.py Project: PennBBL/utils
def slice_bet(tmpdir, niftiname):
    slice = fsl.Slicer()
    slice.inputs.in_file = niftiname
    slice.inputs.args = '-s 1 -x 0.4 ' + tmpdir + '/1.png -x 0.5 ' + tmpdir + '/2.png -x 0.6 ' + tmpdir + '/3.png -y 0.4 ' + tmpdir + '/4.png -y 0.5 ' + tmpdir + '/5.png -y 0.6 ' + tmpdir + '/6.png -z 0.4 ' + tmpdir + '/7.png -z 0.5 ' + tmpdir + '/8.png -z 0.6'
    slice.inputs.out_file = tmpdir + '/9.png'
    res = slice.run()
    print "Sliced"
Code Example #4
def create_tbss_1_preproc(name='tbss_1_preproc'):
    """Preprocess FA data for TBSS: erodes a little and zero end slicers and 
    creates masks(for use in FLIRT & FNIRT from FSL).
    A pipeline that does the same as tbss_1_preproc script in FSL
    
    Example
    --------
    
    >>>tbss1 = tbss.create_tbss_1_preproc(name='tbss1')
    >>>tbss1.run()
    
    Inputs::
    
        inputnode.fa_list
    
    Outputs::
    
        outputnode.fa_list
        outputnode.mask_list

    """
    
    # Define the inputnode
    inputnode = pe.Node(interface = util.IdentityInterface(fields=["fa_list"]),
                        name="inputnode")

    # Prep the FA images
    prepfa = pe.MapNode(fsl.ImageMaths(suffix="_prep"), 
                                    name="prepfa",
                                    iterfield=['in_file','op_string'])
    
    # Slicer
    slicer = pe.MapNode(fsl.Slicer(all_axial = True, image_width=1280),
                        name='slicer', 
                        iterfield=['in_file'])
    
    # Create a mask
    getmask = pe.MapNode(fsl.ImageMaths(op_string="-bin", suffix="_mask"),
                        name="getmask",
                        iterfield=['in_file'])
    
    # Define the tbss1 workflow
    tbss1 = pe.Workflow(name=name)
    tbss1.connect([
        (inputnode, prepfa, [("fa_list", "in_file")]),
        (inputnode, prepfa, [(("fa_list", tbss1_op_string), "op_string")]),
        (prepfa, getmask, [("out_file", "in_file")]),
        (prepfa, slicer,[('out_file', 'in_file')]),
        ])
    
    # Define the outputnode
    outputnode = pe.Node(interface = util.IdentityInterface(fields=["fa_list",
                                                                "mask_list"]), 
                        name="outputnode")
    tbss1.connect([
                (prepfa, outputnode, [("out_file", "fa_list")]),
                (getmask, outputnode, [("out_file","mask_list")]),
                ])
    return tbss1
Code Example #5
def slice_it_up(input, input_type, logpath, outfile, arguments):
    ####INVOKE SLICER FOR QA PURPOSES
    slice = fsl.Slicer()
    slice.inputs.in_file = input
    if input_type == 'mprage_nifti':
        slice.inputs.image_width = 1200
        slice.inputs.sample_axial = 7
        slice.inputs.out_file = outfile
        res = slice.run()
        add_to_log(logpath, "Sliced at : " + outfile)
    elif input_type == 'nifti':
        slice.inputs.image_width = 800
        slice.inputs.all_axial = True
        slice.inputs.out_file = outfile
        res = slice.run()
        add_to_log(logpath, "Sliced at : " + outfile)
Code Example #6
def create_overlay_workflow(name='overlay'):
    """Setup overlay workflow
    """

    overlay = pe.Workflow(name=name)
    overlaystats = pe.MapNode(interface=fsl.Overlay(), name="overlaystats",
                              iterfield=['stat_image'])
    overlaystats.inputs.show_negative_stats = True
    overlaystats.inputs.auto_thresh_bg = True

    slicestats = pe.MapNode(interface=fsl.Slicer(),
                            name="slicestats",
                            iterfield=['in_file'])
    slicestats.inputs.all_axial = True
    slicestats.inputs.image_width = 512

    overlay.connect(overlaystats, 'out_file', slicestats, 'in_file')
    return overlay
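A usage sketch with hypothetical file names (the function above assumed in scope, files assumed to exist); because overlaystats is a MapNode, its stat_image input takes a list, one entry per statistical map:

wf = create_overlay_workflow(name='overlay_qc')
wf.base_dir = '/tmp/overlay_qc'                                 # hypothetical working directory
wf.inputs.overlaystats.background_image = 'mean_func.nii.gz'    # hypothetical background image
wf.inputs.overlaystats.stat_thresh = (2.3, 10)                  # (lower, upper) display range
wf.inputs.overlaystats.stat_image = ['zstat1.nii.gz',           # one entry per contrast
                                     'zstat2.nii.gz']
wf.run()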
Code Example #7
def create_visualize_pipeline(name='visualize'):

    # initiate workflow
    visualize = Workflow(name=name)
    # inputnode
    inputnode = Node(
        util.IdentityInterface(fields=['ts_transformed', 'mni_template']),
        name='inputnode')
    # outputnode
    outputnode = Node(util.IdentityInterface(fields=['output_image']),
                      name='outputnode')

    # slice the transformed time series over the template edges for visual QC
    slicer = Node(fsl.Slicer(sample_axial=6, image_width=750), name='slicer')

    visualize.connect([(inputnode, slicer, [('ts_transformed', 'in_file'),
                                            ('mni_template', 'image_edges')]),
                       (slicer, outputnode, [('out_file', 'output_image')])])

    return visualize
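A usage sketch with hypothetical inputs, assuming the function above is in scope and the files exist; the pipeline only needs the MNI-space time series and the template used for the edge overlay:

visualize = create_visualize_pipeline(name='visualize')
visualize.base_dir = '/tmp/wd'                                   # hypothetical working directory
visualize.inputs.inputnode.ts_transformed = 'rest_mni.nii.gz'    # hypothetical transformed series
visualize.inputs.inputnode.mni_template = 'MNI152_T1_2mm_brain.nii.gz'
visualize.run()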
Code Example #8
groupmodelfitcz.inputs.inputspec.regressors = _REGRESSORS_
groupmodelfitcz.inputs.inputspec.contrasts = _CONTRASTS_
groupmodelfitcz.inputs.inputspec.groups = _GROUPS_

pipe.connect(smoothcz, 'outputnode.smoothed_files', groupmodelfitcz,
             'inputspec.copes')
pipe.connect(preproc, 'outputspec.func2anat_mat', groupmodelfitcz,
             'inputspec.func2anat_mat')
pipe.connect(anatproc, 'outputspec.out_warpfield', groupmodelfitcz,
             'inputspec.anat_to_std_warp')

########################################################################################################################
# Nodes for QC

png_bet = pe.MapNode(interface=fsl.Slicer(),
                     name='png_bet',
                     iterfield=['in_file'])
png_bet.inputs.image_width = 1750
png_bet.inputs.all_axial = True
pipe.connect(anatproc, 'outputspec.out_brain', png_bet, 'in_file')

substitutions = [('trait_added', '')]  # bugfix?
regex_subs = [('.*/trait_added', ''), ('mapflow/_qc_bet.*/s', 's'),
              ('/bet/.*.png', '.png')]
qc_bet = pe.MapNode(nio.DataSink(infields=['bet'], parameterization=False),
                    name='qc_bet',
                    iterfield=['container', 'bet'])

qc_bet.inputs.container = _SUBJECTS_
qc_bet.inputs.regexp_substitutions = regex_subs
Code Example #9
#==========================================================================================================================================================
#overlay thresh_zstat1

overlay_zstat = Node(fsl.Overlay(), name='overlay')
overlay_zstat.inputs.auto_thresh_bg = True
overlay_zstat.inputs.stat_thresh = (3.1, 10)
overlay_zstat.inputs.transparency = True
overlay_zstat.inputs.out_file = 'rendered_thresh_zstat.nii.gz'
overlay_zstat.inputs.show_negative_stats = True
overlay_zstat.inputs.background_image = template_brain

#==========================================================================================================================================================
#generate pics thresh_zstat1

slicer_zstat = Node(fsl.Slicer(), name='slicer')
slicer_zstat.inputs.sample_axial = 2
slicer_zstat.inputs.image_width = 2000
slicer_zstat.inputs.out_file = 'rendered_thresh_zstat.png'

proc_3rd_level.connect([
    (infosource, selectfiles, [('frequencies', 'frequencies'),
                               ('zstats', 'zstats')]),
    (selectfiles, smooth_est, [('zstat', 'zstat')]),
    (
        selectfiles, cluster_zstats, [('zstat', 'zstat')]
    ),  # the original file is needed to get the number, and it is then masked inside the function
    (smooth_est, cluster_zstats, [('volume', 'volume'), ('dlh', 'dlh')]),
    (cluster_zstats, apply_thresh, [('threshold_file', 'in_file')]),
    (cluster_zstats, overlay_zstat, [('threshold_file', 'stat_image')]),
    (overlay_zstat, slicer_zstat, [('out_file', 'in_file')]),
Code Example #10
File: easy_thres.py Project: amrka/ratpype
def overlay(wf_name='overlay', samethres=True):
    """

    samethres: set to False for voxel-based thresholding, where ptoz must be a MapNode (likewise for TFCE correction)

    - Get the maximum intensity value of the output thresholded image. This is used while rendering the Z statistic image::

        fslstats zstat1_cluster_threshold.nii.gz -R

        arguments
        -R  : output <min intensity> <max intensity>

    - Rendering. For details see `FEAT <http://www.fmrib.ox.ac.uk/fsl/feat5/detail.html#poststats>`_::

        overlay 1 0 MNI152_T1_2mm_brain.nii.gz
               -a zstat1_cluster_threshold.nii.gz
               2.30 15.67
               zstat1_cluster_threshold_overlay.nii.gz

        slicer zstat1_cluster_threshold_overlay.nii.gz
               -L  -A 750
               zstat1_cluster_threshold_overlay.png

      The Z statistic range selected for rendering is automatically calculated by default,
      to run from red (minimum Z statistic after thresholding) to yellow (maximum Z statistic, here
      maximum intensity).
    """
    overl = pe.Workflow(name=wf_name)

    inputnode = pe.Node(
        util.IdentityInterface(fields=['stat_image', 'threshold', 'bg_image']),
        name='inputspec')

    outputnode = pe.Node(
        util.IdentityInterface(fields=['overlay_threshold', 'rendered_image']),
        name='outputspec')

    #max and minimum intensity values
    image_stats = pe.MapNode(interface=fsl.ImageStats(),
                             name='image_stats',
                             iterfield=['in_file'])
    image_stats.inputs.op_string = '-p 100'

    #create tuple of z_threshold and max intensity value of threshold file
    if (samethres):
        create_tuple = pe.MapNode(util.Function(
            input_names=['infile_a', 'infile_b'],
            output_names=['out_file'],
            function=get_tuple),
                                  name='create_tuple',
                                  iterfield=['infile_b'],
                                  nested=True)
    else:
        create_tuple = pe.MapNode(util.Function(
            input_names=['infile_a', 'infile_b'],
            output_names=['out_file'],
            function=get_tuple),
                                  name='create_tuple',
                                  iterfield=['infile_a', 'infile_b'],
                                  nested=True)

    #colour activation overlaying
    overlay = pe.MapNode(
        interface=fsl.Overlay(),
        name='overlay',
        iterfield=['stat_image', 'stat_thresh', 'background_image'])
    overlay.inputs.transparency = True
    overlay.inputs.auto_thresh_bg = True
    overlay.inputs.out_type = 'float'

    #colour rendering
    slicer = pe.MapNode(interface=fsl.Slicer(),
                        name='slicer',
                        iterfield=['in_file'])
    #set max picture width
    slicer.inputs.image_width = 1750
    # set output all axial slices into one picture
    slicer.inputs.all_axial = True

    overl.connect(inputnode, 'stat_image', image_stats, 'in_file')

    overl.connect(image_stats, 'out_stat', create_tuple, 'infile_b')
    overl.connect(inputnode, 'threshold', create_tuple, 'infile_a')

    overl.connect(inputnode, 'stat_image', overlay, 'stat_image')
    overl.connect(create_tuple, 'out_file', overlay, 'stat_thresh')

    overl.connect(inputnode, 'bg_image', overlay, 'background_image')

    overl.connect(overlay, 'out_file', slicer, 'in_file')

    overl.connect(overlay, 'out_file', outputnode, 'overlay_threshold')
    overl.connect(slicer, 'out_file', outputnode, 'rendered_image')

    return overl
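A usage sketch with hypothetical file names, assuming the get_tuple helper referenced above is importable and the files exist; the internal nodes are MapNodes, so stat_image and bg_image are passed as equal-length lists:

overl = overlay(wf_name='overlay_cluster', samethres=True)
overl.base_dir = '/tmp/wd'                                               # hypothetical working directory
overl.inputs.inputspec.stat_image = ['zstat1_cluster_threshold.nii.gz']  # thresholded z map(s)
overl.inputs.inputspec.bg_image = ['MNI152_T1_2mm_brain.nii.gz']         # one background per map
overl.inputs.inputspec.threshold = 2.3                                   # z threshold (lower bound)
overl.run()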
Code Example #11
def create_preproc_report_wf(report_dir, name="preproc_report"):
    wf = pe.Workflow(name=name)

    inputspec = pe.Node(util.IdentityInterface(fields=[
        'art_detect_plot', 'mean_epi', 'reg_file', 'ribbon', 'fssubjects_dir',
        'tsnr_file', 'report_name', 'subject_id'
    ]),
                        name="inputspec")

    def plot_epi_to_t1_coregistration(epi_file, reg_file, ribbon,
                                      fssubjects_dir):
        import pylab as plt
        from nipy.labs import viz
        import nibabel as nb
        import numpy as np
        import os
        import nipype.interfaces.freesurfer as fs
        anat = nb.load(ribbon).get_data()
        anat[anat > 1] = 1
        anat_affine = nb.load(ribbon).get_affine()
        func = nb.load(epi_file).get_data()
        func_affine = nb.load(epi_file).get_affine()
        fig = plt.figure(figsize=(8, 6), edgecolor='k', facecolor='k')
        slicer = viz.plot_anat(np.asarray(func),
                               np.asarray(func_affine),
                               black_bg=True,
                               cmap=plt.cm.spectral,
                               cut_coords=(-6, 3, 12),
                               figure=fig,
                               axes=[0, .50, 1, .33])
        slicer.contour_map(np.asarray(anat),
                           np.asarray(anat_affine),
                           levels=[.51],
                           colors=[
                               'r',
                           ])
        slicer.title(
            "Mean EPI with cortical surface contour overlay (before registration)",
            size=12,
            color='w',
            alpha=0)

        res = fs.ApplyVolTransform(source_file=epi_file,
                                   reg_file=reg_file,
                                   fs_target=True,
                                   subjects_dir=fssubjects_dir).run()

        func = nb.load(res.outputs.transformed_file).get_data()
        func_affine = nb.load(res.outputs.transformed_file).get_affine()
        slicer = viz.plot_anat(np.asarray(func),
                               np.asarray(func_affine),
                               black_bg=True,
                               cmap=plt.cm.spectral,
                               cut_coords=(-6, 3, 12),
                               figure=fig,
                               axes=[0, 0, 1, .33])
        slicer.contour_map(np.asarray(anat),
                           np.asarray(anat_affine),
                           levels=[.51],
                           colors=[
                               'r',
                           ])
        slicer.title(
            "Mean EPI with cortical surface contour overlay (after registration)",
            size=12,
            color='w',
            alpha=0)
        plt.savefig("reg_plot.png",
                    facecolor=fig.get_facecolor(),
                    edgecolor='none')
        return os.path.abspath("reg_plot.png")

    plot_reg = pe.Node(util.Function(
        function=plot_epi_to_t1_coregistration,
        input_names=['epi_file', 'reg_file', 'ribbon', 'fssubjects_dir'],
        output_names=['plot_file']),
                       name="plot_reg")
    wf.connect(inputspec, "mean_epi", plot_reg, "epi_file")
    wf.connect(inputspec, "reg_file", plot_reg, "reg_file")
    wf.connect(inputspec, "ribbon", plot_reg, "ribbon")
    wf.connect(inputspec, "fssubjects_dir", plot_reg, "fssubjects_dir")

    plot_tsnr = pe.Node(fsl.Slicer(), name="plot_tsnr")
    plot_tsnr.inputs.all_axial = True
    plot_tsnr.inputs.image_width = 600

    wf.connect(inputspec, "tsnr_file", plot_tsnr, "in_file")

    write_report = pe.Node(ReportSink(
        orderfields=["motion parameters", "tSNR", "coregistration"]),
                           name="write_report")
    write_report.inputs.base_directory = report_dir

    def prepend_title(s_id):
        return "Resting state fMRI preprocessing report for " + s_id

    wf.connect(inputspec, ("subject_id", prepend_title), write_report,
               "report_name")
    wf.connect(inputspec, "art_detect_plot", write_report, "motion parameters")
    wf.connect(plot_tsnr, "out_file", write_report, "tSNR")
    wf.connect(plot_reg, "plot_file", write_report, "coregistration")

    return wf
Code Example #12
clustering_t.inputs.out_index_file = 'cluster_mask_zstat'
clustering_t.inputs.out_localmax_txt_file = 'lmax_zstat.txt'
clustering_t.inputs.connectivity = 26

# ============================================================================================================================
# In[15]:
# overlay t contrast
overlay_t_contrast = Node(fsl.Overlay(), name='overlay_t_contrast')
overlay_t_contrast.inputs.auto_thresh_bg = True
overlay_t_contrast.inputs.stat_thresh = (2.300302, 5)
overlay_t_contrast.inputs.transparency = True

# ============================================================================================================================
# In[15]:
# slicer
slicer_t_contrast = Node(fsl.Slicer(), name='generate_t_contrast_image')
slicer_t_contrast.inputs.all_axial = True
slicer_t_contrast.inputs.image_width = 500

# ============================================================================================================================
# ||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||
# ||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||
# ||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||
# ============================================================================================================================

mask_zfstat = Node(fsl.ApplyMask(), name='mask_zfstat')
mask_zfstat.inputs.out_file = 'thresh_zfstat.nii.gz'

# ============================================================================================================================
# In[15]:
#==========================================================================================================================================================
#overlay thresh_zstat1

overlay_cope = Node(fsl.Overlay(), name='overlay')
overlay_cope.inputs.auto_thresh_bg = True
overlay_cope.inputs.stat_thresh = (3.1, 10)
overlay_cope.inputs.transparency = True
overlay_cope.inputs.out_file = 'rendered_thresh_zstat.nii.gz'
overlay_cope.inputs.show_negative_stats = True
overlay_cope.inputs.background_image = standard_brain

#==========================================================================================================================================================
#generate pics thresh_zstat1

slicer_cope = Node(fsl.Slicer(), name='slicer')
slicer_cope.inputs.sample_axial = 2
slicer_cope.inputs.image_width = 2000
slicer_cope.inputs.out_file = 'rendered_thresh_zstat.png'

proc_3rd_level.connect([
    (infosource, selectfiles, [('tasks', 'tasks'),
                               ('contrasts', 'contrasts')]),
    (selectfiles, smooth_est, [('contrast', 'contrast')]),
    (selectfiles, mask_zstat, [('contrast', 'in_file')]),
    (mask_zstat, cluster_copes, [('out_file', 'in_file')]),
    (smooth_est, cluster_copes, [('volume', 'volume'), ('dlh', 'dlh')]),

    # (selectfiles, cluster_copes, [('contrast','cope_file')]),
    (cluster_copes, overlay_cope, [('threshold_file', 'stat_image')]),
    (overlay_cope, slicer_cope, [('out_file', 'in_file')]),
Code Example #14
def create_confound_removal_workflow(workflow_name="confound_removal"):

    inputnode = pe.Node(util.IdentityInterface(
        fields=["subject_id", "timeseries", "reg_file", "motion_parameters"]),
                        name="inputs")

    # Get the Freesurfer aseg volume from the Subjects Directory
    getaseg = pe.Node(io.FreeSurferSource(subjects_dir=fs.Info.subjectsdir()),
                      name="getaseg")

    # Binarize the Aseg to use as a whole brain mask
    asegmask = pe.Node(fs.Binarize(min=0.5, dilate=2), name="asegmask")

    # Extract and erode a mask of the deep cerebral white matter
    extractwm = pe.Node(fs.Binarize(match=[2, 41], erode=3), name="extractwm")

    # Extract and erode a mask of the ventricles and CSF
    extractcsf = pe.Node(fs.Binarize(match=[4, 5, 14, 15, 24, 31, 43, 44, 63],
                                     erode=1),
                         name="extractcsf")

    # Mean the timeseries across the fourth dimension
    meanfunc = pe.MapNode(fsl.MeanImage(),
                          iterfield=["in_file"],
                          name="meanfunc")

    # Invert the anatomical coregistration and resample the masks
    regwm = pe.MapNode(fs.ApplyVolTransform(inverse=True, interp="nearest"),
                       iterfield=["source_file", "reg_file"],
                       name="regwm")

    regcsf = pe.MapNode(fs.ApplyVolTransform(inverse=True, interp="nearest"),
                        iterfield=["source_file", "reg_file"],
                        name="regcsf")

    regbrain = pe.MapNode(fs.ApplyVolTransform(inverse=True, interp="nearest"),
                          iterfield=["source_file", "reg_file"],
                          name="regbrain")

    # Convert to Nifti for FSL tools
    convertwm = pe.MapNode(fs.MRIConvert(out_type="niigz"),
                           iterfield=["in_file"],
                           name="convertwm")

    convertcsf = pe.MapNode(fs.MRIConvert(out_type="niigz"),
                            iterfield=["in_file"],
                            name="convertcsf")

    convertbrain = pe.MapNode(fs.MRIConvert(out_type="niigz"),
                              iterfield=["in_file"],
                              name="convertbrain")

    # Add the mask images together for a report image
    addconfmasks = pe.MapNode(fsl.ImageMaths(suffix="conf",
                                             op_string="-mul 2 -add",
                                             out_data_type="char"),
                              iterfield=["in_file", "in_file2"],
                              name="addconfmasks")

    # Overlay and slice the confound mask overlaid on the mean func for reporting
    confoverlay = pe.MapNode(fsl.Overlay(auto_thresh_bg=True,
                                         stat_thresh=(.7, 2)),
                             iterfield=["background_image", "stat_image"],
                             name="confoverlay")

    confslice = pe.MapNode(fsl.Slicer(image_width=800, label_slices=False),
                           iterfield=["in_file"],
                           name="confslice")
    confslice.inputs.sample_axial = 2

    # Extract the mean signal from white matter and CSF masks
    wmtcourse = pe.MapNode(fs.SegStats(exclude_id=0, avgwf_txt_file=True),
                           iterfield=["segmentation_file", "in_file"],
                           name="wmtcourse")

    csftcourse = pe.MapNode(fs.SegStats(exclude_id=0, avgwf_txt_file=True),
                            iterfield=["segmentation_file", "in_file"],
                            name="csftcourse")

    # Extract the mean signal from over the whole brain
    globaltcourse = pe.MapNode(fs.SegStats(exclude_id=0, avgwf_txt_file=True),
                               iterfield=["segmentation_file", "in_file"],
                               name="globaltcourse")

    # Build the confound design matrix
    conf_inputs = [
        "motion_params", "global_waveform", "wm_waveform", "csf_waveform"
    ]
    confmatrix = pe.MapNode(util.Function(input_names=conf_inputs,
                                          output_names=["confound_matrix"],
                                          function=make_confound_matrix),
                            iterfield=conf_inputs,
                            name="confmatrix")

    # Regress the confounds out of the timeseries
    confregress = pe.MapNode(fsl.FilterRegressor(filter_all=True),
                             iterfield=["in_file", "design_file", "mask"],
                             name="confregress")

    # Rename the confound mask png
    renamepng = pe.MapNode(util.Rename(format_string="confound_sources.png"),
                           iterfield=["in_file"],
                           name="renamepng")

    # Define the outputs
    outputnode = pe.Node(
        util.IdentityInterface(fields=["timeseries", "confound_sources"]),
        name="outputs")

    # Define and connect the confound workflow
    confound = pe.Workflow(name=workflow_name)

    confound.connect([
        (inputnode, meanfunc, [("timeseries", "in_file")]),
        (inputnode, getaseg, [("subject_id", "subject_id")]),
        (getaseg, extractwm, [("aseg", "in_file")]),
        (getaseg, extractcsf, [("aseg", "in_file")]),
        (getaseg, asegmask, [("aseg", "in_file")]),
        (extractwm, regwm, [("binary_file", "target_file")]),
        (extractcsf, regcsf, [("binary_file", "target_file")]),
        (asegmask, regbrain, [("binary_file", "target_file")]),
        (meanfunc, regwm, [("out_file", "source_file")]),
        (meanfunc, regcsf, [("out_file", "source_file")]),
        (meanfunc, regbrain, [("out_file", "source_file")]),
        (inputnode, regwm, [("reg_file", "reg_file")]),
        (inputnode, regcsf, [("reg_file", "reg_file")]),
        (inputnode, regbrain, [("reg_file", "reg_file")]),
        (regwm, convertwm, [("transformed_file", "in_file")]),
        (regcsf, convertcsf, [("transformed_file", "in_file")]),
        (regbrain, convertbrain, [("transformed_file", "in_file")]),
        (convertwm, addconfmasks, [("out_file", "in_file")]),
        (convertcsf, addconfmasks, [("out_file", "in_file2")]),
        (addconfmasks, confoverlay, [("out_file", "stat_image")]),
        (meanfunc, confoverlay, [("out_file", "background_image")]),
        (confoverlay, confslice, [("out_file", "in_file")]),
        (confslice, renamepng, [("out_file", "in_file")]),
        (regwm, wmtcourse, [("transformed_file", "segmentation_file")]),
        (inputnode, wmtcourse, [("timeseries", "in_file")]),
        (regcsf, csftcourse, [("transformed_file", "segmentation_file")]),
        (inputnode, csftcourse, [("timeseries", "in_file")]),
        (regbrain, globaltcourse, [("transformed_file", "segmentation_file")]),
        (inputnode, globaltcourse, [("timeseries", "in_file")]),
        (inputnode, confmatrix, [("motion_parameters", "motion_params")]),
        (wmtcourse, confmatrix, [("avgwf_txt_file", "wm_waveform")]),
        (csftcourse, confmatrix, [("avgwf_txt_file", "csf_waveform")]),
        (globaltcourse, confmatrix, [("avgwf_txt_file", "global_waveform")]),
        (confmatrix, confregress, [("confound_matrix", "design_file")]),
        (inputnode, confregress, [("timeseries", "in_file")]),
        (convertbrain, confregress, [("out_file", "mask")]),
        (confregress, outputnode, [("out_file", "timeseries")]),
        (renamepng, outputnode, [("out_file", "confound_sources")]),
    ])

    return confound
Code Example #15
File: fsl_flow.py Project: jsegawa/Nipype_SEQ03
Setup overlay workflow
----------------------

"""

overlay = pe.Workflow(name='overlay')
overlaystats = pe.MapNode(interface=fsl.Overlay(),
                          name="overlaystats",
                          iterfield=['stat_image'])
overlaystats.inputs.show_negative_stats = True
overlaystats.inputs.auto_thresh_bg = True
"""Use :class:`nipype.interfaces.fsl.Slicer` to create images of the overlaid
statistical volumes for a report of the first-level results.
"""

slicestats = pe.MapNode(interface=fsl.Slicer(),
                        name="slicestats",
                        iterfield=['in_file'])
slicestats.inputs.all_axial = True
slicestats.inputs.image_width = 512

overlay.connect(overlaystats, 'out_file', slicestats, 'in_file')
"""
Set up first-level workflow
---------------------------

"""


def sort_copes(files):
    numelements = len(files[0])
Code Example #16
def create_skullstrip_workflow(name="skullstrip"):

    # Define the workflow inputs
    inputnode = pe.Node(util.IdentityInterface(fields=["timeseries"]),
                        name="inputs")

    # Mean the timeseries across the fourth dimension
    meanfunc1 = pe.MapNode(fsl.MeanImage(),
                           iterfield=["in_file"],
                           name="meanfunc1")

    # Skullstrip the mean functional image
    stripmean = pe.MapNode(fsl.BET(mask=True, no_output=True, frac=0.3),
                           iterfield=["in_file"],
                           name="stripmean")

    # Use the mask from skullstripping to strip each timeseries
    maskfunc1 = pe.MapNode(fsl.ApplyMask(),
                           iterfield=["in_file", "mask_file"],
                           name="maskfunc1")

    # Determine the 2nd and 98th percentile intensities of each run
    getthresh = pe.MapNode(fsl.ImageStats(op_string="-p 2 -p 98"),
                           iterfield=["in_file"],
                           name="getthreshold")

    # Threshold functional data at 10% of the 98th percentile
    threshold = pe.MapNode(fsl.ImageMaths(out_data_type="char",
                                          suffix="_thresh"),
                           iterfield=["in_file"],
                           name="threshold")

    # Dilate the mask
    dilatemask = pe.MapNode(fsl.DilateImage(operation="max"),
                            iterfield=["in_file"],
                            name="dilatemask")

    # Mask the runs again with this new mask
    maskfunc2 = pe.MapNode(fsl.ApplyMask(),
                           iterfield=["in_file", "mask_file"],
                           name="maskfunc2")

    # Get a new mean image from each functional run
    meanfunc2 = pe.MapNode(fsl.MeanImage(),
                           iterfield=["in_file"],
                           name="meanfunc2")

    # Slice the mean func for reporting
    meanslice = pe.MapNode(fsl.Slicer(image_width=800, label_slices=False),
                           iterfield=["in_file", "image_edges"],
                           name="meanslice")
    meanslice.inputs.sample_axial = 2

    # Rename the outputs
    meanname = pe.MapNode(util.Rename(format_string="mean_func",
                                      keep_ext=True),
                          iterfield=["in_file"],
                          name="meanname")

    maskname = pe.MapNode(util.Rename(format_string="functional_mask",
                                      keep_ext=True),
                          iterfield=["in_file"],
                          name="maskname")

    pngname = pe.MapNode(util.Rename(format_string="mean_func.png"),
                         iterfield=["in_file"],
                         name="pngname")

    # Define the workflow outputs
    outputnode = pe.Node(util.IdentityInterface(
        fields=["timeseries", "mean_func", "mask_file", "report_png"]),
                         name="outputs")

    # Define and connect the workflow
    skullstrip = pe.Workflow(name=name)

    skullstrip.connect([
        (inputnode, meanfunc1, [("timeseries", "in_file")]),
        (meanfunc1, stripmean, [("out_file", "in_file")]),
        (inputnode, maskfunc1, [("timeseries", "in_file")]),
        (stripmean, maskfunc1, [("mask_file", "mask_file")]),
        (maskfunc1, getthresh, [("out_file", "in_file")]),
        (getthresh, threshold, [(("out_stat", get_thresh_op), "op_string")]),
        (maskfunc1, threshold, [("out_file", "in_file")]),
        (threshold, dilatemask, [("out_file", "in_file")]),
        (inputnode, maskfunc2, [("timeseries", "in_file")]),
        (dilatemask, maskfunc2, [("out_file", "mask_file")]),
        (maskfunc2, meanfunc2, [("out_file", "in_file")]),
        (meanfunc2, meanslice, [("out_file", "in_file")]),
        (dilatemask, meanslice, [("out_file", "image_edges")]),
        (meanslice, pngname, [("out_file", "in_file")]),
        (meanfunc2, meanname, [("out_file", "in_file")]),
        (dilatemask, maskname, [("out_file", "in_file")]),
        (maskfunc2, outputnode, [("out_file", "timeseries")]),
        (pngname, outputnode, [("out_file", "report_png")]),
        (maskname, outputnode, [("out_file", "mask_file")]),
        (meanname, outputnode, [("out_file", "mean_func")]),
    ])

    return skullstrip
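A usage sketch with hypothetical run names, assuming the function above and its get_thresh_op helper are in scope and the files exist; the input IdentityInterface node is named 'inputs', and its timeseries field feeds MapNodes, so it takes a list of runs:

skullstrip = create_skullstrip_workflow(name='skullstrip')
skullstrip.base_dir = '/tmp/wd'                              # hypothetical working directory
skullstrip.inputs.inputs.timeseries = ['run1_bold.nii.gz',   # hypothetical functional runs
                                       'run2_bold.nii.gz']
skullstrip.run()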
Code Example #17
File: easy_thresh.py Project: haipan/C-PAC
def easy_thresh(wf_name):
    """
    Workflow for carrying out cluster-based thresholding 
    and colour activation overlaying
    
    Parameters
    ----------
    wf_name : string 
        Workflow name
        
    Returns
    -------
    easy_thresh : object 
        Easy thresh workflow object
    
    Notes
    -----
    
    `Source <https://github.com/FCP-INDI/C-PAC/blob/master/CPAC/easy_thresh/easy_thresh.py>`_
        
    Workflow Inputs::
    
        inputspec.z_stats : string (nifti file)
            z_score stats output for t or f contrast from flameo
        
        inputspec.merge_mask : string (nifti file)
            mask generated from 4D Merged derivative file
        
        inputspec.z_threshold : float
            Z Statistic threshold value for cluster thresholding. It is used to 
            determine what level of activation would be statistically significant. 
            Increasing this will result in higher estimates of required effect.
        
        inputspec.p_threshold : float
            Probability threshold for cluster thresholding.
        
        inputspec.parameters : string (tuple)
            tuple containing the FSLDIR path and the MNI template name
            
    Workflow Outputs::
    
        outputspec.cluster_threshold : string (nifti files)
           the thresholded Z statistic image for each t contrast
        
        outputspec.cluster_index : string (nifti files)
            image of clusters for each t contrast; the values 
            in the clusters are the index numbers as used 
            in the cluster list.
        
        outputspec.overlay_threshold : string (nifti files)
            3D color rendered stats overlay image for t contrast
            After reloading this image, use the Statistics Color 
            Rendering GUI to reload the color look-up-table
        
        outputspec.overlay_rendered_image : string (nifti files)
           2D color rendered stats overlay picture for each t contrast
        
        outputspec.cluster_localmax_txt : string (text files)
            local maxima text file, defines the coordinates of maximum value
            in the cluster
    
    
    Order of commands:
    
    - Estimate smoothness of the image::
        
        smoothest --mask= merge_mask.nii.gz --zstat=.../flameo/stats/zstat1.nii.gz
        
        arguments
        --mask  :  brain mask volume
        --zstat :  filename of zstat/zfstat image
    
    - Create mask. For details see `fslmaths <http://www.fmrib.ox.ac.uk/fslcourse/lectures/practicals/intro/index.htm#fslutils>`_::
        
        fslmaths ../flameo/stats/zstat1.nii.gz 
                 -mas merge_mask.nii.gz 
                 zstat1_mask.nii.gz
        
        arguments
        -mas   : use (following image>0) to mask current image

    - Copy geometry (image dimensions, voxel dimensions, voxel dimensions units string, image orientation/origin or qform/sform info) from one image to another::
    
        fslcpgeom MNI152_T1_2mm_brain.nii.gz zstat1_mask.nii.gz
    
    - Cluster based thresholding. For details see `FEAT <http://www.fmrib.ox.ac.uk/fsl/feat5/detail.html#poststats>`_::
        
        cluster --dlh = 0.0023683100 
                --in = zstat1_mask.nii.gz 
                --oindex = zstat1_cluster_index.nii.gz 
                --olmax = zstat1_cluster_localmax.txt
                --othresh = zstat1_cluster_threshold.nii.gz 
                --pthresh = 0.0500000000 
                --thresh = 2.3000000000 
                --volume = 197071
                
        arguments 
        --in    :    filename of input volume
        --dlh   :    smoothness estimate = sqrt(det(Lambda))
        --oindex  :  filename for output of cluster index
        --othresh :  filename for output of thresholded image
        --olmax   :  filename for output of local maxima text file
        --volume  :  number of voxels in the mask
        --pthresh :  p-threshold for clusters
        --thresh  :  threshold for input volume
        
     Z statistic image is thresholded to show which voxels or clusters of voxels are activated at a particular significance level.
     A Z statistic threshold is used to define contiguous clusters. Then each cluster's estimated significance level (from GRF-theory) 
     is compared with the cluster probability threshold. Significant clusters are then used to mask the original Z statistic image.
    
    - Get the maximum intensity value of the output thresholded image. This is used while rendering the Z statistic image::
        
        fslstats zstat1_cluster_threshold.nii.gz -R
        
        arguments
        -R  : output <min intensity> <max intensity>

    - Rendering. For details see `FEAT <http://www.fmrib.ox.ac.uk/fsl/feat5/detail.html#poststats>`_::
         
        overlay 1 0 MNI152_T1_2mm_brain.nii.gz 
               -a zstat1_cluster_threshold.nii.gz 
               2.30 15.67 
               zstat1_cluster_threshold_overlay.nii.gz
               
        slicer zstat1_cluster_threshold_overlay.nii.gz 
               -L  -A 750 
               zstat1_cluster_threshold_overlay.png
    
      The Z statistic range selected for rendering is automatically calculated by default, 
      to run from red (minimum Z statistic after thresholding) to yellow (maximum Z statistic, here 
      maximum intensity).
      
    High Level Workflow Graph:
    
    .. image:: ../images/easy_thresh.dot.png
       :width: 800
    
    
    Detailed Workflow Graph:
    
    .. image:: ../images/easy_thresh_detailed.dot.png
       :width: 800
               
    Examples
    --------
    
    >>> import easy_thresh
    >>> preproc = easy_thresh.easy_thresh("new_workflow")
    >>> preproc.inputs.inputspec.z_stats= 'flameo/stats/zstat1.nii.gz'
    >>> preproc.inputs.inputspec.merge_mask = 'merge_mask/alff_Z_fn2standard_merged_mask.nii.gz'
    >>> preproc.inputs.inputspec.z_threshold = 2.3
    >>> preproc.inputs.inputspec.p_threshold = 0.05
    >>> preproc.inputs.inputspec.parameters = ('/usr/local/fsl/', 'MNI152')
    >>> preporc.run()  -- SKIP doctest
    
    """

    easy_thresh = pe.Workflow(name=wf_name)

    inputnode = pe.Node(util.IdentityInterface(fields=[
        'z_stats', 'merge_mask', 'z_threshold', 'p_threshold', 'parameters'
    ]),
                        name='inputspec')

    outputnode = pe.Node(util.IdentityInterface(fields=[
        'cluster_threshold', 'cluster_index', 'cluster_localmax_txt',
        'overlay_threshold', 'rendered_image'
    ]),
                         name='outputspec')

    ### fsl easythresh
    # estimate image smoothness
    smooth_estimate = pe.MapNode(interface=fsl.SmoothEstimate(),
                                 name='smooth_estimate',
                                 iterfield=['zstat_file'])

    # run clustering after fixing stats header for talspace
    zstat_mask = pe.MapNode(interface=fsl.MultiImageMaths(),
                            name='zstat_mask',
                            iterfield=['in_file'])
    #operations to perform
    #-mas use (following image>0) to mask current image
    zstat_mask.inputs.op_string = '-mas %s'

    #fslcpgeom
    #copy certain parts of the header information (image dimensions,
    #voxel dimensions, voxel dimensions units string, image orientation/origin
    #or qform/sform info) from one image to another
    copy_geometry = pe.MapNode(util.Function(
        input_names=['infile_a', 'infile_b'],
        output_names=['out_file'],
        function=copy_geom),
                               name='copy_geometry',
                               iterfield=['infile_a', 'infile_b'])

    ##cluster-based thresholding
    #After carrying out the initial statistical test, the resulting
    #Z statistic image is then normally thresholded to show which voxels or
    #clusters of voxels are activated at a particular significance level.
    #A Z statistic threshold is used to define contiguous clusters.
    #Then each cluster's estimated significance level (from GRF-theory) is
    #compared with the cluster probability threshold. Significant clusters
    #are then used to mask the original Z statistic image for later production
    #of colour blobs.This method of thresholding is an alternative to
    #Voxel-based correction, and is normally more sensitive to activation.
    #    cluster = pe.MapNode(interface=fsl.Cluster(),
    #                            name='cluster',
    #                            iterfield=['in_file', 'volume', 'dlh'])
    #    #output of cluster index (in size order)
    #    cluster.inputs.out_index_file = True
    #    #thresholded image
    #    cluster.inputs.out_threshold_file = True
    #    #local maxima text file
    #    #defines the cluster cordinates
    #    cluster.inputs.out_localmax_txt_file = True

    cluster = pe.MapNode(util.Function(
        input_names=[
            'in_file', 'volume', 'dlh', 'threshold', 'pthreshold', 'parameters'
        ],
        output_names=['index_file', 'threshold_file', 'localmax_txt_file'],
        function=call_cluster),
                         name='cluster',
                         iterfield=['in_file', 'volume', 'dlh'])

    #max and minimum intensity values
    image_stats = pe.MapNode(interface=fsl.ImageStats(),
                             name='image_stats',
                             iterfield=['in_file'])
    image_stats.inputs.op_string = '-R'

    #create tuple of z_threshold and max intensity value of threshold file
    create_tuple = pe.MapNode(util.Function(
        input_names=['infile_a', 'infile_b'],
        output_names=['out_file'],
        function=get_tuple),
                              name='create_tuple',
                              iterfield=['infile_b'])

    #colour activation overlaying
    overlay = pe.MapNode(interface=fsl.Overlay(),
                         name='overlay',
                         iterfield=['stat_image', 'stat_thresh'])
    overlay.inputs.transparency = True
    overlay.inputs.auto_thresh_bg = True
    overlay.inputs.out_type = 'float'

    #colour rendering
    slicer = pe.MapNode(interface=fsl.Slicer(),
                        name='slicer',
                        iterfield=['in_file'])
    #set max picture width
    slicer.inputs.image_width = 750
    # set output all axial slices into one picture
    slicer.inputs.all_axial = True

    #function mapnode to get the standard fsl brain image
    #based on parameters as FSLDIR,MNI and voxel size
    get_backgroundimage = pe.MapNode(util.Function(
        input_names=['in_file', 'file_parameters'],
        output_names=['out_file'],
        function=get_standard_background_img),
                                     name='get_bckgrndimg1',
                                     iterfield=['in_file'])

    #function node to get the standard fsl brain image
    #outputs single file
    get_backgroundimage2 = pe.Node(util.Function(
        input_names=['in_file', 'file_parameters'],
        output_names=['out_file'],
        function=get_standard_background_img),
                                   name='get_backgrndimg2')

    #connections
    easy_thresh.connect(inputnode, 'z_stats', smooth_estimate, 'zstat_file')
    easy_thresh.connect(inputnode, 'merge_mask', smooth_estimate, 'mask_file')

    easy_thresh.connect(inputnode, 'z_stats', zstat_mask, 'in_file')
    easy_thresh.connect(inputnode, 'merge_mask', zstat_mask, 'operand_files')

    easy_thresh.connect(zstat_mask, 'out_file', get_backgroundimage, 'in_file')
    easy_thresh.connect(inputnode, 'parameters', get_backgroundimage,
                        'file_parameters')

    easy_thresh.connect(get_backgroundimage, 'out_file', copy_geometry,
                        'infile_a')
    easy_thresh.connect(zstat_mask, 'out_file', copy_geometry, 'infile_b')

    easy_thresh.connect(copy_geometry, 'out_file', cluster, 'in_file')
    easy_thresh.connect(inputnode, 'z_threshold', cluster, 'threshold')
    easy_thresh.connect(inputnode, 'p_threshold', cluster, 'pthreshold')
    easy_thresh.connect(smooth_estimate, 'volume', cluster, 'volume')
    easy_thresh.connect(smooth_estimate, 'dlh', cluster, 'dlh')
    easy_thresh.connect(inputnode, 'parameters', cluster, 'parameters')

    easy_thresh.connect(cluster, 'threshold_file', image_stats, 'in_file')

    easy_thresh.connect(image_stats, 'out_stat', create_tuple, 'infile_b')
    easy_thresh.connect(inputnode, 'z_threshold', create_tuple, 'infile_a')

    easy_thresh.connect(cluster, 'threshold_file', overlay, 'stat_image')
    easy_thresh.connect(create_tuple, 'out_file', overlay, 'stat_thresh')

    easy_thresh.connect(inputnode, 'merge_mask', get_backgroundimage2,
                        'in_file')
    easy_thresh.connect(inputnode, 'parameters', get_backgroundimage2,
                        'file_parameters')

    easy_thresh.connect(get_backgroundimage2, 'out_file', overlay,
                        'background_image')

    easy_thresh.connect(overlay, 'out_file', slicer, 'in_file')

    easy_thresh.connect(cluster, 'threshold_file', outputnode,
                        'cluster_threshold')
    easy_thresh.connect(cluster, 'index_file', outputnode, 'cluster_index')
    easy_thresh.connect(cluster, 'localmax_txt_file', outputnode,
                        'cluster_localmax_txt')
    easy_thresh.connect(overlay, 'out_file', outputnode, 'overlay_threshold')
    easy_thresh.connect(slicer, 'out_file', outputnode, 'rendered_image')

    return easy_thresh
Code Example #18
"""

selectcontrast = pe.Node(interface=util.Select(), name="selectcontrast")
"""Use :class:`nipype.interfaces.fsl.Overlay` to combine the statistical output of
the contrast estimate and a background image into one volume.
"""

overlaystats = pe.Node(interface=fsl.Overlay(), name="overlaystats")
overlaystats.inputs.stat_thresh = (3, 10)
overlaystats.inputs.show_negative_stats = True
overlaystats.inputs.auto_thresh_bg = True
"""Use :class:`nipype.interfaces.fsl.Slicer` to create images of the overlaid
statistical volumes for a report of the first-level results.
"""

slicestats = pe.Node(interface=fsl.Slicer(), name="slicestats")
slicestats.inputs.all_axial = True
slicestats.inputs.image_width = 750

l1analysis.connect([
    (modelspec, level1design, [('session_info', 'session_info')]),
    (level1design, level1estimate, [('spm_mat_file', 'spm_mat_file')]),
    (level1estimate, contrastestimate, [('spm_mat_file', 'spm_mat_file'),
                                        ('beta_images', 'beta_images'),
                                        ('residual_image', 'residual_image')]),
    (contrastestimate, selectcontrast, [('spmT_images', 'inlist')]),
    (selectcontrast, overlaystats, [('out', 'stat_image')]),
    (overlaystats, slicestats, [('out_file', 'in_file')])
])
"""
Preproc + Analysis pipeline
Code Example #19
File: hb-preproc.py Project: 62442katieb/hb-idconn
bet = pe.Node(fsl.BET(frac=0.1, mask=True),
                 name="bet_func")
bet2 = pe.Node(fsl.BET(frac=0.1),
                 name="bet_struc")
segment = pe.Node(fsl.FAST(out_basename='fast_'),
                     name="fastSeg")
flirting = pe.Node(fsl.FLIRT(cost_func='normmi', dof=7, searchr_x=[-180, 180],
                             searchr_y=[-180, 180], searchr_z=[-180,180]),
                   name="struc_2_func")
applyxfm = pe.MapNode(fsl.ApplyXfm(apply_xfm = True),
                      name="MaskEPI", iterfield=['in_file'])
erosion = pe.MapNode(fsl.ErodeImage(),
                     name="erode_masks", iterfield=['in_file'])
regcheckoverlay = pe.Node(fsl.Overlay(auto_thresh_bg=True, stat_thresh=(100,500)),
                         name='OverlayCoreg')
regcheck = pe.Node(fsl.Slicer(),
                  name='CheckCoreg')
#filterfeeder = pe.MapNode(fsl.ImageMeants(eig=True, ))

datasink = pe.Node(nio.DataSink(),
                   name='datasink')
datasink.inputs.base_directory = "/Users/Katie/Dropbox/Data/habenula/derivatives/hb_test"

# Connect alllllll the nodes!!
hb_test_wf.connect(subj_iterable, 'subject_id', DataGrabber, 'subject_id')
hb_test_wf.connect(DataGrabber, 'bold', moco, 'in_file')
hb_test_wf.connect(moco, 'out_file', extractb0, 'in_file')
hb_test_wf.connect(extractb0, 'roi_file', bet, 'in_file')
hb_test_wf.connect(bet, 'out_file', datasink, '@epi_brain')
hb_test_wf.connect(DataGrabber, 'T1', bet2, 'in_file')
hb_test_wf.connect(bet2, 'out_file', flirting, 'in_file')
Code Example #20
def min_func_preproc(subject, sessions, data_dir, fs_dir, wd, sink, TR,
                     EPI_resolution):

    #initiate min func preproc workflow
    wf = pe.Workflow(name='MPP')
    wf.base_dir = wd
    wf.config['execution']['crashdump_dir'] = wf.base_dir + "/crash_files"

    ## set fsl output type to nii.gz
    fsl.FSLCommand.set_default_output_type('NIFTI_GZ')

    # I/O nodes
    inputnode = pe.Node(util.IdentityInterface(fields=['subjid', 'fs_dir']),
                        name='inputnode')
    inputnode.inputs.subjid = subject
    inputnode.inputs.fs_dir = fs_dir

    ds = pe.Node(nio.DataSink(base_directory=sink, parameterization=False),
                 name='sink')

    ds.inputs.substitutions = [('moco.nii.gz.par', 'moco.par'),
                               ('moco.nii.gz_', 'moco_')]

    #infosource to iterate over sessions: COND, EXT1, EXT2
    sessions_infosource = pe.Node(util.IdentityInterface(fields=['session']),
                                  name='session')
    sessions_infosource.iterables = [('session', sessions)]

    #select files
    templates = {
        'func_data': '{session}/func_data.nii.gz',
        'T1_brain': 'T1/T1_brain.nii.gz',
        'wmedge': 'T1/MASKS/aparc_aseg.WMedge.nii.gz'
    }

    selectfiles = pe.Node(nio.SelectFiles(templates, base_directory=data_dir),
                          name='selectfiles')

    wf.connect(sessions_infosource, 'session', selectfiles, 'session')
    wf.connect(sessions_infosource, 'session', ds, 'container')

    ##########################################################################
    ########################    START   ######################################
    ##########################################################################

    ###########################################################################
    ########################    No. 3   ######################################

    #change the data type to float
    fsl_float = pe.Node(fsl.maths.MathsCommand(output_datatype='float'),
                        name='fsl_float')

    wf.connect(selectfiles, 'func_data', fsl_float, 'in_file')

    ###########################################################################
    ########################    No. 4   ######################################

    #get FD from fsl_motion_outliers
    FD = pe.Node(fsl.MotionOutliers(out_file='func_data_FD_outliers.txt',
                                    out_metric_values='func_data_FD.txt',
                                    metric='fd'),
                 name='FD')

    wf.connect(fsl_float, 'out_file', FD, 'in_file')
    wf.connect(FD, 'out_metric_values', ds, 'QC.@FD')
    wf.connect(FD, 'out_file', ds, 'QC.@FDoutliers')

    ###########################################################################
    ########################    No. 5   ######################################

    #slice timing correction: sequential ascending
    slicetimer = pe.Node(
        fsl.SliceTimer(
            index_dir=False,
            interleaved=False,
            #slice_direction=3, #z direction
            time_repetition=TR,
            out_file='func_data_stc.nii.gz'),
        name='slicetimer')

    wf.connect(fsl_float, 'out_file', slicetimer, 'in_file')
    wf.connect(slicetimer, 'slice_time_corrected_file', ds, 'TEMP.@slicetimer')

    ###########################################################################
    ########################    No. 6   ######################################
    #do realignment to the middle or first volume
    mcflirt = pe.Node(fsl.MCFLIRT(save_mats=True,
                                  save_plots=True,
                                  save_rms=True,
                                  ref_vol=1,
                                  out_file='func_data_stc_moco.nii.gz'),
                      name='mcflirt')

    wf.connect(slicetimer, 'slice_time_corrected_file', mcflirt, 'in_file')
    wf.connect(mcflirt, 'out_file', ds, 'TEMP.@mcflirt')
    wf.connect(mcflirt, 'par_file', ds, 'MOCO.@par_file')
    wf.connect(mcflirt, 'rms_files', ds, 'MOCO.@rms_files')
    wf.connect(mcflirt, 'mat_file', ds, 'MOCO_MAT.@mcflirt')

    # plot motion parameters
    rotplotter = pe.Node(fsl.PlotMotionParams(in_source='fsl',
                                              plot_type='rotations',
                                              out_file='rotation.png'),
                         name='rotplotter')

    transplotter = pe.Node(fsl.PlotMotionParams(in_source='fsl',
                                                plot_type='translations',
                                                out_file='translation.png'),
                           name='transplotter')

    dispplotter = pe.Node(
        interface=fsl.PlotMotionParams(in_source='fsl',
                                       plot_type='displacement',
                                       out_file='displacement.png'),
        name='dispplotter')

    wf.connect(mcflirt, 'par_file', rotplotter, 'in_file')
    wf.connect(mcflirt, 'par_file', transplotter, 'in_file')
    wf.connect(mcflirt, 'rms_files', dispplotter, 'in_file')
    wf.connect(rotplotter, 'out_file', ds, 'PLOTS.@rotplot')
    wf.connect(transplotter, 'out_file', ds, 'PLOTS.@transplot')
    wf.connect(dispplotter, 'out_file', ds, 'PLOTS.@dispplot')

    #calculate tSNR and the mean

    moco_Tmean = pe.Node(fsl.maths.MathsCommand(args='-Tmean',
                                                out_file='moco_Tmean.nii.gz'),
                         name='moco_Tmean')

    moco_Tstd = pe.Node(fsl.maths.MathsCommand(args='-Tstd',
                                               out_file='moco_Tstd.nii.gz'),
                        name='moco_Tstd')

    tSNR0 = pe.Node(fsl.maths.MultiImageMaths(op_string='-div %s',
                                              out_file='moco_tSNR.nii.gz'),
                    name='moco_tSNR')

    wf.connect(mcflirt, 'out_file', moco_Tmean, 'in_file')
    wf.connect(mcflirt, 'out_file', moco_Tstd, 'in_file')
    wf.connect(moco_Tmean, 'out_file', tSNR0, 'in_file')
    wf.connect(moco_Tstd, 'out_file', tSNR0, 'operand_files')
    wf.connect(moco_Tmean, 'out_file', ds, 'TEMP.@moco_Tmean')
    wf.connect(moco_Tstd, 'out_file', ds, 'TEMP.@moco_Tstd')
    wf.connect(tSNR0, 'out_file', ds, 'TEMP.@moco_Tsnr')
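
    # For reference, a hedged numpy/nibabel sketch of the same tSNR definition
    # (voxel-wise mean over time divided by std over time); illustrative only
    # and not used by this workflow.
    def compute_tsnr_image(func_4d_file):
        import nibabel as nib
        import numpy as np
        img = nib.load(func_4d_file)
        data = img.get_data().astype(np.float32)
        tsnr = data.mean(axis=-1) / (data.std(axis=-1) + 1e-6)
        return nib.Nifti1Image(tsnr, img.affine, img.header)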

    ###########################################################################
    ########################    No. 7   ######################################

    #bias field correction of mean epi for better coregistration
    bias = pe.Node(
        fsl.FAST(
            img_type=2,
            #restored_image='epi_Tmeanrestored.nii.gz',
            output_biascorrected=True,
            out_basename='moco_Tmean',
            no_pve=True,
            probability_maps=False),
        name='bias')

    wf.connect(moco_Tmean, 'out_file', bias, 'in_files')
    wf.connect(bias, 'restored_image', ds, 'TEMP.@restored_image')

    #co-registration to anat using FS BBregister and mean EPI
    bbregister = pe.Node(fs.BBRegister(
        subject_id=subject,
        subjects_dir=fs_dir,
        contrast_type='t2',
        init='fsl',
        out_fsl_file='func2anat.mat',
        out_reg_file='func2anat.dat',
        registered_file='moco_Tmean_restored2anat.nii.gz',
        epi_mask=True),
                         name='bbregister')

    wf.connect(bias, 'restored_image', bbregister, 'source_file')
    wf.connect(bbregister, 'registered_file', ds, 'TEMP.@registered_file')
    wf.connect(bbregister, 'out_fsl_file', ds, 'COREG.@out_fsl_file')
    wf.connect(bbregister, 'out_reg_file', ds, 'COREG.@out_reg_file')
    wf.connect(bbregister, 'min_cost_file', ds, 'COREG.@min_cost_file')

    #inverse func2anat mat
    inverseXFM = pe.Node(fsl.ConvertXFM(invert_xfm=True,
                                        out_file='anat2func.mat'),
                         name='inverseXFM')

    wf.connect(bbregister, 'out_fsl_file', inverseXFM, 'in_file')
    wf.connect(inverseXFM, 'out_file', ds, 'COREG.@out_fsl_file_inv')

    #plot the coregistration quality
    slicer = pe.Node(fsl.Slicer(middle_slices=True, out_file='func2anat.png'),
                     name='slicer')

    wf.connect(selectfiles, 'wmedge', slicer, 'image_edges')
    wf.connect(bbregister, 'registered_file', slicer, 'in_file')
    wf.connect(slicer, 'out_file', ds, 'PLOTS.@func2anat')

    ###########################################################################
    ########################    No. 8   ######################################
    #MOCO and COREGISTRATION

    #resample T1 to EPI resolution to use it as a reference image
    resample_T1 = pe.Node(
        fsl.FLIRT(datatype='float',
                  apply_isoxfm=EPI_resolution,
                  out_file='T1_brain_EPI.nii.gz'),
        # interp='nearestneighbour' -- keep the default spline interpolation so it looks nicer
        name='resample_T1')

    wf.connect(selectfiles, 'T1_brain', resample_T1, 'in_file')
    wf.connect(selectfiles, 'T1_brain', resample_T1, 'reference')
    wf.connect(resample_T1, 'out_file', ds, 'COREG.@resample_T1')

    #concatenate matrices (moco and func2anat) volume-wise
    concat_xfm = pe.MapNode(fsl.ConvertXFM(concat_xfm=True),
                            iterfield=['in_file'],
                            name='concat_xfm')

    wf.connect(mcflirt, 'mat_file', concat_xfm, 'in_file')
    wf.connect(bbregister, 'out_fsl_file', concat_xfm, 'in_file2')
    wf.connect(concat_xfm, 'out_file', ds, 'MOCO2ANAT_MAT.@concat_out')

    #split func_data
    split = pe.Node(fsl.Split(dimension='t'), name='split')

    wf.connect(slicetimer, 'slice_time_corrected_file', split, 'in_file')

    #motion correction and coregistration in one interpolation step
    flirt = pe.MapNode(fsl.FLIRT(apply_xfm=True,
                                 interp='spline',
                                 datatype='float'),
                       iterfield=['in_file', 'in_matrix_file'],
                       name='flirt')

    wf.connect(split, 'out_files', flirt, 'in_file')
    wf.connect(resample_T1, 'out_file', flirt, 'reference')
    wf.connect(concat_xfm, 'out_file', flirt, 'in_matrix_file')

    #merge the files to get a 4D dataset that is motion corrected and co-registered to T1
    merge = pe.Node(fsl.Merge(dimension='t',
                              merged_file='func_data_stc_moco2anat.nii.gz'),
                    name='merge')

    wf.connect(flirt, 'out_file', merge, 'in_files')
    wf.connect(merge, 'merged_file', ds, 'TEMP.@merged')

    ###########################################################################
    ########################    No. 9   ######################################

    #run BET on co-registered EPI in 1mm and get the mask
    bet = pe.Node(fsl.BET(mask=True,
                          functional=True,
                          out_file='moco_Tmean_restored2anat_BET.nii.gz'),
                  name='bet')

    wf.connect(bbregister, 'registered_file', bet, 'in_file')
    wf.connect(bet, 'out_file', ds, 'TEMP.@func_data_example')
    wf.connect(bet, 'mask_file', ds, 'TEMP.@func_data_mask')

    #resample BET mask to EPI resolution
    resample_mask = pe.Node(fsl.FLIRT(
        datatype='int',
        apply_isoxfm=EPI_resolution,
        interp='nearestneighbour',
        out_file='prefiltered_func_data_mask.nii.gz'),
                            name='resample_mask')

    wf.connect(bet, 'mask_file', resample_mask, 'in_file')
    wf.connect(resample_T1, 'out_file', resample_mask, 'reference')
    wf.connect(resample_mask, 'out_file', ds, '@mask')

    #apply the mask to 4D data to get rid of the "eyes and the rest"
    mask4D = pe.Node(fsl.maths.ApplyMask(), name='mask')

    wf.connect(merge, 'merged_file', mask4D, 'in_file')
    wf.connect(resample_mask, 'out_file', mask4D, 'mask_file')

    ###########################################################################
    ########################    No. 10   ######################################

    #get the values necessary for intensity normalization
    median = pe.Node(fsl.utils.ImageStats(op_string='-k %s -p 50'),
                     name='median')

    wf.connect(resample_mask, 'out_file', median, 'mask_file')
    wf.connect(mask4D, 'out_file', median, 'in_file')

    #compute the scaling factor so that the within-mask median intensity becomes 10000
    def get_factor(val):

        factor = 10000.0 / val
        return factor
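    # Worked example (illustrative numbers, not from this dataset): if the
    # within-mask median intensity is 500.0, the factor is 10000.0 / 500.0 = 20.0,
    # so the median of the scaled data becomes 10000.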

    get_scaling_factor = pe.Node(util.Function(input_names=['val'],
                                               output_names=['out_val'],
                                               function=get_factor),
                                 name='scaling_factor')

    #normalize the 4D func data with one scaling factor
    multiplication = pe.Node(fsl.maths.BinaryMaths(
        operation='mul', out_file='prefiltered_func_data.nii.gz'),
                             name='multiplication')

    wf.connect(median, 'out_stat', get_scaling_factor, 'val')
    wf.connect(get_scaling_factor, 'out_val', multiplication, 'operand_value')
    wf.connect(mask4D, 'out_file', multiplication, 'in_file')
    wf.connect(multiplication, 'out_file', ds, '@prefiltered_func_data')

    ###########################################################################
    ########################    No. 11   ######################################

    #calculate tSNR and the mean of the new prefiltered and detrended dataset

    tsnr_detrend = pe.Node(misc.TSNR(
        regress_poly=1,
        detrended_file='prefiltered_func_data_detrend.nii.gz',
        mean_file='prefiltered_func_data_detrend_Tmean.nii.gz',
        tsnr_file='prefiltered_func_data_detrend_tSNR.nii.gz'),
                           name='tsnr_detrend')

    wf.connect(multiplication, 'out_file', tsnr_detrend, 'in_file')
    wf.connect(tsnr_detrend, 'tsnr_file', ds, 'QC.@tsnr_detrend')
    wf.connect(tsnr_detrend, 'mean_file', ds, 'QC.@detrend_mean_file')
    wf.connect(tsnr_detrend, 'detrended_file', ds, '@detrend_file')

    #resample the EPI mask to original EPI dimensions
    convert2func = pe.Node(fsl.FLIRT(apply_xfm=True,
                                     interp='nearestneighbour',
                                     out_file='func_data_mask2func.nii.gz'),
                           name='convert2func')

    wf.connect(resample_mask, 'out_file', convert2func, 'in_file')
    wf.connect(bias, 'restored_image', convert2func, 'reference')
    wf.connect(inverseXFM, 'out_file', convert2func, 'in_matrix_file')
    wf.connect(convert2func, 'out_file', ds, 'QC.@inv')

    ###########################################################################
    ########################    RUN   ######################################
    wf.write_graph(dotfilename='wf.dot',
                   graph2use='colored',
                   format='pdf',
                   simple_form=True)
    wf.run(plugin='MultiProc', plugin_args={'n_procs': 2})
    #wf.run()
    return
Code example #21
register_flow.connect(highres2standard, 'out_matrix_file', reg_outputnode,
                      'highres2standard')
register_flow.connect(highres2standard_warp, 'fieldcoeff_file', reg_outputnode,
                      'highres2standard_warp')
register_flow.connect(func2standard, 'out_file', reg_outputnode,
                      'func2standard')
register_flow.connect(func2standard_warp, 'out_file', reg_outputnode,
                      'func2standard_warp')

# Assign templates
register_flow.inputs.inputspec.target_image = template
register_flow.inputs.inputspec.target_image_brain = template_brain
register_flow.inputs.inputspec.target_mask = template_mask

# Check registration
slicer = MapNode(fsl.Slicer(), iterfield=['in_file'], name="slicer")
slicer.inputs.image_edges = os.path.join(os.environ['FSLDIR'], 'data',
                                         'standard',
                                         'MNI152_T1_2mm_edges.nii.gz')
slicer.inputs.args = '-a'

# Down sample template using isotropic resampling
down_sampler_ = Node(IdentityInterface(fields=['in_file']),
                     name='identitynode')
down_sampler_.inputs.in_file = template_brain
down_sampler = Node(fsl.FLIRT(), name='down_sampler')
down_sampler.inputs.apply_isoxfm = down_sampling

# Normalize functional images to down sampled template
warpall_func = MapNode(fsl.ApplyWarp(interp='trilinear'),
                       iterfield=['in_file'],
Code example #22
File: tbss.py  Project: sebastientourbier/nipype_lts5
def create_tbss_1_preproc(name='tbss_1_preproc'):
    """Preprocess FA data for TBSS: erodes a little and zero end slicers and
    creates masks(for use in FLIRT & FNIRT from FSL).
    A pipeline that does the same as tbss_1_preproc script in FSL

    Example
    -------

    >>> from nipype.workflows.dmri.fsl import tbss
    >>> tbss1 = tbss.create_tbss_1_preproc()
    >>> tbss1.inputs.inputnode.fa_list = ['s1_FA.nii', 's2_FA.nii', 's3_FA.nii']

    Inputs::

        inputnode.fa_list

    Outputs::

        outputnode.fa_list
        outputnode.mask_list
        outputnode.slices

    """

    # Define the inputnode
    inputnode = pe.Node(interface=util.IdentityInterface(fields=["fa_list"]),
                        name="inputnode")

    # Prep the FA images
    prepfa = pe.MapNode(fsl.ImageMaths(suffix="_prep"),
                        name="prepfa",
                        iterfield=['in_file', 'op_string'])

    # Slicer
    slicer = pe.MapNode(fsl.Slicer(all_axial=True, image_width=1280),
                        name='slicer',
                        iterfield=['in_file'])

    # Create a mask
    getmask1 = pe.MapNode(fsl.ImageMaths(op_string="-bin", suffix="_mask"),
                          name="getmask1",
                          iterfield=['in_file'])
    getmask2 = pe.MapNode(
        fsl.MultiImageMaths(op_string="-dilD -dilD -sub 1 -abs -add %s"),
        name="getmask2",
        iterfield=['in_file', 'operand_files'])

    #    $FSLDIR/bin/fslmaths FA/${f}_FA_mask -dilD -dilD -sub 1 -abs -add FA/${f}_FA_mask FA/${f}_FA_mask -odt char
    # Define the tbss1 workflow
    tbss1 = pe.Workflow(name=name)
    tbss1.connect([
        (inputnode, prepfa, [("fa_list", "in_file")]),
        (inputnode, prepfa, [(("fa_list", tbss1_op_string), "op_string")]),
        (prepfa, getmask1, [("out_file", "in_file")]),
        (getmask1, getmask2, [("out_file", "in_file"),
                              ("out_file", "operand_files")]),
        (prepfa, slicer, [('out_file', 'in_file')]),
    ])

    # Define the outputnode
    outputnode = pe.Node(interface=util.IdentityInterface(
        fields=["fa_list", "mask_list", "slices"]),
                         name="outputnode")
    tbss1.connect([(prepfa, outputnode, [("out_file", "fa_list")]),
                   (getmask2, outputnode, [("out_file", "mask_list")]),
                   (slicer, outputnode, [('out_file', 'slices')])])
    return tbss1
Code example #23
def create_bbregister_workflow(name="bbregister", contrast_type="t2"):

    # Define the workflow inputs
    inputnode = pe.Node(
        util.IdentityInterface(fields=["subject_id", "source_file"]),
        name="inputs")

    # Estimate the registration to Freesurfer conformed space
    func2anat = pe.MapNode(fs.BBRegister(contrast_type=contrast_type,
                                         init="fsl",
                                         epi_mask=True,
                                         registered_file=True,
                                         out_fsl_file=True),
                           iterfield=["source_file"],
                           name="func2anat")

    # Set up a node to grab the target from the subjects directory
    fssource = pe.Node(io.FreeSurferSource(subjects_dir=fs.Info.subjectsdir()),
                       name="fssource")
    # Always overwrite the grab; shouldn't cascade unless the underlying image changes
    fssource.overwrite = True

    # Convert the target to nifti
    convert = pe.Node(fs.MRIConvert(out_type="niigz"), name="convertbrain")

    # Swap dimensions so stuff looks nice in the report
    flipbrain = pe.Node(fsl.SwapDimensions(new_dims=("RL", "PA", "IS")),
                        name="flipbrain")

    flipfunc = pe.MapNode(fsl.SwapDimensions(new_dims=("RL", "PA", "IS")),
                          iterfield=["in_file"],
                          name="flipfunc")

    # Slice up the registration
    func2anatpng = pe.MapNode(fsl.Slicer(middle_slices=True,
                                         show_orientation=False,
                                         scaling=.6,
                                         label_slices=False),
                              iterfield=["in_file"],
                              name="func2anatpng")

    # Rename some files
    pngname = pe.MapNode(util.Rename(format_string="func2anat.png"),
                         iterfield=["in_file"],
                         name="pngname")

    costname = pe.MapNode(util.Rename(format_string="func2anat_cost.dat"),
                          iterfield=["in_file"],
                          name="costname")

    tkregname = pe.MapNode(util.Rename(format_string="func2anat_tkreg.dat"),
                           iterfield=["in_file"],
                           name="tkregname")

    flirtname = pe.MapNode(util.Rename(format_string="func2anat_flirt.mat"),
                           iterfield=["in_file"],
                           name="flirtname")

    # Merge the slicer png and cost file into a report list
    report = pe.Node(util.Merge(2, axis="hstack"), name="report")

    # Define the workflow outputs
    outputnode = pe.Node(
        util.IdentityInterface(fields=["tkreg_mat", "flirt_mat", "report"]),
        name="outputs")

    bbregister = pe.Workflow(name=name)

    # Connect the registration
    bbregister.connect([
        (inputnode, func2anat, [("subject_id", "subject_id"),
                                ("source_file", "source_file")]),
        (inputnode, fssource, [("subject_id", "subject_id")]),
        (func2anat, flipfunc, [("registered_file", "in_file")]),
        (flipfunc, func2anatpng, [("out_file", "in_file")]),
        (fssource, convert, [("brain", "in_file")]),
        (convert, flipbrain, [("out_file", "in_file")]),
        (flipbrain, func2anatpng, [("out_file", "image_edges")]),
        (func2anatpng, pngname, [("out_file", "in_file")]),
        (func2anat, tkregname, [("out_reg_file", "in_file")]),
        (func2anat, flirtname, [("out_fsl_file", "in_file")]),
        (func2anat, costname, [("min_cost_file", "in_file")]),
        (costname, report, [("out_file", "in1")]),
        (pngname, report, [("out_file", "in2")]),
        (tkregname, outputnode, [("out_file", "tkreg_mat")]),
        (flirtname, outputnode, [("out_file", "flirt_mat")]),
        (report, outputnode, [("out", "report")]),
    ])

    return bbregister
Code example #24
cluster_copes1.inputs.use_mm = True

#==========================================================================================================================================================
#overlay thresh_zstat1

overlay_cope1 = Node(fsl.Overlay(), name='overlay_cope1')
overlay_cope1.inputs.auto_thresh_bg = True
overlay_cope1.inputs.stat_thresh = (2.300302, 14)
overlay_cope1.inputs.transparency = True
overlay_cope1.inputs.out_file = 'rendered_thresh_zstat1.nii.gz'
overlay_cope1.inputs.show_negative_stats = True

#==========================================================================================================================================================
#generate pics thresh_zstat1

slicer_cope1 = Node(fsl.Slicer(), name='slicer_cope1')
slicer_cope1.inputs.sample_axial = 2
slicer_cope1.inputs.image_width = 2000
slicer_cope1.inputs.out_file = 'rendered_thresh_zstat1.png'
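
# A hedged sketch of how these nodes are typically wired (the workflow object
# name is an assumption, not taken from this snippet):
# workflow.connect(overlay_cope1, 'out_file', slicer_cope1, 'in_file')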

#===========================================================================================================================================================
#transform copes from the 2nd level to template space so they are ready for the 3rd level

cope1_2ndlevel_2_template = Node(ants.ApplyTransforms(),
                                 name='cope1_2ndlevel_2_template')
cope1_2ndlevel_2_template.inputs.dimension = 3
cope1_2ndlevel_2_template.inputs.reference_image = standard_brain
cope1_2ndlevel_2_template.inputs.output_image = 'cope1_2ndlevel_2_standard_brain.nii.gz'

varcope1_2ndlevel_2_template = Node(ants.ApplyTransforms(),
                                    name='varcope1_2ndlevel_2_template')
Code example #25
                       'in_file')
dog_preproc_wf.connect(normalize_sagittal_image_node, 'out_file',
                       average_images, 'in_file2')

apply_final_mask = pe.Node(interface=fsl.maths.ApplyMask(),
                           name='apply_final_mask')

### To do this, the image needs to be masked first...

dog_preproc_wf.connect(average_images, 'out_file', apply_final_mask, 'in_file')
dog_preproc_wf.connect(resample_axial_mask_isotropic, 'out_file',
                       apply_final_mask, 'mask_file')

#threshold_template_mask = pe.Node(interface=fsl.ImageMaths(op_string=' -thr 1.0'), name="threshold_template_mask")

slicer_midslice = pe.MapNode(interface=fsl.Slicer(),
                             name="slicer_midslice",
                             iterfield=['in_file'])
slicer_midslice.inputs.middle_slices = True

slicer_all_axial = pe.MapNode(interface=fsl.Slicer(),
                              name="slicer_all_axial",
                              iterfield=['in_file'])
slicer_all_axial.inputs.image_width = 1000
slicer_all_axial.inputs.sample_axial = 3

datasink = pe.Node(nio.DataSink(), name='datasink')
datasink.inputs.base_directory = output_dir

merge_node = pe.Node(interface=Merge(4), name='merge_node')
dog_preproc_wf.connect(normalize_axial_image_node, 'out_file', merge_node,
Code example #26
Overlay_t_Contrast.inputs.transparency = True


#-----------------------------------------------------------------------------------------------------
# In[15]:
#Overlay f contrast
Overlay_f_Contrast = Node(fsl.Overlay(), name = 'Overlay_f_Contrast')
Overlay_f_Contrast.inputs.auto_thresh_bg = True
Overlay_f_Contrast.inputs.stat_thresh = (2.300302,4.877862)
Overlay_f_Contrast.inputs.transparency = True


#-----------------------------------------------------------------------------------------------------
# In[15]:
#slicer for the t contrast image
Slicer_t_Contrast = Node(fsl.Slicer(), name = 'Generate_t_Contrast_Image')
Slicer_t_Contrast.inputs.all_axial = True
Slicer_t_Contrast.inputs.image_width = 750
#-----------------------------------------------------------------------------------------------------
# In[15]:
#slicer for the f contrast image
Slicer_f_Contrast = Node(fsl.Slicer(), name = 'Generate_f_Contrast_Image')
Slicer_f_Contrast.inputs.all_axial = True
Slicer_f_Contrast.inputs.image_width = 750

#-----------------------------------------------------------------------------------------------------
# In[15]:
#Calculate degrees of freedom for the second level analysis
#N.B. the number of runs has to be equal for each subject
dof = 150 - (len(subject_list) * len(session_list))  # 150 = number of volumes
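# Worked example (illustrative numbers): with 10 subjects and 3 sessions each,
# dof = 150 - (10 * 3) = 120.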
#-----------------------------------------------------------------------------------------------------
Code example #27
def create_realignment_workflow(name="realignment", interp_type="trilinear"):

    # Define the workflow inputs
    inputnode = pe.Node(util.IdentityInterface(fields=["timeseries"]),
                        name="inputs")

    # Get the middle volume of each run for motion correction
    extractref = pe.MapNode(fsl.ExtractROI(t_size=1),
                            iterfield=["in_file", "t_min"],
                            name="extractref")

    # Slice the example func for reporting
    exampleslice = pe.MapNode(fsl.Slicer(image_width=800, label_slices=False),
                              iterfield=["in_file"],
                              name="exampleslice")
    exampleslice.inputs.sample_axial = 2

    # Motion correct to middle volume of each run
    mcflirt = pe.MapNode(fsl.MCFLIRT(save_plots=True,
                                     save_rms=True,
                                     interpolation=interp_type),
                         name="mcflirt",
                         iterfield=["in_file", "ref_file"])

    report_inputs = ["realign_params", "rms_files"]
    report_outputs = ["max_motion_file", "disp_plot", "rot_plot", "trans_plot"]
    mcreport = pe.MapNode(util.Function(input_names=report_inputs,
                                        output_names=report_outputs,
                                        function=write_realign_report),
                          iterfield=report_inputs,
                          name="mcreport")

    # Rename some things
    exfuncname = pe.MapNode(util.Rename(format_string="example_func",
                                        keep_ext=True),
                            iterfield=["in_file"],
                            name="exfuncname")

    exslicename = pe.MapNode(util.Rename(format_string="example_func",
                                         keep_ext=True),
                             iterfield=["in_file"],
                             name="exslicename")

    parname = pe.MapNode(
        util.Rename(format_string="realignment_parameters.par"),
        iterfield=["in_file"],
        name="parname")

    # Send out all the report data as one list
    mergereport = pe.Node(util.Merge(numinputs=5, axis="hstack"),
                          name="mergereport")

    # Define the outputs
    outputnode = pe.Node(util.IdentityInterface(fields=[
        "timeseries", "example_func", "realign_parameters", "realign_report"
    ]),
                         name="outputs")

    # Define and connect the sub workflow
    realignment = pe.Workflow(name=name)

    realignment.connect([
        (inputnode, extractref, [("timeseries", "in_file"),
                                 (("timeseries", get_middle_volume), "t_min")
                                 ]),
        (extractref, exampleslice, [("roi_file", "in_file")]),
        (inputnode, mcflirt, [("timeseries", "in_file")]),
        (extractref, mcflirt, [("roi_file", "ref_file")]),
        (mcflirt, mcreport, [("par_file", "realign_params"),
                             ("rms_files", "rms_files")]),
        (exampleslice, exslicename, [("out_file", "in_file")]),
        (mcreport, mergereport, [("max_motion_file", "in1"),
                                 ("rot_plot", "in2"), ("disp_plot", "in3"),
                                 ("trans_plot", "in4")]),
        (exslicename, mergereport, [("out_file", "in5")]),
        (mcflirt, parname, [("par_file", "in_file")]),
        (parname, outputnode, [("out_file", "realign_parameters")]),
        (extractref, exfuncname, [("roi_file", "in_file")]),
        (mcflirt, outputnode, [("out_file", "timeseries")]),
        (exfuncname, outputnode, [("out_file", "example_func")]),
        (mergereport, outputnode, [("out", "realign_report")]),
    ])

    return realignment
Code example #28
"""

selectcontrast = pe.Node(niu.Select(), name="selectcontrast")
"""Use :class:`nipype.interfaces.fsl.Overlay` to combine the statistical output of
the contrast estimate and a background image into one volume.
"""

overlaystats = pe.Node(fsl.Overlay(), name="overlaystats")
overlaystats.inputs.stat_thresh = (3, 10)
overlaystats.inputs.show_negative_stats = True
overlaystats.inputs.auto_thresh_bg = True
"""Use :class:`nipype.interfaces.fsl.Slicer` to create images of the overlaid
statistical volumes for a report of the first-level results.
"""

slicestats = pe.Node(fsl.Slicer(), name="slicestats")
slicestats.inputs.all_axial = True
slicestats.inputs.image_width = 750

l1analysis.connect([
    (modelspec, level1design, [('session_info', 'session_info')]),
    (level1design, level1estimate, [('spm_mat_file', 'spm_mat_file')]),
    (level1estimate, contrastestimate, [('spm_mat_file', 'spm_mat_file'),
                                        ('beta_images', 'beta_images'),
                                        ('residual_image', 'residual_image')]),
    (contrastestimate, selectcontrast, [('spmT_images', 'inlist')]),
    (selectcontrast, overlaystats, [('out', 'stat_image')]),
    (overlaystats, slicestats, [('out_file', 'in_file')])
])
"""
Preproc + Analysis pipeline
Code example #29
# cortical and cerebellar white matter volumes to construct wm edge
# [lh cerebral wm, lh cerebellar wm, rh cerebral wm, rh cerebellar wm, brain stem]
wmseg = pe.Node(fs.Binarize(out_type='nii.gz',
                            match=[2, 7, 41, 46, 16],
                            binary_file='T1_brain_wmseg.nii.gz'),
                name='wmseg')
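# (FreeSurfer aseg labels, for reference: 2/41 = left/right cerebral WM,
#  7/46 = left/right cerebellar WM, 16 = brain stem)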
# make an edge image from wmseg to visualize coregistration quality
edge = pe.Node(fsl.ApplyMask(args='-edge -bin',
                             out_file='T1_brain_wmedge.nii.gz'),
               name='edge')

# visualize the segmentation
slicer = pe.Node(fsl.Slicer(sample_axial=6, image_width=750), name='visualize')

# connections
check_freesurfer.connect([
    (infosource, fs_import, [('subject_id', 'subject_id')]),
    (dirsource, fs_import, [('fs_dir', 'subjects_dir')]),
    (fs_import, brainmask, [(('aparc_aseg', get_aparc_aseg), 'in_file')]),
    (fs_import, head_convert, [('T1', 'in_file')]),
    (fs_import, wmseg, [(('aparc_aseg', get_aparc_aseg), 'in_file')]),
    (brainmask, fillholes, [('binary_file', 'in_file')]),
    (fillholes, brain, [('out_file', 'mask_file')]),
    (head_convert, brain, [('out_file', 'in_file')]),
    (wmseg, edge, [('binary_file', 'in_file'),
                   ('binary_file', 'mask_file')]),
    (head_convert, datasink, [('out_file', 'anat_head')]),
    (fillholes, datasink, [('out_file', 'brain_mask')]),