Code Example #1
File: slicetime.py Project: erramuzpe/pypes
from nipype.interfaces.base import traits, isdefined


def spm_slicetime(
    in_files=traits.Undefined,
    out_prefix=traits.Undefined,
    num_slices=0,
    time_repetition=-1,
    time_acquisition=-1,
    ref_slice=traits.Undefined,
    slice_order=None,
):
    """ Return a nipype interface to the SPM SliceTiming.

    Parameters
    ----------
    in_files: str or list of str
        Path to the input file(s).

    out_prefix: str
        Prefix to the output file.
        Default: 'a'

    num_slices: int
        Number of slices in each functional volume.

    time_repetition: int or str
        The time repetition (TR) of the input dataset, in seconds.
        Default: -1
        If left at the default, the TR is read from the NIfTI image header.

    time_acquisition: int
        Time of volume acquisition, usually calculated as TR - (TR/num_slices).

    ref_slice: int
        Index of the reference slice

    slice_order: list of int
        Order in which the slices were acquired (1-based indices).

    Returns
    -------
    stc: nipype interface
    """
    import nipype.interfaces.spm as spm

    stc = spm.SliceTiming()

    stc.inputs.in_files = in_files
    stc.inputs.out_prefix = 'a' if not isdefined(out_prefix) else out_prefix
    stc.inputs.slice_order = slice_order
    stc.inputs.ref_slice = ref_slice
    stc.inputs.time_repetition = time_repetition
    stc.inputs.time_acquisition = time_acquisition
    stc.inputs.num_slices = num_slices

    #res = stc.run()
    return stc
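
A minimal usage sketch for the factory above (the file path and acquisition values are hypothetical examples):

stc = spm_slicetime(
    in_files='/data/sub01_bold.nii',    # hypothetical path
    num_slices=32,
    time_repetition=2.0,
    time_acquisition=2.0 - 2.0 / 32,    # TA = TR - TR/num_slices
    ref_slice=1,
    slice_order=list(range(1, 33)),     # sequential ascending, 1-based
)
res = stc.run()  # executes SPM SliceTiming through nipype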
Code Example #2
File: test_spm.py Project: yzw0041/nipype
def test_spm(name='test_spm_3d'):
    """
    A simple workflow to test SPM's installation. By default will split the 4D volume in
    time-steps.
    """
    workflow = pe.Workflow(name=name)

    inputnode = pe.Node(niu.IdentityInterface(fields=['in_data']),
                        name='inputnode')
    dgr = pe.Node(nio.DataGrabber(template="feeds/data/fmri.nii.gz",
                                  outfields=['out_file'],
                                  sort_filelist=False),
                  name='datasource')

    stc = pe.Node(spm.SliceTiming(num_slices=21,
                                  time_repetition=1.0,
                                  time_acquisition=2. - 2. / 32,
                                  slice_order=list(range(21, 0, -1)),
                                  ref_slice=10),
                  name='stc')
    realign_estimate = pe.Node(spm.Realign(jobtype='estimate'),
                               name='realign_estimate')
    realign_write = pe.Node(spm.Realign(jobtype='write'), name='realign_write')
    realign_estwrite = pe.Node(spm.Realign(jobtype='estwrite'),
                               name='realign_estwrite')
    smooth = pe.Node(spm.Smooth(fwhm=[6, 6, 6]), name='smooth')

    if name == 'test_spm_3d':
        split = pe.Node(fsl.Split(dimension="t", output_type="NIFTI"),
                        name="split")
        workflow.connect([(dgr, split, [(('out_file', _get_first), 'in_file')
                                        ]),
                          (split, stc, [("out_files", "in_files")])])
    elif name == 'test_spm_4d':
        gunzip = pe.Node(Gunzip(), name="gunzip")
        workflow.connect([(dgr, gunzip, [(('out_file', _get_first), 'in_file')
                                         ]),
                          (gunzip, stc, [("out_file", "in_files")])])
    else:
        raise NotImplementedError(
            'No implementation of the test workflow \'{}\' was found'.format(
                name))

    workflow.connect([
        (inputnode, dgr, [('in_data', 'base_directory')]),
        (stc, realign_estimate, [('timecorrected_files', 'in_files')]),
        (realign_estimate, realign_write, [('modified_in_files', 'in_files')]),
        (stc, realign_estwrite, [('timecorrected_files', 'in_files')]),
        (realign_write, smooth, [('realigned_files', 'in_files')])
    ])
    return workflow
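
A sketch of how this workflow might be run (the base directory is a hypothetical path that should contain the FSL feeds data):

wf = test_spm(name='test_spm_3d')
wf.inputs.inputnode.in_data = '/path/to/data'  # hypothetical; parent of feeds/
wf.run()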
Code Example #3
def process_subject(func_fname, anat_fname):
    # Drop dummy volumes
    img = nib.load(func_fname)
    dropped_img = nib.Nifti1Image(img.get_data()[..., n_dummies:], img.affine,
                                  img.header)
    fixed_fname = prefix_path('f', degz(func_fname))
    nib.save(dropped_img, fixed_fname)

    # Ungz anatomical
    if anat_fname.endswith('.gz'):
        anat_img = nib.load(anat_fname)
        anat_fname = degz(anat_fname)
        nib.save(anat_img, anat_fname)

    # Slice time correction
    num_slices = img.shape[2]
    time_for_one_slice = TR / num_slices
    TA = TR - time_for_one_slice
    st = spm.SliceTiming()
    st.inputs.in_files = fixed_fname
    st.inputs.num_slices = num_slices
    st.inputs.time_repetition = TR
    st.inputs.time_acquisition = TA
    st.inputs.slice_order = order_func(num_slices)
    st.inputs.ref_slice = ref_slice
    st.run()
    fixed_stimed = prefix_path('a', fixed_fname)

    # Realign
    realign = spm.Realign()
    realign.inputs.in_files = fixed_stimed
    # Do not write resliced files, do write mean image
    realign.inputs.write_which = write_which
    realign.run()

    # Coregistration
    coreg = spm.Coregister()
    # Coregister structural to mean image from realignment
    coreg.inputs.target = prefix_path('mean', fixed_stimed)
    coreg.inputs.source = anat_fname
    coreg.run()

    # Normalization / resampling with normalization + realign params
    seg_norm = spm.Normalize12()
    seg_norm.inputs.image_to_align = anat_fname
    seg_norm.inputs.apply_to_files = fixed_stimed
    seg_norm.inputs.write_bounding_box = bounding_box
    seg_norm.inputs.write_voxel_sizes = voxel_sizes
    seg_norm.run()
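
prefix_path and degz are defined elsewhere in the source module; a plausible sketch of their behavior, inferred from how they are called here (assumptions, not the original code):

import os

def degz(fname):
    # Assumed: strip a trailing '.gz' from the filename.
    return fname[:-3] if fname.endswith('.gz') else fname

def prefix_path(prefix, fname):
    # Assumed: prepend `prefix` to the basename, keeping the directory.
    dirname, basename = os.path.split(fname)
    return os.path.join(dirname, prefix + basename)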
Code Example #4
def slicetime(new_in_file, sliceorder):
    print("running slice time correction")
    img = nib.load(new_in_file[0])
    num_slices = img.shape[2]
    st = spm.SliceTiming()
    st.inputs.in_files = new_in_file
    st.inputs.num_slices = num_slices
    st.inputs.time_repetition = tr
    st.inputs.time_acquisition = tr - tr / num_slices
    # Convert the zero-based slice order to SPM's one-based convention.
    st.inputs.slice_order = (np.asarray(sliceorder) +
                             1).astype(int).tolist()
    st.inputs.ref_slice = 1
    res_st = st.run()
    file_to_realign = res_st.outputs.timecorrected_files
    return file_to_realign
Code Example #5
def test_slicetiming():
    yield assert_equal, spm.SliceTiming._jobtype, 'temporal'
    yield assert_equal, spm.SliceTiming._jobname, 'st'
    input_map = dict(in_files=dict(field='scans',
                                   mandatory=True,
                                   copyfile=False),
                     num_slices=dict(field='nslices'),
                     time_repetition=dict(field='tr'),
                     time_acquisition=dict(field='ta'),
                     slice_order=dict(field='so'),
                     ref_slice=dict(field='refslice'))
    st = spm.SliceTiming()
    for key, metadata in input_map.items():
        for metakey, value in metadata.items():
            yield assert_equal, getattr(st.inputs.traits()[key],
                                        metakey), value
Code Example #6
def test_slicetiming_list_outputs():
    filelist, outdir, cwd = create_files_in_directory()
    st = spm.SliceTiming(in_files=filelist[0])
    yield assert_equal, st._list_outputs()['timecorrected_files'][0][0], 'a'
    clean_directory(outdir, cwd)
Code Example #7
File: test_preprocess.py Project: xgrg/nipype
def test_slicetiming_list_outputs(create_files_in_directory):
    filelist, outdir, cwd = create_files_in_directory
    st = spm.SliceTiming(in_files=filelist[0])
    assert st._list_outputs()['timecorrected_files'][0][0] == 'a'
Code Example #8
File: pipelines.py Project: devhliu/neurolearn
def create_spm_preproc_func_pipeline(data_dir=None,
                                     subject_id=None,
                                     task_list=None):

    ###############################
    ## Set up Nodes
    ###############################

    ds = Node(nio.DataGrabber(infields=['subject_id', 'task_id'],
                              outfields=['func', 'struc']),
              name='datasource')
    ds.inputs.base_directory = os.path.abspath(data_dir + '/' + subject_id)
    ds.inputs.template = '*'
    ds.inputs.sort_filelist = True
    ds.inputs.template_args = {'func': [['task_id']], 'struc': []}
    ds.inputs.field_template = {
        'func': 'Functional/Raw/%s/func.nii',
        'struc': 'Structural/SPGR/spgr.nii'
    }
    ds.inputs.subject_id = subject_id
    ds.inputs.task_id = task_list
    ds.iterables = ('task_id', task_list)
    # ds.run().outputs #show datafiles

    # #Setup Data Sinker for writing output files
    # datasink = Node(nio.DataSink(), name='sinker')
    # datasink.inputs.base_directory = '/path/to/output'
    # workflow.connect(realigner, 'realignment_parameters', datasink, 'motion.@par')
    # datasink.inputs.substitutions = [('_variable', 'variable'),('file_subject_', '')]

    #Get Timing Acquisition for slice timing
    tr = 2
    ta = Node(interface=util.Function(input_names=['tr', 'n_slices'],
                                      output_names=['ta'],
                                      function=get_ta),
              name="ta")
    ta.inputs.tr = tr

    #Slice Timing: sequential ascending
    slice_timing = Node(interface=spm.SliceTiming(), name="slice_timing")
    slice_timing.inputs.time_repetition = tr
    slice_timing.inputs.ref_slice = 1

    #Realignment - 6 parameters - realign to first image of very first series.
    realign = Node(interface=spm.Realign(), name="realign")
    realign.inputs.register_to_mean = True

    #Plot Realignment
    plot_realign = Node(interface=PlotRealignmentParameters(),
                        name="plot_realign")

    #Artifact Detection
    art = Node(interface=ra.ArtifactDetect(), name="art")
    art.inputs.use_differences = [True, False]
    art.inputs.use_norm = True
    art.inputs.norm_threshold = 1
    art.inputs.zintensity_threshold = 3
    art.inputs.mask_type = 'file'
    art.inputs.parameter_source = 'SPM'

    #Coregister - 12 parameters, cost function = 'nmi', fwhm 7, interpolate, don't mask
    #anatomical to functional mean across all available data.
    coregister = Node(interface=spm.Coregister(), name="coregister")
    coregister.inputs.jobtype = 'estimate'

    # Segment structural, gray/white/csf,mni,
    segment = Node(interface=spm.Segment(), name="segment")
    segment.inputs.save_bias_corrected = True

    #Normalize - structural to MNI - then apply this to the coregistered functionals
    normalize = Node(interface=spm.Normalize(), name="normalize")
    normalize.inputs.template = os.path.abspath(t1_template_file)

    #Plot normalization Check
    plot_normalization_check = Node(interface=Plot_Coregistration_Montage(),
                                    name="plot_normalization_check")
    plot_normalization_check.inputs.canonical_img = canonical_file

    #Create Mask
    compute_mask = Node(interface=ComputeMask(), name="compute_mask")
    #remove lower 5% of histogram of mean image
    compute_mask.inputs.m = .05

    #Smooth
    #implicit masking (.im) = 0, dtype = 0
    smooth = Node(interface=spm.Smooth(), name="smooth")
    fwhmlist = [0, 5, 8]
    smooth.iterables = ('fwhm', fwhmlist)

    #Create Covariate matrix
    make_covariates = Node(interface=Create_Covariates(),
                           name="make_covariates")

    ###############################
    ## Create Pipeline
    ###############################

    Preprocessed = Workflow(name="Preprocessed")
    Preprocessed.base_dir = os.path.abspath(data_dir + '/' + subject_id +
                                            '/Functional')

    Preprocessed.connect([
        (ds, ta, [(('func', get_n_slices), "n_slices")]),
        (ta, slice_timing, [("ta", "time_acquisition")]),
        (ds, slice_timing, [
            ('func', 'in_files'),
            (('func', get_n_slices), "num_slices"),
            (('func', get_slice_order), "slice_order"),
        ]),
        (slice_timing, realign, [('timecorrected_files', 'in_files')]),
        (realign, compute_mask, [('mean_image', 'mean_volume')]),
        (realign, coregister, [('mean_image', 'target')]),
        (ds, coregister, [('struc', 'source')]),
        (coregister, segment, [('coregistered_source', 'data')]),
        (segment, normalize, [
            ('transformation_mat', 'parameter_file'),
            ('bias_corrected_image', 'source'),
        ]),
        (realign, normalize, [('realigned_files', 'apply_to_files'),
                              (('realigned_files', get_vox_dims),
                               'write_voxel_sizes')]),
        (normalize, smooth, [('normalized_files', 'in_files')]),
        (compute_mask, art, [('brain_mask', 'mask_file')]),
        (realign, art, [('realignment_parameters', 'realignment_parameters')]),
        (realign, art, [('realigned_files', 'realigned_files')]),
        (realign, plot_realign, [('realignment_parameters',
                                  'realignment_parameters')]),
        (normalize, plot_normalization_check, [('normalized_files', 'wra_img')
                                               ]),
        (realign, make_covariates, [('realignment_parameters',
                                     'realignment_parameters')]),
        (art, make_covariates, [('outlier_files', 'spike_id')]),
    ])
    return Preprocessed
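
The connect calls above rely on helper functions (get_ta, get_n_slices, get_slice_order, get_vox_dims) defined elsewhere in the module; hedged sketches consistent with how they are used (imports sit inside the functions because nipype executes connect helpers in isolation):

def get_ta(tr, n_slices):
    # TA = TR - TR/num_slices, the usual SPM slice-timing convention.
    return tr - tr / float(n_slices)

def get_n_slices(volume):
    import nibabel as nib
    from nipype.utils.filemanip import filename_to_list
    return nib.load(filename_to_list(volume)[0]).shape[2]

def get_slice_order(volume):
    # Sequential ascending, 1-based, matching the pipeline's comment.
    import nibabel as nib
    from nipype.utils.filemanip import filename_to_list
    n_slices = nib.load(filename_to_list(volume)[0]).shape[2]
    return list(range(1, n_slices + 1))

def get_vox_dims(volume):
    import nibabel as nib
    from nipype.utils.filemanip import filename_to_list
    zooms = nib.load(filename_to_list(volume)[0]).header.get_zooms()
    return [float(z) for z in zooms[:3]]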
Code Example #9
    def build_core_nodes(self):
        """Build and connect the core nodes of the pipelines.
        """

        import fmri_preprocessing_workflows as utils
        import nipype.interfaces.utility as nutil
        import nipype.interfaces.spm as spm
        import nipype.pipeline.engine as npe
        from clinica.utils.filemanip import zip_nii, unzip_nii

        # Unzipping
        # =========
        unzip_node = npe.MapNode(name='Unzipping',
                                 iterfield=['in_file'],
                                 interface=nutil.Function(
                                     input_names=['in_file'],
                                     output_names=['out_file'],
                                     function=unzip_nii))

        unzip_T1w = unzip_node.clone('UnzippingT1w')
        unzip_phasediff = unzip_node.clone('UnzippingPhasediff')
        unzip_bold = unzip_node.clone('UnzippingBold')
        unzip_magnitude1 = unzip_node.clone('UnzippingMagnitude1')

        # FieldMap calculation
        # ====================
        if self.parameters['unwarping']:
            fm_node = npe.MapNode(name="FieldMapCalculation",
                                  iterfield=[
                                      'phase', 'magnitude', 'epi', 'et',
                                      'blipdir', 'tert'
                                  ],
                                  interface=spm.FieldMap())

        # Slice timing correction
        # =======================
        st_node = npe.MapNode(name="SliceTimingCorrection",
                              iterfield=[
                                  'in_files', 'time_repetition', 'slice_order',
                                  'num_slices', 'ref_slice', 'time_acquisition'
                              ],
                              interface=spm.SliceTiming())

        # Motion correction and unwarping
        # ===============================

        if self.parameters['unwarping']:
            mc_node = npe.MapNode(name="MotionCorrectionUnwarping",
                                  iterfield=["scans", "pmscan"],
                                  interface=spm.RealignUnwarp())
            mc_node.inputs.register_to_mean = True
            mc_node.inputs.reslice_mask = False
        else:
            mc_node = npe.MapNode(name="MotionCorrection",
                                  iterfield=["in_files"],
                                  interface=spm.Realign())
            mc_node.inputs.register_to_mean = True

        # Brain extraction
        # ================
        import os.path as path
        from nipype.interfaces.freesurfer import MRIConvert
        if self.parameters['freesurfer_brain_mask']:
            brain_masks = [
                path.join(self.caps_directory, 'subjects', self.subjects[i],
                          self.sessions[i], 't1/freesurfer_cross_sectional',
                          self.subjects[i] + '_' + self.sessions[i],
                          'mri/brain.mgz') for i in range(len(self.subjects))
            ]
            conv_brain_masks = [
                str(self.subjects[i] + '_' + self.sessions[i] + '.nii')
                for i in range(len(self.subjects))
            ]
            bet_node = npe.MapNode(interface=MRIConvert(),
                                   iterfield=["in_file", "out_file"],
                                   name="BrainConversion")
            bet_node.inputs.in_file = brain_masks
            bet_node.inputs.out_file = conv_brain_masks
            bet_node.inputs.out_type = 'nii'
        else:
            bet_node = utils.BrainExtractionWorkflow(name="BrainExtraction")

        # Registration
        # ============
        reg_node = npe.MapNode(
            interface=spm.Coregister(),
            iterfield=["apply_to_files", "source", "target"],
            name="Registration")

        # Normalization
        # =============
        norm_node = npe.MapNode(interface=spm.Normalize12(),
                                iterfield=['image_to_align', 'apply_to_files'],
                                name='Normalization')

        # Smoothing
        # =========
        smooth_node = npe.MapNode(interface=spm.Smooth(),
                                  iterfield=['in_files'],
                                  name='Smoothing')
        smooth_node.inputs.fwhm = self.parameters['full_width_at_half_maximum']

        # Zipping
        # =======
        zip_node = npe.MapNode(name='Zipping',
                               iterfield=['in_file'],
                               interface=nutil.Function(
                                   input_names=['in_file'],
                                   output_names=['out_file'],
                                   function=zip_nii))

        zip_bet_node = zip_node.clone('ZippingBET')
        zip_mc_node = zip_node.clone('ZippingMC')
        zip_reg_node = zip_node.clone('ZippingRegistration')
        zip_norm_node = zip_node.clone('ZippingNormalization')
        zip_smooth_node = zip_node.clone('ZippingSmoothing')

        # Connections
        # ===========

        if self.parameters['freesurfer_brain_mask']:
            self.connect([
                # Brain extraction
                (bet_node, reg_node, [('out_file', 'target')]),
                (bet_node, zip_bet_node, [('out_file', 'in_file')]),
            ])
        else:
            self.connect([
                # Brain extraction
                (unzip_T1w, bet_node, [('out_file', 'Segmentation.data')]),
                (unzip_T1w, bet_node, [('out_file', 'ApplyMask.in_file')]),
                (bet_node, reg_node, [('ApplyMask.out_file', 'target')]),
                (bet_node, zip_bet_node, [('Fill.out_file', 'in_file')]),
            ])

        if self.parameters['unwarping']:
            self.connect([
                # FieldMap calculation
                (self.input_node, fm_node, [('et', 'et')]),
                (self.input_node, fm_node, [('blipdir', 'blipdir')]),
                (self.input_node, fm_node, [('tert', 'tert')]),
                (self.input_node, unzip_phasediff, [('phasediff', 'in_file')]),
                (self.input_node, unzip_magnitude1, [('magnitude1', 'in_file')
                                                     ]),
                (unzip_magnitude1, fm_node, [('out_file', 'magnitude')]),
                (unzip_phasediff, fm_node, [('out_file', 'phase')]),
                (unzip_bold, fm_node, [('out_file', 'epi')]),
                # Motion correction and unwarping
                (st_node, mc_node, [('timecorrected_files', 'scans')]),
                (fm_node, mc_node, [('vdm', 'pmscan')]),
                (mc_node, reg_node, [('realigned_unwarped_files',
                                      'apply_to_files')]),
                (mc_node, zip_mc_node, [('realigned_unwarped_files', 'in_file')
                                        ]),
            ])
        else:
            self.connect([
                # Motion correction and unwarping
                (st_node, mc_node, [('timecorrected_files', 'in_files')]),
                (mc_node, reg_node, [('realigned_files', 'apply_to_files')]),
                (mc_node, zip_mc_node, [('realigned_files', 'in_file')]),
            ])
        self.connect([
            # Unzipping
            (self.input_node, unzip_T1w, [('T1w', 'in_file')]),
            (self.input_node, unzip_bold, [('bold', 'in_file')]),
            # Slice timing correction
            (unzip_bold, st_node, [('out_file', 'in_files')]),
            (self.input_node, st_node, [('time_repetition', 'time_repetition')
                                        ]),
            (self.input_node, st_node, [('num_slices', 'num_slices')]),
            (self.input_node, st_node, [('slice_order', 'slice_order')]),
            (self.input_node, st_node, [('ref_slice', 'ref_slice')]),
            (self.input_node, st_node, [('time_acquisition',
                                         'time_acquisition')]),
            # Registration
            (mc_node, reg_node, [('mean_image', 'source')]),
            # Normalization
            (unzip_T1w, norm_node, [('out_file', 'image_to_align')]),
            (reg_node, norm_node, [('coregistered_files', 'apply_to_files')]),
            # Smoothing
            (norm_node, smooth_node, [('normalized_files', 'in_files')]),
            # Zipping
            (reg_node, zip_reg_node, [('coregistered_files', 'in_file')]),
            (norm_node, zip_norm_node, [('normalized_files', 'in_file')]),
            (smooth_node, zip_smooth_node, [('smoothed_files', 'in_file')]),
            # Returning output
            (zip_bet_node, self.output_node, [('out_file', 't1_brain_mask')]),
            (mc_node, self.output_node, [('realignment_parameters',
                                          'mc_params')]),
            (zip_mc_node, self.output_node, [('out_file', 'native_fmri')]),
            (zip_reg_node, self.output_node, [('out_file', 't1_fmri')]),
            (zip_norm_node, self.output_node, [('out_file', 'mni_fmri')]),
            (zip_smooth_node, self.output_node, [('out_file',
                                                  'mni_smoothed_fmri')]),
        ])
Code Example #10
####EPI preprocessing####

#Convert EPI dicoms to nii (with embedded metadata)
epi_stack = Node(dcmstack.DcmStack(), name='epistack')
epi_stack.inputs.embed_meta = True
epi_stack.inputs.out_format = 'epi'
epi_stack.inputs.out_ext = '.nii'
#Outputs: out_file

#Despiking using afni (position based on Jo et al. (2013)).
despike = Node(afni.Despike(), name='despike')
despike.inputs.outputtype = 'NIFTI'
#Outputs: out_file

#Slice timing correction (gets timing from header)
st_corr = Node(spm.SliceTiming(), name='slicetiming_correction')
st_corr.inputs.ref_slice = 1
#Outputs: timecorrected_files

#Realignment using SPM <--- Maybe just estimate and apply all transforms at the end?
realign = Node(spm.Realign(), name='realign')
realign.inputs.register_to_mean = False
realign.inputs.quality = 1.0
#Outputs: realignment_parameters, resliced epi images (motion corrected)

tsnr = Node(misc.TSNR(), name='tsnr')
tsnr.inputs.regress_poly = 2
#Outputs: detrended_file, mean_file, stddev_file, tsnr_file

smooth = Node(spm.Smooth(), name='smooth')
smooth.inputs.fwhm = fwhm
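
The excerpt declares nodes without connecting them; a minimal wiring sketch consistent with the #Outputs: comments (the workflow name is hypothetical, and Workflow is assumed to come from nipype.pipeline.engine):

from nipype.pipeline.engine import Workflow

epi_preproc = Workflow(name='epi_preproc')  # hypothetical name
epi_preproc.connect([
    (epi_stack, despike, [('out_file', 'in_file')]),
    (despike, st_corr, [('out_file', 'in_files')]),
    (st_corr, realign, [('timecorrected_files', 'in_files')]),
    (realign, tsnr, [('realigned_files', 'in_file')]),
    (realign, smooth, [('realigned_files', 'in_files')]),
])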
Code Example #11
    def __init__(self, **template_dict):
        self.node = pe.Node(interface=spm.SliceTiming(), name='slicetiming')
        self.node.inputs.paths = template_dict['spm_path']
Code Example #12
def ascending_interleaved(num_slices):
    """ Return a 1-based interleaved acquisition order: odd slices, then even. """
    odd = range(1, num_slices + 1, 2)
    even = range(2, num_slices + 1, 2)
    return list(odd) + list(even)


order_func = ascending_interleaved

# Drop dummy volumes
img = nib.load(base_fname)
dropped_img = nib.Nifti1Image(img.get_data()[..., n_dummies:], img.affine,
                              img.header)
nib.save(dropped_img, 'f' + base_fname)

# Slice time correction
num_slices = img.shape[2]
time_for_one_slice = TR / num_slices
TA = TR - time_for_one_slice
st = spm.SliceTiming()
st.inputs.in_files = 'f' + base_fname
st.inputs.num_slices = num_slices
st.inputs.time_repetition = TR
st.inputs.time_acquisition = TA
st.inputs.slice_order = order_func(num_slices)
st.inputs.ref_slice = ref_slice
st.run()

# Realign
realign = spm.Realign()
realign.inputs.in_files = 'af' + base_fname
# Do not write resliced files, do write mean image
realign.inputs.write_which = write_which
realign.run()
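
A quick check of the ascending_interleaved ordering helper above (the slice count is an arbitrary example):

print(ascending_interleaved(6))  # -> [1, 3, 5, 2, 4, 6]: odd slices first, then even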
Code Example #13
def create_preproc_func_pipeline(
):  #ref_slice, n_skip=4, n_slices=30, tr=2.5, sparse=False):

    #    if sparse:
    #        real_tr = tr/2
    #    else:
    #        real_tr = tr

    inputnode = pe.Node(interface=util.IdentityInterface(
        fields=['func', "struct", "TR", "sparse"]),
                        name="inputnode")

    skip = pe.Node(interface=fsl.ExtractROI(), name="skip")
    skip.inputs.t_min = 4  #TODO
    skip.inputs.t_size = 100000

    realign = pe.Node(interface=spm.Realign(), name="realign")
    realign.inputs.register_to_mean = True

    tr_convert = pe.Node(interface=util.Function(input_names=['tr', 'sparse'],
                                                 output_names=['tr'],
                                                 function=get_tr),
                         name="tr_converter")
    ta = pe.Node(interface=util.Function(input_names=['real_tr', 'n_slices'],
                                         output_names=['ta'],
                                         function=get_ta),
                 name="ta")

    slice_timing = pe.Node(interface=spm.SliceTiming(), name="slice_timing")
    #slice_timing.inputs.num_slices = n_slices
    #slice_timing.inputs.time_repetition = real_tr
    #slice_timing.inputs.time_acquisition = real_tr - real_tr/float(n_slices)
    #slice_timing.inputs.slice_order = range(1,n_slices+1,2) + range(2,n_slices+1,2)
    #slice_timing.inputs.ref_slice = ref_slice

    coregister = pe.Node(interface=spm.Coregister(), name="coregister")
    coregister.inputs.jobtype = "estimate"

    smooth = pe.Node(interface=spm.Smooth(), name="smooth")
    smooth.iterables = ('fwhm', [[8, 8, 8], [0, 0, 0]])

    art = pe.Node(interface=ra.ArtifactDetect(), name="art")
    art.inputs.use_differences = [True, False]
    art.inputs.use_norm = True
    art.inputs.norm_threshold = 1
    art.inputs.zintensity_threshold = 3
    art.inputs.mask_type = 'file'
    art.inputs.parameter_source = 'SPM'

    compute_mask = pe.Node(interface=ComputeMask(), name="compute_mask")

    plot_realign = pe.Node(interface=neuroutils.PlotRealignemntParameters(),
                           name="plot_realign")

    preproc_func = pe.Workflow(name="preproc_func")
    preproc_func.connect([
        (inputnode, skip, [("func", "in_file")]),
        (inputnode, coregister, [("struct", "target")]),
        (realign, coregister, [('mean_image', 'source'),
                               ('realigned_files', 'apply_to_files')]),
        (coregister, compute_mask, [('coregistered_source', 'mean_volume')]),
        (skip, slice_timing, [("roi_file", "in_files"),
                              (('roi_file', get_n_slices), "num_slices"),
                              (('roi_file', get_slice_order), "slice_order"),
                              (('roi_file', get_ref_slice), "ref_slice")]),
        (inputnode, tr_convert, [("sparse", "sparse"), ("TR", "tr")]),
        (tr_convert, slice_timing, [("tr", "time_repetition")]),
        (tr_convert, ta, [("tr", "real_tr")]),
        (skip, ta, [(('roi_file', get_n_slices), "n_slices")]),
        (ta, slice_timing, [("ta", "time_acquisition")]),
        (slice_timing, realign, [("timecorrected_files", "in_files")]),
        (coregister, smooth, [("coregistered_files", "in_files")]),
        (compute_mask, art, [('brain_mask', 'mask_file')]),
        (realign, art, [('realignment_parameters', 'realignment_parameters')]),
        (realign, art, [('realigned_files', 'realigned_files')]),
        (realign, plot_realign, [('realignment_parameters',
                                  'realignment_parameters')])
    ])

    return preproc_func
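
get_tr is defined elsewhere; judging from the commented-out sparse handling at the top of this function, a plausible sketch (an assumption, not the original code):

def get_tr(tr, sparse):
    # Assumed: sparse acquisitions use half of the nominal TR for acquisition.
    if sparse:
        return tr / 2.0
    return tr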
Code Example #14
File: test_spm.py Project: tyarkoni/nipype
from __future__ import division
from builtins import range
import nipype.pipeline.engine as pe
from nipype.interfaces import spm
from nipype.interfaces import fsl
from nipype.algorithms.misc import Gunzip
import os

in_file = "feeds/data/fmri.nii.gz"

split = pe.Node(fsl.Split(dimension="t", output_type="NIFTI"), name="split")
split.inputs.in_file = os.path.abspath(in_file)

stc = pe.Node(interface=spm.SliceTiming(), name='stc')
stc.inputs.num_slices = 21
stc.inputs.time_repetition = 1.0
stc.inputs.time_acquisition = 2. - 2. / 32
stc.inputs.slice_order = list(range(21, 0, -1))
stc.inputs.ref_slice = 10

realign_estimate = pe.Node(interface=spm.Realign(), name='realign_estimate')
realign_estimate.inputs.jobtype = "estimate"

realign_write = pe.Node(interface=spm.Realign(), name='realign_write')
realign_write.inputs.jobtype = "write"

realign_estwrite = pe.Node(interface=spm.Realign(), name='realign_estwrite')
realign_estwrite.inputs.jobtype = "estwrite"
realign_estwrite.inputs.register_to_mean = True

smooth = pe.Node(interface=spm.Smooth(), name='smooth')
Code Example #15
    def __init__(self,
                 subject,
                 func_source,
                 struct_source,
                 datasink,
                 TR,
                 num_slices,
                 dim=2):
        self.subject = subject
        # specify input and output nodes
        self.func_source = func_source
        self.struct_source = struct_source
        self.datasink = datasink

        self.TR = TR
        self.num_slices = num_slices

        # specify nodes
        # structural process
        self.bet_struct = pe.Node(interface=fsl.BET(),
                                  name='non_brain_removal_BET_struct')
        self.bet_struct.inputs.output_type = "NIFTI"
        self.bet_struct.inputs.frac = 0.3

        self.coregister_struct = pe.Node(interface=spm.Coregister(),
                                         name="coregister_struct_to_mni")
        self.coregister_struct.inputs.target = '/mnt/Program/python/dev/images/MNI152_T1_2mm_brain.nii'
        if dim == 1:
            self.coregister_struct.inputs.target = '/mnt/Program/python/dev/images/MNI152_T1_1mm_brain.nii'
        self.coregister_struct.inputs.write_interp = 7
        self.coregister_struct.inputs.separation = [1.0, 1.0]
        self.coregister_struct.inputs.jobtype = 'estwrite'

        self.segment_struct = pe.Node(interface=spm.Segment(),
                                      name="segment_struct")
        # self.segment_struct.inputs.affine_regularization = 'mni'
        self.segment_struct.inputs.csf_output_type = [True, True, True]
        self.segment_struct.inputs.gm_output_type = [True, True, True]
        self.segment_struct.inputs.wm_output_type = [True, True, True]

        self.normalize_struct = pe.Node(interface=spm.Normalize(),
                                        name='normalize_struct')
        self.normalize_struct.inputs.jobtype = 'write'
        self.normalize_struct.inputs.write_bounding_box = [[-90, -126, -72],
                                                           [90, 90, 108]
                                                           ]  # 91, 109, 91
        if dim == 1:
            self.normalize_struct.inputs.write_bounding_box = [[
                -91, -126, -72
            ], [90, 91, 109]]  # 182, 218, 182
            self.normalize_struct.inputs.write_voxel_sizes = [1, 1, 1]

        self.normalize_gm = pe.Node(interface=spm.Normalize(),
                                    name='normalize_gm')
        self.normalize_gm.inputs.jobtype = 'write'
        self.normalize_gm.inputs.write_bounding_box = [[-90, -126, -72],
                                                       [90, 90,
                                                        108]]  # 91, 109, 91
        if dim == 1:
            self.normalize_gm.inputs.write_bounding_box = [[-91, -126, -72],
                                                           [90, 91, 109]
                                                           ]  # 182, 218, 182
            self.normalize_gm.inputs.write_voxel_sizes = [1, 1, 1]

        self.normalize_wm = pe.Node(interface=spm.Normalize(),
                                    name='normalize_wm')
        self.normalize_wm.inputs.jobtype = 'write'
        self.normalize_wm.inputs.write_bounding_box = [[-90, -126, -72],
                                                       [90, 90,
                                                        108]]  # 91, 109, 91
        if dim == 1:
            self.normalize_wm.inputs.write_bounding_box = [[-91, -126, -72],
                                                           [90, 91, 109]
                                                           ]  # 182, 218, 182
            self.normalize_wm.inputs.write_voxel_sizes = [1, 1, 1]

        self.normalize_csf = pe.Node(interface=spm.Normalize(),
                                     name='normalize_csf')
        self.normalize_csf.inputs.jobtype = 'write'
        self.normalize_csf.inputs.write_bounding_box = [[-90, -126, -72],
                                                        [90, 90,
                                                         108]]  # 91, 109, 91
        if dim == 1:
            self.normalize_csf.inputs.write_bounding_box = [[-91, -126, -72],
                                                            [90, 91, 109]
                                                            ]  # 182, 218, 182
            self.normalize_csf.inputs.write_voxel_sizes = [1, 1, 1]

        ###################################################################################################

        # functional process
        self.fslsplit = pe.Node(interface=fsl.Split(), name='fslsplit')
        self.fslsplit.inputs.dimension = 't'
        self.fslsplit.inputs.output_type = "NIFTI"

        self.fslmerge = pe.Node(interface=fsl.Merge(), name='fslmerge')
        self.fslmerge.inputs.dimension = 't'
        self.fslmerge.inputs.output_type = "NIFTI"

        # helper function(s)
        def bet_each(in_files, subject_name):
            '''
            @param in_files: list of image files
            @return out_files: list of image files after applied fsl.BET on it
            '''
            from nipype.interfaces import fsl
            import nipype.pipeline.engine as pe

            out_files = list()
            step_no = 0
            for file_ in in_files:
                bet = pe.Node(interface=fsl.BET(),
                              name='BET_for_step_{}_{}'.format(
                                  step_no, subject_name))
                bet.inputs.in_file = file_
                bet.inputs.out_file = file_[:len(file_) - 4] + '_bet.nii'
                bet.inputs.output_type = "NIFTI"
                bet.inputs.frac = 0.5

                bet.run()
                out_files.append(bet.inputs.out_file)

                step_no += 1
            return out_files

        # bet_func returns a list of NIFTI files
        self.bet_func = pe.Node(interface=Function(
            input_names=['in_files', 'subject_name'],
            output_names=['out_files'],
            function=bet_each),
                                name='non_brain_removal_BET_func')
        self.bet_func.inputs.subject_name = self.subject

        self.realign = pe.Node(interface=spm.Realign(),
                               name='realign_motion_correction')
        self.realign.inputs.register_to_mean = True

        self.coregister_func = pe.Node(interface=spm.Coregister(),
                                       name="coregister_func_to_mni")
        self.coregister_func.inputs.target = '/mnt/Program/python/dev/images/MNI152_T1_2mm_brain.nii'
        self.coregister_func.inputs.write_interp = 7
        self.coregister_func.inputs.separation = [1.0, 1.0]
        self.coregister_func.inputs.jobtype = 'estwrite'

        self.segment = pe.Node(interface=spm.Segment(), name="segment")

        self.normalize_func = pe.Node(interface=spm.Normalize(),
                                      name="normalize_func")
        self.normalize_func.inputs.jobtype = 'write'
        self.normalize_func.inputs.write_bounding_box = [[-90, -126, -72],
                                                         [90, 90,
                                                          108]]  # 91, 109, 91

        self.smooth = pe.Node(interface=spm.Smooth(), name="smooth")
        self.smooth.inputs.fwhm = [8, 8, 8]

        # backup node(s)
        self.slice_timing = pe.Node(interface=spm.SliceTiming(),
                                    name='time_slice_correction')
        self.slice_timing.inputs.time_repetition = self.TR
        self.slice_timing.inputs.num_slices = self.num_slices
        self.slice_timing.inputs.time_acquisition = self.TR - (self.TR /
                                                               self.num_slices)
        self.slice_timing.inputs.slice_order = list(
            range(self.num_slices, 0, -1))
        self.slice_timing.inputs.ref_slice = 1

        self.direct_normalize = pe.Node(interface=spm.Normalize12(),
                                        name='direct_normalize')
        self.direct_normalize.inputs.image_to_align = 'images/MNI152_T1_2mm_brain.nii'
        self.direct_normalize.inputs.affine_regularization_type = 'size'

        # specify workflow instance
        self.workflow = pe.Workflow(name='preprocess_workflow')

        # connect nodes
        self.workflow.connect([
            (self.struct_source, self.bet_struct, [('outfiles', 'in_file')]),
            (self.bet_struct, self.coregister_struct, [('out_file', 'source')
                                                       ]),
            (self.coregister_struct, self.segment_struct,
             [('coregistered_source', 'data')]),
            (self.segment_struct, self.normalize_struct,
             [('transformation_mat', 'parameter_file')]),
            (self.coregister_struct, self.normalize_struct,
             [('coregistered_source', 'apply_to_files')]),
            (self.segment_struct, self.normalize_gm, [('transformation_mat',
                                                       'parameter_file')]),
            (self.segment_struct, self.normalize_gm, [('native_gm_image',
                                                       'apply_to_files')]),
            (self.segment_struct, self.normalize_wm, [('transformation_mat',
                                                       'parameter_file')]),
            (self.segment_struct, self.normalize_wm, [('native_wm_image',
                                                       'apply_to_files')]),
            (self.segment_struct, self.normalize_csf, [('transformation_mat',
                                                        'parameter_file')]),
            (self.segment_struct, self.normalize_csf, [('native_csf_image',
                                                        'apply_to_files')]),
            (self.func_source, self.fslsplit, [('outfiles', 'in_file')]),
            (self.fslsplit, self.bet_func, [('out_files', 'in_files')]),
            (self.bet_func, self.fslmerge, [('out_files', 'in_files')]),
            (self.fslmerge, self.realign, [('merged_file', 'in_files')]),
            (self.realign, self.datasink, [('realignment_parameters',
                                            'realignment_parameters')]),
            (self.realign, self.coregister_func, [('mean_image', 'source')]),
            (self.realign, self.coregister_func, [('realigned_files',
                                                   'apply_to_files')]),
            (self.coregister_func, self.segment, [('coregistered_source',
                                                   'data')]),
            (self.segment, self.normalize_func, [('transformation_mat',
                                                  'parameter_file')]),
            (self.coregister_func, self.normalize_func, [('coregistered_files',
                                                          'apply_to_files')]),
            (self.normalize_func, self.smooth, [('normalized_files',
                                                 'in_files')]),  # end
            (self.normalize_func, self.datasink, [('normalized_files',
                                                   'before_smooth')]),
            (self.smooth, self.datasink, [('smoothed_files', 'final_out')]),
            (self.normalize_struct, self.datasink,
             [('normalized_files', 'standardized_struct_file')]),
            # (self.segment_struct, self.datasink, [('native_csf_image', 'csf'), ('native_gm_image', 'grey_matter'), ('native_wm_image', 'white_matter')])
            (self.normalize_gm, self.datasink, [('normalized_files',
                                                 'grey_matter')]),
            (self.normalize_wm, self.datasink, [('normalized_files',
                                                 'white_matter')]),
            (self.normalize_csf, self.datasink, [('normalized_files', 'csf')]),
        ])

        # backup workflow(s)
        self.workflow_only_fmri = pe.Workflow(name='preprocess_workflow')

        # connect nodes
        self.workflow_only_fmri.connect([
            (self.func_source, self.fslsplit, [('outfiles', 'in_file')]),
            (self.fslsplit, self.bet_func, [('out_files', 'in_files')]),
            (self.bet_func, self.fslmerge, [('out_files', 'in_files')]),
            (self.fslmerge, self.realign, [('merged_file', 'in_files')]),
            (self.realign, self.direct_normalize, [('realigned_files',
                                                    'apply_to_files')]),
            (self.direct_normalize, self.smooth, [('normalized_files',
                                                   'in_files')]),  # end
            (self.direct_normalize, self.datasink, [('normalized_files',
                                                     'before_smooth')]),
            (self.smooth, self.datasink, [('smoothed_files', 'final_out')])
        ])
Code Example #16
def create_workflow(files,
                    target_file,
                    subject_id,
                    TR,
                    slice_times,
                    norm_threshold=1,
                    num_components=5,
                    vol_fwhm=None,
                    surf_fwhm=None,
                    lowpass_freq=-1,
                    highpass_freq=-1,
                    subjects_dir=None,
                    sink_directory=os.getcwd(),
                    target_subject=['fsaverage3', 'fsaverage4'],
                    name='resting'):

    wf = Workflow(name=name)

    # Rename files in case they are named identically
    name_unique = MapNode(Rename(format_string='rest_%(run)02d'),
                          iterfield=['in_file', 'run'],
                          name='rename')
    name_unique.inputs.keep_ext = True
    name_unique.inputs.run = list(range(1, len(files) + 1))
    name_unique.inputs.in_file = files

    realign = Node(interface=spm.Realign(), name="realign")
    realign.inputs.jobtype = 'estwrite'

    num_slices = len(slice_times)
    slice_timing = Node(interface=spm.SliceTiming(), name="slice_timing")
    slice_timing.inputs.num_slices = num_slices
    slice_timing.inputs.time_repetition = TR
    slice_timing.inputs.time_acquisition = TR - TR / float(num_slices)
    slice_timing.inputs.slice_order = (np.argsort(slice_times) + 1).tolist()
    slice_timing.inputs.ref_slice = int(num_slices / 2)

    # Compute TSNR on realigned data, regressing polynomials up to order 2
    tsnr = MapNode(TSNR(regress_poly=2), iterfield=['in_file'], name='tsnr')
    wf.connect(slice_timing, 'timecorrected_files', tsnr, 'in_file')

    # Compute the median image across runs
    calc_median = Node(Function(input_names=['in_files'],
                                output_names=['median_file'],
                                function=median,
                                imports=imports),
                       name='median')
    wf.connect(tsnr, 'detrended_file', calc_median, 'in_files')
    """Segment and Register
    """

    registration = create_reg_workflow(name='registration')
    wf.connect(calc_median, 'median_file', registration,
               'inputspec.mean_image')
    registration.inputs.inputspec.subject_id = subject_id
    registration.inputs.inputspec.subjects_dir = subjects_dir
    registration.inputs.inputspec.target_image = target_file
    """Use :class:`nipype.algorithms.rapidart` to determine which of the
    images in the functional series are outliers based on deviations in
    intensity or movement.
    """

    art = Node(interface=ArtifactDetect(), name="art")
    art.inputs.use_differences = [True, True]
    art.inputs.use_norm = True
    art.inputs.norm_threshold = norm_threshold
    art.inputs.zintensity_threshold = 9
    art.inputs.mask_type = 'spm_global'
    art.inputs.parameter_source = 'SPM'
    """Here we are connecting all the nodes together. Notice that we add the merge node only if you choose
    to use 4D. Also `get_vox_dims` function is passed along the input volume of normalise to set the optimal
    voxel sizes.
    """

    wf.connect([
        (name_unique, realign, [('out_file', 'in_files')]),
        (realign, slice_timing, [('realigned_files', 'in_files')]),
        (slice_timing, art, [('timecorrected_files', 'realigned_files')]),
        (realign, art, [('realignment_parameters', 'realignment_parameters')]),
    ])

    def selectindex(files, idx):
        import numpy as np
        from nipype.utils.filemanip import filename_to_list, list_to_filename
        return list_to_filename(
            np.array(filename_to_list(files))[idx].tolist())

    mask = Node(fsl.BET(), name='getmask')
    mask.inputs.mask = True
    wf.connect(calc_median, 'median_file', mask, 'in_file')

    # get segmentation in normalized functional space

    def merge_files(in1, in2):
        out_files = filename_to_list(in1)
        out_files.extend(filename_to_list(in2))
        return out_files

    # filter some noise

    # Compute motion regressors
    motreg = Node(Function(
        input_names=['motion_params', 'order', 'derivatives'],
        output_names=['out_files'],
        function=motion_regressors,
        imports=imports),
                  name='getmotionregress')
    wf.connect(realign, 'realignment_parameters', motreg, 'motion_params')

    # Create a filter to remove motion and art confounds
    createfilter1 = Node(Function(
        input_names=['motion_params', 'comp_norm', 'outliers', 'detrend_poly'],
        output_names=['out_files'],
        function=build_filter1,
        imports=imports),
                         name='makemotionbasedfilter')
    createfilter1.inputs.detrend_poly = 2
    wf.connect(motreg, 'out_files', createfilter1, 'motion_params')
    wf.connect(art, 'norm_files', createfilter1, 'comp_norm')
    wf.connect(art, 'outlier_files', createfilter1, 'outliers')

    filter1 = MapNode(fsl.GLM(out_f_name='F_mcart.nii',
                              out_pf_name='pF_mcart.nii',
                              demean=True),
                      iterfield=['in_file', 'design', 'out_res_name'],
                      name='filtermotion')

    wf.connect(slice_timing, 'timecorrected_files', filter1, 'in_file')
    wf.connect(slice_timing, ('timecorrected_files', rename, '_filtermotart'),
               filter1, 'out_res_name')
    wf.connect(createfilter1, 'out_files', filter1, 'design')

    createfilter2 = MapNode(Function(input_names=[
        'realigned_file', 'mask_file', 'num_components', 'extra_regressors'
    ],
                                     output_names=['out_files'],
                                     function=extract_noise_components,
                                     imports=imports),
                            iterfield=['realigned_file', 'extra_regressors'],
                            name='makecompcorrfilter')
    createfilter2.inputs.num_components = num_components

    wf.connect(createfilter1, 'out_files', createfilter2, 'extra_regressors')
    wf.connect(filter1, 'out_res', createfilter2, 'realigned_file')
    wf.connect(registration,
               ('outputspec.segmentation_files', selectindex, [0, 2]),
               createfilter2, 'mask_file')

    filter2 = MapNode(fsl.GLM(out_f_name='F.nii',
                              out_pf_name='pF.nii',
                              demean=True),
                      iterfield=['in_file', 'design', 'out_res_name'],
                      name='filter_noise_nosmooth')
    wf.connect(filter1, 'out_res', filter2, 'in_file')
    wf.connect(filter1, ('out_res', rename, '_cleaned'), filter2,
               'out_res_name')
    wf.connect(createfilter2, 'out_files', filter2, 'design')
    wf.connect(mask, 'mask_file', filter2, 'mask')

    bandpass = Node(Function(
        input_names=['files', 'lowpass_freq', 'highpass_freq', 'fs'],
        output_names=['out_files'],
        function=bandpass_filter,
        imports=imports),
                    name='bandpass_unsmooth')
    bandpass.inputs.fs = 1. / TR
    bandpass.inputs.highpass_freq = highpass_freq
    bandpass.inputs.lowpass_freq = lowpass_freq
    wf.connect(filter2, 'out_res', bandpass, 'files')
    """Smooth the functional data using
    :class:`nipype.interfaces.spm.Smooth`.
    """

    smooth = Node(interface=spm.Smooth(), name="smooth")
    smooth.inputs.fwhm = vol_fwhm

    wf.connect(bandpass, 'out_files', smooth, 'in_files')

    collector = Node(Merge(2), name='collect_streams')
    wf.connect(smooth, 'smoothed_files', collector, 'in1')
    wf.connect(bandpass, 'out_files', collector, 'in2')
    """
    Transform the remaining images. First to anatomical and then to target
    """

    warpall = MapNode(ants.ApplyTransforms(),
                      iterfield=['input_image'],
                      name='warpall')
    warpall.inputs.input_image_type = 3
    warpall.inputs.interpolation = 'Linear'
    warpall.inputs.invert_transform_flags = [False, False]
    warpall.inputs.terminal_output = 'file'
    warpall.inputs.reference_image = target_file
    warpall.inputs.args = '--float'
    warpall.inputs.num_threads = 1

    # transform to target
    wf.connect(collector, 'out', warpall, 'input_image')
    wf.connect(registration, 'outputspec.transforms', warpall, 'transforms')

    mask_target = Node(fsl.ImageMaths(op_string='-bin'), name='target_mask')

    wf.connect(registration, 'outputspec.anat2target', mask_target, 'in_file')

    maskts = MapNode(fsl.ApplyMask(), iterfield=['in_file'], name='ts_masker')
    wf.connect(warpall, 'output_image', maskts, 'in_file')
    wf.connect(mask_target, 'out_file', maskts, 'mask_file')

    # map to surface
    # extract aparc+aseg ROIs
    # extract subcortical ROIs
    # extract target space ROIs
    # combine subcortical and cortical rois into a single cifti file

    #######
    # Convert aparc to subject functional space

    # Sample the average time series in aparc ROIs
    sampleaparc = MapNode(
        freesurfer.SegStats(default_color_table=True),
        iterfield=['in_file', 'summary_file', 'avgwf_txt_file'],
        name='aparc_ts')
    sampleaparc.inputs.segment_id = ([8] + list(range(10, 14)) +
                                     [17, 18, 26, 47] + list(range(49, 55)) +
                                     [58] + list(range(1001, 1036)) +
                                     list(range(2001, 2036)))

    wf.connect(registration, 'outputspec.aparc', sampleaparc,
               'segmentation_file')
    wf.connect(collector, 'out', sampleaparc, 'in_file')

    def get_names(files, suffix):
        """Generate appropriate names for output files
        """
        from nipype.utils.filemanip import (split_filename, filename_to_list,
                                            list_to_filename)
        out_names = []
        for filename in files:
            _, name, _ = split_filename(filename)
            out_names.append(name + suffix)
        return list_to_filename(out_names)

    wf.connect(collector, ('out', get_names, '_avgwf.txt'), sampleaparc,
               'avgwf_txt_file')
    wf.connect(collector, ('out', get_names, '_summary.stats'), sampleaparc,
               'summary_file')

    # Sample the time series onto the surface of the target surface. Performs
    # sampling into left and right hemisphere
    target = Node(IdentityInterface(fields=['target_subject']), name='target')
    target.iterables = ('target_subject', filename_to_list(target_subject))

    samplerlh = MapNode(freesurfer.SampleToSurface(),
                        iterfield=['source_file'],
                        name='sampler_lh')
    samplerlh.inputs.sampling_method = "average"
    samplerlh.inputs.sampling_range = (0.1, 0.9, 0.1)
    samplerlh.inputs.sampling_units = "frac"
    samplerlh.inputs.interp_method = "trilinear"
    samplerlh.inputs.smooth_surf = surf_fwhm
    # samplerlh.inputs.cortex_mask = True
    samplerlh.inputs.out_type = 'niigz'
    samplerlh.inputs.subjects_dir = subjects_dir

    samplerrh = samplerlh.clone('sampler_rh')

    samplerlh.inputs.hemi = 'lh'
    wf.connect(collector, 'out', samplerlh, 'source_file')
    wf.connect(registration, 'outputspec.out_reg_file', samplerlh, 'reg_file')
    wf.connect(target, 'target_subject', samplerlh, 'target_subject')

    samplerrh.set_input('hemi', 'rh')
    wf.connect(collector, 'out', samplerrh, 'source_file')
    wf.connect(registration, 'outputspec.out_reg_file', samplerrh, 'reg_file')
    wf.connect(target, 'target_subject', samplerrh, 'target_subject')

    # Combine left and right hemisphere to text file
    combiner = MapNode(Function(input_names=['left', 'right'],
                                output_names=['out_file'],
                                function=combine_hemi,
                                imports=imports),
                       iterfield=['left', 'right'],
                       name="combiner")
    wf.connect(samplerlh, 'out_file', combiner, 'left')
    wf.connect(samplerrh, 'out_file', combiner, 'right')

    # Sample the time series file for each subcortical roi
    ts2txt = MapNode(Function(
        input_names=['timeseries_file', 'label_file', 'indices'],
        output_names=['out_file'],
        function=extract_subrois,
        imports=imports),
                     iterfield=['timeseries_file'],
                     name='getsubcortts')
    ts2txt.inputs.indices = [8] + list(range(10, 14)) + [17, 18, 26, 47] +\
        list(range(49, 55)) + [58]
    ts2txt.inputs.label_file = \
        os.path.abspath(('OASIS-TRT-20_jointfusion_DKT31_CMA_labels_in_MNI152_'
                         '2mm_v2.nii.gz'))
    wf.connect(maskts, 'out_file', ts2txt, 'timeseries_file')

    ######

    substitutions = [('_target_subject_', ''),
                     ('_filtermotart_cleaned_bp_trans_masked', ''),
                     ('_filtermotart_cleaned_bp', '')]
    regex_subs = [
        ('_ts_masker.*/sar', '/smooth/'),
        ('_ts_masker.*/ar', '/unsmooth/'),
        ('_combiner.*/sar', '/smooth/'),
        ('_combiner.*/ar', '/unsmooth/'),
        ('_aparc_ts.*/sar', '/smooth/'),
        ('_aparc_ts.*/ar', '/unsmooth/'),
        ('_getsubcortts.*/sar', '/smooth/'),
        ('_getsubcortts.*/ar', '/unsmooth/'),
        ('series/sar', 'series/smooth/'),
        ('series/ar', 'series/unsmooth/'),
        ('_inverse_transform./', ''),
    ]
    # Save the relevant data into an output directory
    datasink = Node(interface=DataSink(), name="datasink")
    datasink.inputs.base_directory = sink_directory
    datasink.inputs.container = subject_id
    datasink.inputs.substitutions = substitutions
    datasink.inputs.regexp_substitutions = regex_subs  # (r'(/_.*(\d+/))', r'/run\2')
    wf.connect(realign, 'realignment_parameters', datasink,
               'resting.qa.motion')
    wf.connect(art, 'norm_files', datasink, 'resting.qa.art.@norm')
    wf.connect(art, 'intensity_files', datasink, 'resting.qa.art.@intensity')
    wf.connect(art, 'outlier_files', datasink, 'resting.qa.art.@outlier_files')
    wf.connect(registration, 'outputspec.segmentation_files', datasink,
               'resting.mask_files')
    wf.connect(registration, 'outputspec.anat2target', datasink,
               'resting.qa.ants')
    wf.connect(mask, 'mask_file', datasink, 'resting.mask_files.@brainmask')
    wf.connect(mask_target, 'out_file', datasink, 'resting.mask_files.target')
    wf.connect(filter1, 'out_f', datasink, 'resting.qa.compmaps.@mc_F')
    wf.connect(filter1, 'out_pf', datasink, 'resting.qa.compmaps.@mc_pF')
    wf.connect(filter2, 'out_f', datasink, 'resting.qa.compmaps')
    wf.connect(filter2, 'out_pf', datasink, 'resting.qa.compmaps.@p')
    wf.connect(bandpass, 'out_files', datasink,
               'resting.timeseries.@bandpassed')
    wf.connect(smooth, 'smoothed_files', datasink,
               'resting.timeseries.@smoothed')
    wf.connect(createfilter1, 'out_files', datasink,
               'resting.regress.@regressors')
    wf.connect(createfilter2, 'out_files', datasink,
               'resting.regress.@compcorr')
    wf.connect(maskts, 'out_file', datasink, 'resting.timeseries.target')
    wf.connect(sampleaparc, 'summary_file', datasink,
               'resting.parcellations.aparc')
    wf.connect(sampleaparc, 'avgwf_txt_file', datasink,
               'resting.parcellations.aparc.@avgwf')
    wf.connect(ts2txt, 'out_file', datasink,
               'resting.parcellations.grayo.@subcortical')

    datasink2 = Node(interface=DataSink(), name="datasink2")
    datasink2.inputs.base_directory = sink_directory
    datasink2.inputs.container = subject_id
    datasink2.inputs.substitutions = substitutions
    datasink2.inputs.regexp_substitutions = regex_subs  # (r'(/_.*(\d+/))', r'/run\2')
    wf.connect(combiner, 'out_file', datasink2,
               'resting.parcellations.grayo.@surface')
    return wf
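
# A minimal usage sketch for the workflow factory above (names and paths are
# illustrative, not from the original):
#
#     wf = create_workflow(...)  # however this factory is actually invoked
#     wf.base_dir = '/tmp/workdir'
#     wf.run(plugin='MultiProc', plugin_args={'n_procs': 4})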
Code example #17
def mod_realign(node, in_file, tr, do_slicetime, sliceorder, parameters={}):
    import nipype.interfaces.fsl as fsl
    import nipype.interfaces.spm as spm
    import nipype.interfaces.nipy as nipy
    import os
    parameter_source = "FSL"
    keys = parameters.keys()
    if node == "nipy":
        realign = nipy.FmriRealign4d()
        realign.inputs.in_file = in_file
        realign.inputs.tr = tr
        if "loops" in keys:
            realign.inputs.loops = parameters["loops"]
        if "speedup" in keys:
            realign.inputs.speedup = parameters["speedup"]
        if "between_loops" in keys:
            realign.inputs.between_loops = parameters["between_loops"]
        if do_slicetime:
            realign.inputs.slice_order = sliceorder
            realign.inputs.time_interp = True
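            # With time_interp=True, FmriRealign4d corrects slice timing and
            # motion in a single 4D registration step.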

        res = realign.run()
        out_file = res.outputs.out_file
        par_file = res.outputs.par_file

    elif node == "fsl":
        if not isinstance(in_file, list):
            in_file = [in_file]
        out_file = []
        par_file = []
        # get the first volume of first run as ref file
        if not do_slicetime:
            extract = fsl.ExtractROI()
            extract.inputs.t_min = 0
            extract.inputs.t_size = 1
            extract.inputs.in_file = in_file[0]
            ref_vol = extract.run().outputs.roi_file

        for idx, file in enumerate(in_file):
            if do_slicetime:
                slicetime = fsl.SliceTimer()
                slicetime.inputs.in_file = file
                sliceorder_file = os.path.abspath('FSL_custom_order.txt')
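                # FSL's custom slice-order file is 1-based, hence t + 1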
                with open(sliceorder_file, 'w') as custom_order_fp:
                    for t in sliceorder:
                        custom_order_fp.write('%d\n' % (t + 1))
                slicetime.inputs.custom_order = sliceorder_file
                slicetime.inputs.time_repetition = tr
                res = slicetime.run()
                file_to_realign = res.outputs.slice_time_corrected_file
                if not idx:
                    extract = fsl.ExtractROI()
                    extract.inputs.t_min = 0
                    extract.inputs.t_size = 1
                    extract.inputs.in_file = file_to_realign
                    ref_vol = extract.run().outputs.roi_file
            else:
                file_to_realign = file
            realign = fsl.MCFLIRT(interpolation='spline', ref_file=ref_vol)
            realign.inputs.save_plots = True
            realign.inputs.mean_vol = True
            realign.inputs.in_file = file_to_realign
            realign.inputs.out_file = 'fsl_corr_' + \
                                      os.path.split(file_to_realign)[1]
            Realign_res = realign.run()
            out_file.append(Realign_res.outputs.out_file)
            par_file.append(Realign_res.outputs.par_file)

    elif node == 'spm':
        import numpy as np
        import nibabel as nib
        import nipype.interfaces.freesurfer as fs
        if not isinstance(in_file, list):
            in_file = [in_file]
        new_in_file = []
        for f in in_file:
            if f.endswith('.nii.gz'):
                convert = fs.MRIConvert()
                convert.inputs.in_file = f
                convert.inputs.out_type = 'nii'
                convert.inputs.in_type = 'niigz'
                f = convert.run().outputs.out_file
                new_in_file.append(f)
            else:
                new_in_file.append(f)
        if do_slicetime:
            img = nib.load(new_in_file[0])
            num_slices = img.shape[2]
            st = spm.SliceTiming()
            st.inputs.in_files = new_in_file
            st.inputs.num_slices = num_slices
            st.inputs.time_repetition = tr
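            # TA is conventionally TR - TR/nslices; SPM expects 1-based
            # slice indices, hence the +1 shift below.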
            st.inputs.time_acquisition = tr - tr / num_slices
            st.inputs.slice_order = (np.asarray(sliceorder) +
                                     1).astype(int).tolist()
            st.inputs.ref_slice = 1
            res_st = st.run()
            file_to_realign = res_st.outputs.timecorrected_files
        else:
            file_to_realign = new_in_file
        par_file = []
        realign = spm.Realign()
        realign.inputs.in_files = file_to_realign
        #realign.inputs.out_prefix = 'spm_corr_'
        res = realign.run()
        parameters = res.outputs.realignment_parameters
        if not isinstance(parameters, list):
            parameters = [parameters]
        par_file = parameters
        parameter_source = 'SPM'
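        # Zero out NaNs in place (fslmaths -nan) before returning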
        fsl.ImageMaths(in_file=res.outputs.realigned_files,
                       out_file=res.outputs.realigned_files,
                       op_string='-nan').run()
        out_file = res.outputs.realigned_files
    elif node == 'afni':
        import nipype.interfaces.afni as afni
        import nibabel as nib
        import numpy as np
        if not isinstance(in_file, list):
            in_file = [in_file]
        img = nib.load(in_file[0])
        Nz = img.shape[2]
        out_file = []
        par_file = []
        # get the first volume of first run as ref file
        if not do_slicetime:
            extract = fsl.ExtractROI()
            extract.inputs.t_min = 0
            extract.inputs.t_size = 1
            extract.inputs.in_file = in_file[0]
            ref_vol = extract.run().outputs.roi_file

        for idx, file in enumerate(in_file):
            if do_slicetime:
                slicetime = afni.TShift()
                slicetime.inputs.in_file = file
                # Build AFNI's -tpattern file: the i-th slice in acquisition
                # order is acquired at offset i*TR/Nz; write the offsets
                # sorted by spatial slice index.
                tpattern = []
                for i in range(len(sliceorder)):
                    tpattern.append((i * tr / float(Nz), sliceorder[i]))
                tpattern.sort(key=lambda x: x[1])
                with open(os.path.abspath('afni_custom_order_file.txt'),
                          'w') as custom_order:
                    for i, t in enumerate(tpattern):
                        print('%f\n' % t[0])
                        custom_order.write('%f\n' % t[0])

                slicetime.inputs.args = '-tpattern @%s' % os.path.abspath(
                    'afni_custom_order_file.txt')
                slicetime.inputs.tr = str(tr) + 's'
                slicetime.inputs.outputtype = 'NIFTI_GZ'
                res = slicetime.run()
                file_to_realign = res.outputs.out_file

                if not idx:
                    extract = fsl.ExtractROI()
                    extract.inputs.t_min = 0
                    extract.inputs.t_size = 1
                    extract.inputs.in_file = file_to_realign
                    ref_vol = extract.run().outputs.roi_file

            else:
                file_to_realign = file

            realign = afni.Volreg()
            realign.inputs.in_file = file_to_realign
            realign.inputs.out_file = "afni_corr_" + os.path.split(
                file_to_realign)[1]
            realign.inputs.oned_file = "afni_realignment_parameters.par"
            realign.inputs.basefile = ref_vol
            Realign_res = realign.run()
            out_file.append(Realign_res.outputs.out_file)

            parameters = Realign_res.outputs.oned_file
            if not isinstance(parameters, list):
                parameters = [parameters]
            for i, p in enumerate(parameters):
                # Reorder AFNI's 3dvolreg parameters into an FSL-style .par
                # layout (rotations first) and convert the rotations from
                # degrees to radians.
                params = np.genfromtxt(p)
                reordered = params[:, [1, 2, 0, 4, 5, 3]]
                reordered[:, :3] = reordered[:, :3] * np.pi / 180
                par_path = os.path.abspath('realignment_parameters_%d.par' % i)
                np.savetxt(par_path, reordered, delimiter='\t')
                par_file.append(par_path)

            #par_file.append(Realign_res.outputs.oned_file)

    return out_file, par_file, parameter_source
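
# A usage sketch for ``mod_realign`` (file name, TR and slice order are
# illustrative):
#
#     out, par, src = mod_realign('fsl', 'rest.nii', tr=2.0,
#                                 do_slicetime=True,
#                                 sliceorder=list(range(32)))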
Code example #18
Setup preprocessing workflow
----------------------------

This is a generic preprocessing workflow that can be used by different analyses.

"""

preproc = pe.Workflow(name='preproc')
"""Use :class:`nipype.interfaces.spm.Realign` for motion correction
and register all images to the mean image.
"""

realign = pe.Node(interface=spm.Realign(), name="realign")

slice_timing = pe.Node(interface=spm.SliceTiming(), name="slice_timing")
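
# Typical SliceTiming inputs would be set along these lines (values are
# illustrative, not from this tutorial):
# slice_timing.inputs.num_slices = 32
# slice_timing.inputs.time_repetition = 2.0
# slice_timing.inputs.time_acquisition = 2.0 - 2.0 / 32
# slice_timing.inputs.slice_order = list(range(32, 0, -1))
# slice_timing.inputs.ref_slice = 16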
"""Use :class:`nipype.interfaces.spm.Coregister` to perform a rigid
body registration of the functional data to the structural data.
"""

coregister = pe.Node(interface=spm.Coregister(), name="coregister")
coregister.inputs.jobtype = 'estimate'

segment = pe.Node(interface=spm.Segment(), name="segment")
segment.inputs.save_bias_corrected = True
"""Uncomment the following line for faster execution
"""

# segment.inputs.gaussians_per_class = [1, 1, 1, 4]
"""Warp functional and structural data to SPM's T1 template using
:class:`nipype.interfaces.spm.Normalize`.  The tutorial data set
Code example #19
                                name='camino_VtkStreamlines')

#Wraps the executable command ``dt2nii``.
camino_DT2NIfTI = pe.Node(interface=camino.DT2NIfTI(), name='camino_DT2NIfTI')

#Wraps the executable command ``erode``.
mrtrix_Erode = pe.Node(interface=mrtrix.Erode(), name='mrtrix_Erode')

#Converts separate b-values and b-vectors from text files (FSL style) into a
#4xN text file in MRTrix format.
mrtrix_FSL2MRTrix = pe.Node(interface=mrtrix.FSL2MRTrix(),
                            name='mrtrix_FSL2MRTrix')

#Wraps the executable command ``mrconvert``.
mrtrix_MRConvert = pe.Node(interface=mrtrix.MRConvert(),
                           name='mrtrix_MRConvert')

#Use spm to perform slice timing correction.
spm_SliceTiming = pe.Node(interface=spm.SliceTiming(), name='spm_SliceTiming')

#Uses spm_reslice to resample in_file into the space of space_defining.
spm_Reslice = pe.Node(interface=spm.Reslice(), name='spm_Reslice')

#Create a workflow to connect all those nodes
analysisflow = nipype.Workflow('MyWorkflow')

#Run the workflow
plugin = 'MultiProc'  # set your preferred execution plugin here
plugin_args = {'n_procs': 1}  # with n_procs=1 MultiProc runs serially; raise to parallelize
analysisflow.write_graph(graph2use='flat', format='png', simple_form=False)
analysisflow.run(plugin=plugin, plugin_args=plugin_args)