Example #1
    def __init__(self, datasink, TR, num_vol):
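        # assumes the usual nipype imports at module level:
        #   import nipype.pipeline.engine as pe
        #   import nipype.algorithms.modelgen as model
        #   import nipype.interfaces.spm as spm
        #   from nipype.interfaces.base import Bunch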
        # specify input and output nodes
        self.datasink = datasink
        self.TR = TR
        self.num_vol = num_vol

        # specify nodes
        # SpecifyModel - Generates SPM-specific Model
        self.modelspec = pe.Node(interface=model.SpecifySPMModel(),
                                 name='model_specification')
        self.modelspec.inputs.input_units = 'secs'
        self.modelspec.inputs.output_units = 'secs'
        self.modelspec.inputs.time_repetition = self.TR
        self.modelspec.inputs.high_pass_filter_cutoff = 128
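        # single dummy condition: one onset per volume index, each event 0.5 s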
        subjectinfo = [
            Bunch(conditions=['None'],
                  onsets=[list(range(self.num_vol))],
                  durations=[[0.5]])
        ]
        self.modelspec.inputs.subject_info = subjectinfo

        # Level1Design - Generates an SPM design matrix
        self.level1design = pe.Node(interface=spm.Level1Design(),
                                    name='first_level_design')
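        # HRF basis with time and dispersion derivatives ([time, dispersion])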
        self.level1design.inputs.bases = {'hrf': {'derivs': [1, 1]}}
        self.level1design.inputs.interscan_interval = self.TR
        self.level1design.inputs.timing_units = 'secs'

        # EstimateModel - estimate the parameters of the model
        # method can be 'Classical', 'Bayesian' or 'Bayesian2'
        self.level1estimate = pe.Node(interface=spm.EstimateModel(),
                                      name="first_level_estimate")
        self.level1estimate.inputs.estimation_method = {'Classical': 1}

        self.threshold = pe.Node(interface=spm.Threshold(), name="threshold")
        self.threshold.inputs.contrast_index = 1

        # EstimateContrast - estimates contrasts
        self.contrast_estimate = pe.Node(interface=spm.EstimateContrast(),
                                         name="contrast_estimate")
        cont1 = ('active > rest', 'T', ['None'], [1])
        contrasts = [cont1]
        self.contrast_estimate.inputs.contrasts = contrasts

        # specify workflow instance
        self.workflow = pe.Workflow(name='first_level_analysis_workflow')

        # connect nodes
        self.workflow.connect([
            (self.modelspec, self.level1design, [('session_info',
                                                  'session_info')]),
            (self.level1design, self.level1estimate, [('spm_mat_file',
                                                       'spm_mat_file')]),
            (self.level1estimate, self.contrast_estimate,
             [('spm_mat_file', 'spm_mat_file'), ('beta_images', 'beta_images'),
              ('residual_image', 'residual_image')]),
            # (self.contrast_estimate, self.threshold, [('spm_mat_file', 'spm_mat_file'), ('spmT_images', 'stat_image')]),
            (self.contrast_estimate, self.datasink,
             [('con_images', 'contrast_img'), ('spmT_images', 'contrast_T')])
        ])
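
A minimal usage sketch for this constructor. The enclosing class name is not
shown in the snippet, so FirstLevelAnalysis below is an assumption, and the
functional runs still have to be assigned before running:

import nipype.pipeline.engine as pe
import nipype.interfaces.io as nio

datasink = pe.Node(nio.DataSink(base_directory='/tmp/first_level'),
                   name='datasink')
analysis = FirstLevelAnalysis(datasink, TR=2.0, num_vol=200)  # assumed name
analysis.modelspec.inputs.functional_runs = ['func.nii']  # still required
analysis.workflow.run()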
Example #2
                                    (('timecorrected_files', get_vox_dims),
                                     'write_voxel_sizes')]),
    (normalize_func, smooth, [('normalized_files', 'in_files')]),
])
"""
Set up analysis workflow
------------------------

"""

l1analysis = pe.Workflow(name='analysis')
"""Generate SPM-specific design information using
:class:`nipype.interfaces.spm.SpecifyModel`.
"""

modelspec = pe.Node(interface=model.SpecifySPMModel(), name="modelspec")
"""Generate a first level SPM.mat file for analysis
:class:`nipype.interfaces.spm.Level1Design`.
"""

level1design = pe.Node(interface=spm.Level1Design(), name="level1design")
"""Use :class:`nipype.interfaces.spm.EstimateModel` to determine the
parameters of the model.
"""

level1estimate = pe.Node(interface=spm.EstimateModel(), name="level1estimate")
level1estimate.inputs.estimation_method = {'Classical': 1}

threshold = pe.Node(interface=spm.Threshold(), name="threshold")
"""Use :class:`nipype.interfaces.spm.EstimateContrast` to estimate the
first level contrasts specified in a few steps above.
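
The fragment breaks off before the nodes are connected; a sketch of the usual
wiring for this workflow (mirroring Example #1) would be:

l1analysis.connect([
    (modelspec, level1design, [('session_info', 'session_info')]),
    (level1design, level1estimate, [('spm_mat_file', 'spm_mat_file')]),
])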
Example #3
    (normalize_and_smooth_func, art,
     [('normalized_files', 'realigned_files')]),
    (skullstrip, art, [('mask_file', 'mask_file')]),
])
"""
Set up analysis workflow
------------------------

"""

l1analysis = pe.Workflow(name='analysis')
"""Generate SPM-specific design information using
:class:`nipype.interfaces.spm.SpecifyModel`.
"""

modelspec = pe.Node(model.SpecifySPMModel(), name="modelspec")
modelspec.inputs.concatenate_runs = True
"""Generate a first level SPM.mat file for analysis
:class:`nipype.interfaces.spm.Level1Design`.
"""

level1design = pe.Node(spm.Level1Design(), name="level1design")
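# canonical HRF only, no time or dispersion derivatives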
level1design.inputs.bases = {'hrf': {'derivs': [0, 0]}}
"""Use :class:`nipype.interfaces.spm.EstimateModel` to determine the
parameters of the model.
"""

level1estimate = pe.Node(spm.EstimateModel(), name="level1estimate")
level1estimate.inputs.estimation_method = {'Classical': 1}
"""Use :class:`nipype.interfaces.spm.EstimateContrast` to estimate the
first level contrasts specified in a few steps above.
    fields=['subj_id', 'task', 'timept', 'kernel', 'contrasts'],
    contrasts=contrast_list),
                 name='info_lvl1')

subjs = get_subject_list(output_dir, 'exo')
print(len(subjs))
tasks = ["fp_run1", "fp_run2"]  # TODO turn into paradigm object field
timepts = [1, 2, 3, 4]
kernels = fwhmlist
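# iterables expand the workflow over the cross-product of these lists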
info_lvl1.iterables = [("subj_id", subjs[-20:]), ("task", tasks),
                       ('timept', timepts[0:2]), ('kernel', kernels)]

# Specify first level generically
from nipype.algorithms import modelgen

modelspec = Node(interface=modelgen.SpecifySPMModel(), name='modelspec')
modelspec.inputs.input_units = 'secs'
modelspec.inputs.time_repetition = 2  # TODO change to read from file header to make more flexible
modelspec.inputs.high_pass_filter_cutoff = 128
if len(tasks) > 1:
    modelspec.inputs.concatenate_runs = False

# Load the parameters for the model design
templates_func = {}
par_info = paradigm_info(
    glob.glob(os.path.join(
        data_dir,
        'experimental_params*.csv'))[0])  # Returns dictionary with parameters

for t, task in enumerate(tasks):
    if not modelspec.inputs.subject_info:
Example #5
                                             function=_specify_contrast),
                                name='contrasts_of_interest')
contrasts_of_interest.inputs.conditions = CONDITIONS
workflow.connect(infosource, 'subject_id', contrasts_of_interest, 'subject_id')

# fmri model specifications
unzip_source = pe.MapNode(misc.Gunzip(),
                          iterfield=['in_file'],
                          name='unzip_source')
workflow.connect(datasource, 'func', unzip_source, 'in_file')

smooth = pe.Node(interface=spm.Smooth(fwhm=[8, 8, 8]),
                 name='smooth')
workflow.connect(unzip_source, 'out_file', smooth, 'in_files')

modelspec = pe.Node(interface=modelgen.SpecifySPMModel(),
                    name='modelspec')
modelspec.inputs.input_units = 'secs'
modelspec.inputs.output_units = 'secs'
modelspec.inputs.time_repetition = TR
modelspec.inputs.high_pass_filter_cutoff = HIGHPASS_CUTOFF
workflow.connect(get_session_informations, 'informations', modelspec, 'subject_info')
workflow.connect(smooth, 'smoothed_files', modelspec, 'functional_runs')

# merge runs's masks
merge_masks = pe.Node(interface=fsl.Merge(dimension='t'),
                      name='merge_masks')
workflow.connect(datasource, 'mask', merge_masks, 'in_files')

# create mean runs mask
mean_mask = pe.Node(interface=fsl.MeanImage(args='-bin', output_type='NIFTI'),
                    name='mean_mask')
Example #6
    print("S%d" % subj, end=' ')
    sys.stdout.flush()

    for f in glob.glob(output_dir + 'S' + str(subj) + '/by_category/*.mat'):
        os.remove(f)

    for f in glob.glob(output_dir + 'S' + str(subj) + '/by_category/*.nii'):
        os.remove(f)

    os.chdir(output_dir + 'S' + str(subj) + '/by_category')

    print "Specify model",
    sys.stdout.flush()

    modelspec = model.SpecifySPMModel()

    modelspec.inputs.input_units = 'secs'
    modelspec.inputs.output_units = 'secs'
    modelspec.inputs.time_repetition = TR
    modelspec.inputs.high_pass_filter_cutoff = 128
    modelspec.inputs.functional_runs = [
        data_dir + 'nifti/' + method + '/picture/S' + str(subj) + '_picture_' +
        method + '.nii'
    ]
    modelspec.inputs.subject_info = get_picture_category_info(subj)

    out = modelspec.run()

    print "- Design",
    sys.stdout.flush()
Example #7
def create_model_fit_pipeline(high_pass_filter_cutoff=128,
                              nipy=False,
                              ar1=True,
                              name="model",
                              save_residuals=False):
    inputnode = pe.Node(interface=util.IdentityInterface(fields=[
        'outlier_files', "realignment_parameters", "functional_runs", "mask",
        'conditions', 'onsets', 'durations', 'TR', 'contrasts', 'units',
        'sparse'
    ]),
                        name="inputnode")

    modelspec = pe.Node(interface=model.SpecifySPMModel(), name="modelspec")
    if high_pass_filter_cutoff:
        modelspec.inputs.high_pass_filter_cutoff = high_pass_filter_cutoff

    create_subject_info = pe.Node(interface=util.Function(
        input_names=['conditions', 'onsets', 'durations'],
        output_names=['subject_info'],
        function=create_subject_inf),
                                  name="create_subject_info")

    modelspec.inputs.concatenate_runs = True
    #modelspec.inputs.input_units             = units
    modelspec.inputs.output_units = "secs"
    #modelspec.inputs.time_repetition         = tr
    #modelspec.inputs.subject_info = subjectinfo

    model_pipeline = pe.Workflow(name=name)

    model_pipeline.connect([
        (inputnode, create_subject_info, [('conditions', 'conditions'),
                                          ('onsets', 'onsets'),
                                          ('durations', 'durations')]),
        (inputnode, modelspec, [('realignment_parameters',
                                 'realignment_parameters'),
                                ('functional_runs', 'functional_runs'),
                                ('outlier_files', 'outlier_files'),
                                ('units', 'input_units'),
                                ('TR', 'time_repetition')]),
        (create_subject_info, modelspec, [('subject_info', 'subject_info')]),
    ])

    if nipy:
        model_estimate = pe.Node(interface=FitGLM(), name="level1estimate")
        model_estimate.inputs.normalize_design_matrix = True
        model_estimate.inputs.save_residuals = save_residuals
        if ar1:
            model_estimate.inputs.model = "ar1"
            model_estimate.inputs.method = "kalman"
        else:
            model_estimate.inputs.model = "spherical"
            model_estimate.inputs.method = "ols"

        model_pipeline.connect([
            (modelspec, model_estimate, [('session_info', 'session_info')]),
            # TR and mask are supplied through the inputnode
            (inputnode, model_estimate, [('mask', 'mask'),
                                         ('TR', 'TR')])
        ])

        # contrasts are supplied at run time through the inputnode
        contrast_estimate = pe.Node(interface=EstimateContrast(),
                                    name="contrastestimate")
        model_pipeline.connect([
            (model_estimate, contrast_estimate,
             [("beta", "beta"), ("nvbeta", "nvbeta"), ("s2", "s2"),
              ("dof", "dof"), ("axis", "axis"), ("constants", "constants"),
              ("reg_names", "reg_names")]),
            (inputnode, contrast_estimate, [('contrasts', 'contrasts'),
                                            ('mask', 'mask')]),
        ])
    else:
        level1design = pe.Node(interface=spm.Level1Design(),
                               name="level1design")
        level1design.inputs.bases = {'hrf': {'derivs': [0, 0]}}
        if ar1:
            level1design.inputs.model_serial_correlations = "AR(1)"
        else:
            level1design.inputs.model_serial_correlations = "none"

        level1design.inputs.timing_units = modelspec.inputs.output_units

        #level1design.inputs.interscan_interval = modelspec.inputs.time_repetition
        #        if sparse:
        #            level1design.inputs.microtime_resolution = n_slices*2
        #        else:
        #            level1design.inputs.microtime_resolution = n_slices
        #level1design.inputs.microtime_onset = ref_slice

        microtime_resolution = pe.Node(interface=util.Function(
            input_names=['volume', 'sparse'],
            output_names=['microtime_resolution'],
            function=_get_microtime_resolution),
                                       name="microtime_resolution")

        level1estimate = pe.Node(interface=spm.EstimateModel(),
                                 name="level1estimate")
        level1estimate.inputs.estimation_method = {'Classical': 1}

        contrastestimate = pe.Node(interface=spm.EstimateContrast(),
                                   name="contrastestimate")
        #contrastestimate.inputs.contrasts = contrasts

        threshold = pe.MapNode(interface=spm.Threshold(),
                               name="threshold",
                               iterfield=['contrast_index', 'stat_image'])
        #threshold.inputs.contrast_index = range(1,len(contrasts)+1)

        threshold_topo_ggmm = neuroutils.CreateTopoFDRwithGGMM(
            "threshold_topo_ggmm")
        #threshold_topo_ggmm.inputs.inputnode.contrast_index = range(1,len(contrasts)+1)

        model_pipeline.connect([
            (modelspec, level1design, [('session_info', 'session_info')]),
            (inputnode, level1design, [('mask', 'mask_image'),
                                       ('TR', 'interscan_interval'),
                                       (("functional_runs", get_ref_slice),
                                        "microtime_onset")]),
            (inputnode, microtime_resolution, [("functional_runs", "volume"),
                                               ("sparse", "sparse")]),
            (microtime_resolution, level1design, [("microtime_resolution",
                                                   "microtime_resolution")]),
            (level1design, level1estimate, [('spm_mat_file', 'spm_mat_file')]),
            (inputnode, contrastestimate, [('contrasts', 'contrasts')]),
            (level1estimate, contrastestimate,
             [('spm_mat_file', 'spm_mat_file'), ('beta_images', 'beta_images'),
              ('residual_image', 'residual_image')]),
            (contrastestimate, threshold, [('spm_mat_file', 'spm_mat_file'),
                                           ('spmT_images', 'stat_image')]),
            (inputnode, threshold, [(('contrasts', _get_contrast_index),
                                     'contrast_index')]),
            (level1estimate, threshold_topo_ggmm, [('mask_image',
                                                    'inputnode.mask_file')]),
            (contrastestimate, threshold_topo_ggmm,
             [('spm_mat_file', 'inputnode.spm_mat_file'),
              ('spmT_images', 'inputnode.stat_image')]),
            (inputnode, threshold_topo_ggmm,
             [(('contrasts', _get_contrast_index), 'inputnode.contrast_index')
              ]),
        ])

    return model_pipeline
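
A sketch of driving this builder; the values below are illustrative
assumptions, and the mask, realignment parameters and outlier files must also
be provided through the inputnode:

wf = create_model_fit_pipeline(high_pass_filter_cutoff=128, nipy=False)
wf.inputs.inputnode.functional_runs = ['run1.nii']
wf.inputs.inputnode.TR = 2.0
wf.inputs.inputnode.units = 'secs'
wf.inputs.inputnode.conditions = ['task']
wf.inputs.inputnode.onsets = [[0.0, 30.0, 60.0]]
wf.inputs.inputnode.durations = [[15.0]]
wf.inputs.inputnode.contrasts = [('task > baseline', 'T', ['task'], [1])]
wf.inputs.inputnode.sparse = False
wf.run()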
Example #8
        if DEBUG:
            print(contrast)
            print(ccode)

    return cont


# ## Set up processing nodes for modeling workflow

# #### Specify model node

# SpecifyModel - Generates SPM-specific Model
modelspec = pe.Node(model.SpecifySPMModel(concatenate_runs=False,
                                          input_units='secs',
                                          output_units='secs',
                                          time_repetition=TR,
                                          high_pass_filter_cutoff=128),
                    name="modelspec")

# #### Level 1 Design node
#
# ** TODO -- get the right matching template file for fmriprep **
#
# * ??do we need a different mask than:
#
#     `'/data00/tools/spm8/apriori/brainmask_th25.nii'`

# Level1Design - Generates an SPM design matrix
level1design = pe.Node(
Example #9
def build_pipeline(model_def):

    # create pointers to needed values from
    # the model dictionary
    # TODO - this could be refactored
    TR = model_def['TR']
    subject_list = model_def['subject_list']
    JSON_MODEL_FILE = model_def['model_path']

    working_dir = model_def['working_dir']
    output_dir = model_def['output_dir']

    SUBJ_DIR = model_def['SUBJ_DIR']
    PROJECT_DIR = model_def['PROJECT_DIR']
    TASK_NAME = model_def['TaskName']
    RUNS = model_def['Runs']
    MODEL_NAME = model_def['ModelName']
    PROJECT_NAME = model_def['ProjectID']
    BASE_DIR = model_def['BaseDirectory']

    SERIAL_CORRELATIONS = model_def.get('SerialCorrelations') or "AR(1)"
    RESIDUALS = model_def.get('GenerateResiduals')

    # SpecifyModel - Generates SPM-specific Model

    modelspec = pe.Node(model.SpecifySPMModel(concatenate_runs=False,
                                              input_units='secs',
                                              output_units='secs',
                                              time_repetition=TR,
                                              high_pass_filter_cutoff=128),
                        name="modelspec")

    # #### Level 1 Design node
    #
    # ** TODO -- get the right matching template file for fmriprep **
    #
    # * ??do we need a different mask than:
    #
    #     `'/data00/tools/spm8/apriori/brainmask_th25.nii'`

    # Level1Design - Generates an SPM design matrix
    level1design = pe.Node(
        spm.Level1Design(
            bases={'hrf': {
                'derivs': [0, 0]
            }},
            timing_units='secs',
            interscan_interval=TR,
            # model_serial_correlations='AR(1)', # [none|AR(1)|FAST]',
            # 8/21/20 mbod - allow for value to be set in JSON model spec
            model_serial_correlations=SERIAL_CORRELATIONS,

            # TODO - allow for specified masks
            mask_image=BRAIN_MASK_PATH,
            global_intensity_normalization='none'),
        name="level1design")

    # #### Estimate Model node
    # EstimateModel - estimate the parameters of the model
    level1estimate = pe.Node(
        spm.EstimateModel(
            estimation_method={'Classical': 1},
            # 8/21/20 mbod - allow for value to be set in JSON model spec
            write_residuals=RESIDUALS),
        name="level1estimate")

    # #### Estimate Contrasts node
    # EstimateContrast - estimates contrasts
    conestimate = pe.Node(spm.EstimateContrast(), name="conestimate")

    # ## Setup pipeline workflow for level 1 model
    # Initiation of the 1st-level analysis workflow
    l1analysis = pe.Workflow(name='l1analysis')

    # Connect up the 1st-level analysis components
    l1analysis.connect([
        (modelspec, level1design, [('session_info', 'session_info')]),
        (level1design, level1estimate, [('spm_mat_file', 'spm_mat_file')]),
        (level1estimate, conestimate, [('spm_mat_file', 'spm_mat_file'),
                                       ('beta_images', 'beta_images'),
                                       ('residual_image', 'residual_image')])
    ])

    # ## Set up nodes for file handling and subject selection
    # ### `getsubjectinfo` node
    #
    # * Use `get_subject_info()` function to generate spec data structure for first level model design matrix

    # Get Subject Info - get subject specific condition information
    getsubjectinfo = pe.Node(util.Function(
        input_names=['subject_id', 'model_path'],
        output_names=['subject_info', 'realign_params', 'condition_names'],
        function=get_subject_info),
                             name='getsubjectinfo')

    makecontrasts = pe.Node(util.Function(
        input_names=['subject_id', 'condition_names', 'model_path'],
        output_names=['contrasts'],
        function=make_contrast_list),
                            name='makecontrasts')

    ExcludeDummyScans = model_def.get('ExcludeDummyScans') or 0

    #if DEBUG:
    #    print(f'Excluding {ExcludeDummyScans} dummy scans.')

    # drop the first ExcludeDummyScans volumes from each functional run
    trimdummyscans = pe.MapNode(Trim(begin_index=ExcludeDummyScans),
                                name='trimdummyscans',
                                iterfield=['in_file'])

    # ### `infosource` node
    #
    # * iterate over list of subject ids and generate subject ids and produce list of contrasts for subsequent nodes

    # Infosource - a function free node to iterate over the list of subject names
    infosource = pe.Node(util.IdentityInterface(
        fields=['subject_id', 'model_path', 'resolution', 'smoothing']),
                         name="infosource")

    fwhm_list = model_def.get('smoothing_list', [4, 6, 8])

    resolution_list = model_def.get('resolutions', ['low', 'medium', 'high'])

    infosource.iterables = [
        ('subject_id', subject_list),
        ('model_path', [JSON_MODEL_FILE] * len(subject_list)),
        ('resolution', resolution_list),
        ('smoothing', ['fwhm_{}'.format(s) for s in fwhm_list])
    ]

    # SelectFiles - to grab the data (alternativ to DataGrabber)

    ## TODO: here need to figure out how to incorporate the run number and task name in call
    templates = {
        'func':
        '{subject_id}/{resolution}/{smoothing}/sr{subject_id}_task-' +
        TASK_NAME + '_run-0*_*MNI*preproc*.nii'
    }

    selectfiles = pe.Node(nio.SelectFiles(
        templates,
        base_directory='{}/{}/derivatives/nipype/resampled_and_smoothed'.
        format(BASE_DIR, PROJECT_NAME)),
                          name="selectfiles")

    # ### Specify datasink node
    #
    # * copy files to keep from various working folders to output folder for model for subject

    # Datasink - creates output folder for important outputs
    datasink = pe.Node(
        nio.DataSink(
            base_directory=SUBJ_DIR,
            parameterization=True,
            #container=output_dir
        ),
        name="datasink")

    datasink.inputs.base_directory = output_dir

    # Use the following DataSink output substitutions
    substitutions = []
    subjFolders = [(
        '_model_path.*resolution_(low|medium|high)_smoothing_(fwhm_\\d{1,2})_subject_id_sub-.*/(.*)$',
        '\\1/\\2/\\3')]
    substitutions.extend(subjFolders)
    datasink.inputs.regexp_substitutions = substitutions

    # datasink connections

    datasink_in_outs = [('conestimate.spm_mat_file', '@spm'),
                        ('level1estimate.beta_images', '@betas'),
                        ('level1estimate.mask_image', '@mask'),
                        ('conestimate.spmT_images', '@spmT'),
                        ('conestimate.con_images', '@con'),
                        ('conestimate.spmF_images', '@spmF')]

    if model_def.get('GenerateResiduals'):
        datasink_in_outs.append(
            ('level1estimate.residual_images', '@residuals'))

    # ---------

    # ## Set up workflow for whole process

    pipeline = pe.Workflow(
        name='first_level_model_{}_{}'.format(TASK_NAME.upper(), MODEL_NAME))
    pipeline.base_dir = os.path.join(SUBJ_DIR, working_dir)

    pipeline.connect([
        (infosource, selectfiles, [('subject_id', 'subject_id'),
                                   ('resolution', 'resolution'),
                                   ('smoothing', 'smoothing')]),
        (infosource, getsubjectinfo, [('subject_id', 'subject_id'),
                                      ('model_path', 'model_path')]),
        (infosource, makecontrasts, [('subject_id', 'subject_id'),
                                     ('model_path', 'model_path')]),
        (getsubjectinfo, makecontrasts,
         [('condition_names', 'condition_names')]),
        (getsubjectinfo, l1analysis,
         [('subject_info', 'modelspec.subject_info'),
          ('realign_params', 'modelspec.realignment_parameters')]),
        (makecontrasts, l1analysis, [('contrasts', 'conestimate.contrasts')]),

        #                  (selectfiles, l1analysis, [('func',
        #                                          'modelspec.functional_runs')]),
        (selectfiles, trimdummyscans, [('func', 'in_file')]),
        (trimdummyscans, l1analysis,
         [('out_file', 'modelspec.functional_runs')]),
        (infosource, datasink, [('subject_id', 'container')]),
        (l1analysis, datasink, datasink_in_outs)
    ])

    return pipeline
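
A hypothetical driver for the builder above; the JSON file name is an
assumption, and the spec must contain the keys read at the top of the
function:

import json

with open('model_spec.json') as fh:
    model_def = json.load(fh)

pipeline = build_pipeline(model_def)
pipeline.run(plugin='MultiProc', plugin_args={'n_procs': 4})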