def test_Level1Design_outputs():
    output_map = dict(ev_files=dict(),
    fsf_files=dict(),
    )
    outputs = Level1Design.output_spec()

    for key, metadata in output_map.items():
        for metakey, value in metadata.items():
            yield assert_equal, getattr(outputs.traits()[key], metakey), value
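A standalone equivalent of the trait-metadata walk above, as a minimal sketch assuming nipype's FSL interfaces are importable:

from nipype.interfaces.fsl.model import Level1Design

outputs = Level1Design.output_spec()
for key in ('ev_files', 'fsf_files'):
    # each expected output is exposed as a trait on the output spec
    assert key in outputs.traits()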
Example #3
def test_Level1Design_inputs():
    input_map = dict(ignore_exception=dict(nohash=True,
    usedefault=True,
    ),
    session_info=dict(mandatory=True,
    ),
    interscan_interval=dict(mandatory=True,
    ),
    bases=dict(mandatory=True,
    ),
    model_serial_correlations=dict(mandatory=True,
    ),
    contrasts=dict(),
    )
    inputs = Level1Design.input_spec()

    for key, metadata in input_map.items():
        for metakey, value in metadata.items():
            yield assert_equal, getattr(inputs.traits()[key], metakey), value
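The input map above encodes which traits are mandatory; the same assertions written directly, as a sketch under the same import assumption:

from nipype.interfaces.fsl.model import Level1Design

inputs = Level1Design.input_spec()
for name in ('bases', 'interscan_interval', 'model_serial_correlations', 'session_info'):
    # these four inputs are flagged mandatory in the spec
    assert inputs.traits()[name].mandatory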
Example #5
def firstlevel_wf(subject_id, sink_directory, name='wmaze_frstlvl_wf'):
    frstlvl_wf = Workflow(name='frstlvl_wf')

    info = dict(
        task_mri_files=[['subject_id',
                         'wmaze']],  #dictionary used in datasource
        motion_noise_files=[['subject_id']])

    #function node to call subjectinfo function with name, onset, duration, and amplitude info
    subject_info = Node(Function(input_names=['subject_id'],
                                 output_names=['output'],
                                 function=subjectinfo),
                        name='subject_info')
    subject_info.inputs.ignore_exception = False
    subject_info.inputs.subject_id = subject_id

    #function node to define contrasts
    getcontrasts = Node(Function(input_names=['subject_id', 'info'],
                                 output_names=['contrasts'],
                                 function=get_contrasts),
                        name='getcontrasts')
    getcontrasts.inputs.ignore_exception = False
    getcontrasts.inputs.subject_id = subject_id
    frstlvl_wf.connect(subject_info, 'output', getcontrasts, 'info')

    #function node to substitute names of folders and files created during pipeline
    getsubs = Node(
        Function(
            input_names=['cons'],
            output_names=['subs'],
            # Calls the function 'get_subs'
            function=get_subs),
        name='getsubs')
    getsubs.inputs.ignore_exception = False
    getsubs.inputs.subject_id = subject_id
    frstlvl_wf.connect(subject_info, 'output', getsubs, 'info')
    frstlvl_wf.connect(getcontrasts, 'contrasts', getsubs, 'cons')

    #datasource node to get task_mri and motion-noise files
    datasource = Node(DataGrabber(infields=['subject_id'],
                                  outfields=list(info.keys())),
                      name='datasource')
    datasource.inputs.template = '*'
    datasource.inputs.subject_id = subject_id
    datasource.inputs.base_directory = os.path.abspath(
        '/home/data/madlab/data/mri/wmaze/preproc/')
    datasource.inputs.field_template = dict(
        task_mri_files='%s/func/smoothed_fullspectrum/_maskfunc2*/*%s*.nii.gz',  # functional files
        motion_noise_files='%s/noise/filter_regressor??.txt')  # filter regressor noise files
    datasource.inputs.template_args = info
    datasource.inputs.sort_filelist = True
    datasource.inputs.ignore_exception = False
    datasource.inputs.raise_on_empty = True

    #function node to remove last three volumes from functional data
    fslroi_epi = MapNode(
        ExtractROI(t_min=0,
                   t_size=197),  #start from first volume and end on -3
        iterfield=['in_file'],
        name='fslroi_epi')
    fslroi_epi.inputs.output_type = 'NIFTI_GZ'
    fslroi_epi.inputs.terminal_output = 'stream'
    frstlvl_wf.connect(datasource, 'task_mri_files', fslroi_epi, 'in_file')

    #function node to modify the motion and noise files to be single regressors
    motionnoise = Node(Function(input_names=['subjinfo', 'files'],
                                output_names=['subjinfo'],
                                function=motion_noise),
                       name='motionnoise')
    motionnoise.inputs.ignore_exception = False
    frstlvl_wf.connect(subject_info, 'output', motionnoise, 'subjinfo')
    frstlvl_wf.connect(datasource, 'motion_noise_files', motionnoise, 'files')

    #node to create model specifications compatible with spm/fsl designers (requires subjectinfo to be received in the form of a Bunch)
    specify_model = Node(SpecifyModel(), name='specify_model')
    specify_model.inputs.high_pass_filter_cutoff = -1.0  #high-pass filter cutoff in seconds
    specify_model.inputs.ignore_exception = False
    specify_model.inputs.input_units = 'secs'  #input units in either 'secs' or 'scans'
    specify_model.inputs.time_repetition = 2.0  #TR
    frstlvl_wf.connect(
        fslroi_epi, 'roi_file', specify_model,
        'functional_runs')  # edited data files for model -- list of 4D files
    #list of event description files in 3 column format corresponding to onsets, durations, and amplitudes
    frstlvl_wf.connect(motionnoise, 'subjinfo', specify_model, 'subject_info')

    #node for basic interface class generating identity mappings
    modelfit_inputspec = Node(IdentityInterface(fields=[
        'session_info', 'interscan_interval', 'contrasts', 'film_threshold',
        'functional_data', 'bases', 'model_serial_correlations'
    ],
                                                mandatory_inputs=True),
                              name='modelfit_inputspec')
    modelfit_inputspec.inputs.bases = {'dgamma': {'derivs': False}}
    modelfit_inputspec.inputs.film_threshold = 0.0
    modelfit_inputspec.inputs.interscan_interval = 2.0
    modelfit_inputspec.inputs.model_serial_correlations = True
    frstlvl_wf.connect(fslroi_epi, 'roi_file', modelfit_inputspec,
                       'functional_data')
    frstlvl_wf.connect(getcontrasts, 'contrasts', modelfit_inputspec,
                       'contrasts')
    frstlvl_wf.connect(specify_model, 'session_info', modelfit_inputspec,
                       'session_info')

    #node for first level SPM design matrix to demonstrate contrasts and motion/noise regressors
    level1_design = MapNode(Level1Design(),
                            iterfield=['contrasts', 'session_info'],
                            name='level1_design')
    level1_design.inputs.ignore_exception = False
    frstlvl_wf.connect(modelfit_inputspec, 'interscan_interval', level1_design,
                       'interscan_interval')
    frstlvl_wf.connect(modelfit_inputspec, 'session_info', level1_design,
                       'session_info')
    frstlvl_wf.connect(modelfit_inputspec, 'contrasts', level1_design,
                       'contrasts')
    frstlvl_wf.connect(modelfit_inputspec, 'bases', level1_design, 'bases')
    frstlvl_wf.connect(modelfit_inputspec, 'model_serial_correlations',
                       level1_design, 'model_serial_correlations')

    #MapNode to generate a design.mat file for each run
    generate_model = MapNode(FEATModel(),
                             iterfield=['fsf_file', 'ev_files'],
                             name='generate_model')
    generate_model.inputs.environ = {'FSLOUTPUTTYPE': 'NIFTI_GZ'}
    generate_model.inputs.ignore_exception = False
    generate_model.inputs.output_type = 'NIFTI_GZ'
    generate_model.inputs.terminal_output = 'stream'
    frstlvl_wf.connect(level1_design, 'fsf_files', generate_model, 'fsf_file')
    frstlvl_wf.connect(level1_design, 'ev_files', generate_model, 'ev_files')

    #MapNode to estimate the model using FILMGLS -- fits the design matrix to the voxel timeseries
    estimate_model = MapNode(FILMGLS(),
                             iterfield=['design_file', 'in_file', 'tcon_file'],
                             name='estimate_model')
    estimate_model.inputs.environ = {'FSLOUTPUTTYPE': 'NIFTI_GZ'}
    estimate_model.inputs.ignore_exception = False
    estimate_model.inputs.mask_size = 5  #Susan-smooth mask size
    estimate_model.inputs.output_type = 'NIFTI_GZ'
    estimate_model.inputs.results_dir = 'results'
    estimate_model.inputs.smooth_autocorr = True  #smooth auto-correlation estimates
    estimate_model.inputs.terminal_output = 'stream'
    frstlvl_wf.connect(modelfit_inputspec, 'film_threshold', estimate_model,
                       'threshold')
    frstlvl_wf.connect(modelfit_inputspec, 'functional_data', estimate_model,
                       'in_file')
    frstlvl_wf.connect(
        generate_model, 'design_file', estimate_model,
        'design_file')  #mat file containing ascii matrix for design
    frstlvl_wf.connect(generate_model, 'con_file', estimate_model,
                       'tcon_file')  #contrast file containing contrast vectors

    #merge node to merge the contrasts - necessary for fsl 5.0.7 and greater
    merge_contrasts = MapNode(Merge(2),
                              iterfield=['in1'],
                              name='merge_contrasts')
    frstlvl_wf.connect(estimate_model, 'zstats', merge_contrasts, 'in1')

    #MapNode to transform the z2pval
    z2pval = MapNode(ImageMaths(), iterfield=['in_file'], name='z2pval')
    z2pval.inputs.environ = {'FSLOUTPUTTYPE': 'NIFTI_GZ'}
    z2pval.inputs.ignore_exception = False
    z2pval.inputs.op_string = '-ztop'  #defines the operation used
    z2pval.inputs.output_type = 'NIFTI_GZ'
    z2pval.inputs.suffix = '_pval'
    z2pval.inputs.terminal_output = 'stream'
    frstlvl_wf.connect(merge_contrasts, ('out', pop_lambda), z2pval, 'in_file')

    #outputspec node using IdentityInterface() to receive information from estimate_model, merge_contrasts, z2pval, generate_model, and estimate_model
    modelfit_outputspec = Node(IdentityInterface(fields=[
        'copes', 'varcopes', 'dof_file', 'pfiles', 'parameter_estimates',
        'zstats', 'design_image', 'design_file', 'design_cov', 'sigmasquareds'
    ],
                                                 mandatory_inputs=True),
                               name='modelfit_outputspec')
    frstlvl_wf.connect(estimate_model, 'copes', modelfit_outputspec,
                       'copes')  #lvl1 cope files
    frstlvl_wf.connect(estimate_model, 'varcopes', modelfit_outputspec,
                       'varcopes')  #lvl1 varcope files
    frstlvl_wf.connect(merge_contrasts, 'out', modelfit_outputspec,
                       'zstats')  #zstats across runs
    frstlvl_wf.connect(z2pval, 'out_file', modelfit_outputspec, 'pfiles')
    frstlvl_wf.connect(
        generate_model, 'design_image', modelfit_outputspec,
        'design_image')  #graphical representation of design matrix
    frstlvl_wf.connect(
        generate_model, 'design_file', modelfit_outputspec,
        'design_file')  #mat file containing ascii matrix for design
    frstlvl_wf.connect(
        generate_model, 'design_cov', modelfit_outputspec,
        'design_cov')  #graphical representation of design covariance
    frstlvl_wf.connect(estimate_model, 'param_estimates', modelfit_outputspec,
                       'parameter_estimates'
                       )  #parameter estimates for columns of design matrix
    frstlvl_wf.connect(estimate_model, 'dof_file', modelfit_outputspec,
                       'dof_file')  #degrees of freedom
    frstlvl_wf.connect(estimate_model, 'sigmasquareds', modelfit_outputspec,
                       'sigmasquareds')  #summary of residuals

    #datasink node to save output from multiple points in the pipeline
    sinkd = MapNode(DataSink(),
                    iterfield=[
                        'substitutions', 'modelfit.contrasts.@copes',
                        'modelfit.contrasts.@varcopes', 'modelfit.estimates',
                        'modelfit.contrasts.@zstats'
                    ],
                    name='sinkd')
    sinkd.inputs.base_directory = sink_directory
    sinkd.inputs.container = subject_id
    frstlvl_wf.connect(getsubs, 'subs', sinkd, 'substitutions')
    frstlvl_wf.connect(modelfit_outputspec, 'parameter_estimates', sinkd,
                       'modelfit.estimates')
    frstlvl_wf.connect(modelfit_outputspec, 'sigmasquareds', sinkd,
                       'modelfit.estimates.@sigsq')
    frstlvl_wf.connect(modelfit_outputspec, 'dof_file', sinkd, 'modelfit.dofs')
    frstlvl_wf.connect(modelfit_outputspec, 'copes', sinkd,
                       'modelfit.contrasts.@copes')
    frstlvl_wf.connect(modelfit_outputspec, 'varcopes', sinkd,
                       'modelfit.contrasts.@varcopes')
    frstlvl_wf.connect(modelfit_outputspec, 'zstats', sinkd,
                       'modelfit.contrasts.@zstats')
    frstlvl_wf.connect(modelfit_outputspec, 'design_image', sinkd,
                       'modelfit.design')
    frstlvl_wf.connect(modelfit_outputspec, 'design_cov', sinkd,
                       'modelfit.design.@cov')
    frstlvl_wf.connect(modelfit_outputspec, 'design_file', sinkd,
                       'modelfit.design.@matrix')
    frstlvl_wf.connect(modelfit_outputspec, 'pfiles', sinkd,
                       'modelfit.contrasts.@pstats')

    return frstlvl_wf
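A hypothetical invocation of the workflow factory above; the subject ID and paths are placeholders, and the helper functions (subjectinfo, get_contrasts, get_subs, motion_noise, pop_lambda) are assumed to be defined in the enclosing module:

wf = firstlevel_wf('WMAZE_001', '/home/data/madlab/data/mri/wmaze/frstlvl')
wf.base_dir = '/scratch/madlab/wmaze'  # hypothetical working directory
wf.run(plugin='MultiProc', plugin_args={'n_procs': 4})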
Example #6
File: glm.py — Project: Doeme/SAMRI
def l1(
    preprocessing_dir,
    highpass_sigma=225,
    include={},
    exclude={},
    keep_work=False,
    l1_dir="",
    nprocs=10,
    mask="/home/chymera/ni_data/templates/ds_QBI_chr_bin.nii.gz",
    per_stimulus_contrast=False,
    habituation="",
    tr=1,
    workflow_name="generic",
):
    """Calculate subject level GLM statistics.

	Parameters
	----------

	include : dict
	A dictionary with any combination of "sessions", "subjects", "trials" as keys and corresponding identifiers as values.
	If this is specified ony matching entries will be included in the analysis.

	exclude : dict
	A dictionary with any combination of "sessions", "subjects", "trials" as keys and corresponding identifiers as values.
	If this is specified ony non-matching entries will be included in the analysis.

	habituation : string
	One value of "confound", "in_main_contrast", "separate_contrast", "" indicating how the habituation regressor should be handled.
	"" or any other value which evaluates to False will mean no habituation regressor is used int he model
	"""

    preprocessing_dir = path.expanduser(preprocessing_dir)
    if not l1_dir:
        l1_dir = path.abspath(path.join(preprocessing_dir, "..", "..", "l1"))

    datafind = nio.DataFinder()
    datafind.inputs.root_paths = preprocessing_dir
    datafind.inputs.match_regex = r'.+/sub-(?P<sub>.+)/ses-(?P<ses>.+)/func/.*?_trial-(?P<scan>.+)\.nii.gz'
    datafind_res = datafind.run()
    iterfields = zip(*[
        datafind_res.outputs.sub, datafind_res.outputs.ses,
        datafind_res.outputs.scan
    ])

    if include:
        iterfields = iterfield_selector(iterfields, include, "include")
    if exclude:
        iterfields = iterfield_selector(iterfields, exclude, "exclude")

    infosource = pe.Node(
        interface=util.IdentityInterface(fields=['subject_session_scan']),
        name="infosource")
    infosource.iterables = [('subject_session_scan', iterfields)]

    datafile_source = pe.Node(
        name='datafile_source',
        interface=util.Function(
            function=sss_to_source,
            input_names=inspect.getargspec(sss_to_source)[0],
            output_names=['out_file']))
    datafile_source.inputs.base_directory = preprocessing_dir
    datafile_source.inputs.source_format = "sub-{0}/ses-{1}/func/sub-{0}_ses-{1}_trial-{2}.nii.gz"

    eventfile_source = pe.Node(
        name='eventfile_source',
        interface=util.Function(
            function=sss_to_source,
            input_names=inspect.getargspec(sss_to_source)[0],
            output_names=['out_file']))
    eventfile_source.inputs.base_directory = preprocessing_dir
    eventfile_source.inputs.source_format = "sub-{0}/ses-{1}/func/sub-{0}_ses-{1}_trial-{2}_events.tsv"

    specify_model = pe.Node(interface=SpecifyModel(), name="specify_model")
    specify_model.inputs.input_units = 'secs'
    specify_model.inputs.time_repetition = tr
    specify_model.inputs.high_pass_filter_cutoff = highpass_sigma
    specify_model.inputs.one_condition_file = not per_stimulus_contrast
    specify_model.inputs.habituation_regressor = bool(habituation)
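    # Note: `one_condition_file` and `habituation_regressor` are not inputs of the
    # stock nipype SpecifyModel; this assumes SAMRI's extended model-specification
    # interface.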

    level1design = pe.Node(interface=Level1Design(), name="level1design")
    level1design.inputs.interscan_interval = tr
    level1design.inputs.bases = {
        "custom": {
            "bfcustompath": "/mnt/data/ni_data/irfs/chr_beta1.txt"
        }
    }
    # level1design.inputs.bases = {'gamma': {'derivs':False, 'gammasigma':10, 'gammadelay':5}}
    level1design.inputs.orthogonalization = {
        1: {0: 0, 1: 0, 2: 0},
        2: {0: 1, 1: 1, 2: 0},
    }
    level1design.inputs.model_serial_correlations = True
    if per_stimulus_contrast:
        level1design.inputs.contrasts = [
            ('allStim', 'T', ["e0", "e1", "e2", "e3", "e4",
                              "e5"], [1, 1, 1, 1, 1, 1])
        ]  #condition names as defined in specify_model
    elif habituation == "separate_contrast":
        level1design.inputs.contrasts = [
            ('allStim', 'T', ["e0"], [1]), ('allStim', 'T', ["e1"], [1])
        ]  #condition names as defined in specify_model
    elif habituation == "in_main_contrast":
        level1design.inputs.contrasts = [
            ('allStim', 'T', ["e0", "e1"], [1, 1])
        ]  #condition names as defined in specify_model
    elif habituation == "confound":
        level1design.inputs.contrasts = [
            ('allStim', 'T', ["e0"], [1])
        ]  #condition names as defined in specify_model
    else:
        level1design.inputs.contrasts = [
            ('allStim', 'T', ["e0"], [1])
        ]  #condition names as defined in specify_model
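    # The "confound" branch and the fallback above define the same single-EV
    # contrast; they differ only in how `habituation` sets specify_model's
    # habituation_regressor input.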

    modelgen = pe.Node(interface=fsl.FEATModel(), name='modelgen')

    glm = pe.Node(interface=fsl.GLM(), name='glm')
    glm.inputs.out_cope = "cope.nii.gz"
    glm.inputs.out_varcb_name = "varcb.nii.gz"
    # Not setting a betas output file might lead to beta export in lieu of COPEs
    glm.inputs.out_file = "betas.nii.gz"
    glm.inputs.out_t_name = "t_stat.nii.gz"
    glm.inputs.out_p_name = "p_stat.nii.gz"
    if mask:
        glm.inputs.mask = mask

    cope_filename = pe.Node(
        name='cope_filename',
        interface=util.Function(
            function=sss_to_source,
            input_names=inspect.getargspec(sss_to_source)[0],
            output_names=['filename']))
    cope_filename.inputs.source_format = "sub-{0}_ses-{1}_trial-{2}_cope.nii.gz"
    varcb_filename = pe.Node(
        name='varcb_filename',
        interface=util.Function(
            function=sss_to_source,
            input_names=inspect.getargspec(sss_to_source)[0],
            output_names=['filename']))
    varcb_filename.inputs.source_format = "sub-{0}_ses-{1}_trial-{2}_varcb.nii.gz"
    tstat_filename = pe.Node(
        name='tstat_filename',
        interface=util.Function(
            function=sss_to_source,
            input_names=inspect.getargspec(sss_to_source)[0],
            output_names=['filename']))
    tstat_filename.inputs.source_format = "sub-{0}_ses-{1}_trial-{2}_tstat.nii.gz"
    zstat_filename = pe.Node(
        name='zstat_filename',
        interface=util.Function(
            function=sss_to_source,
            input_names=inspect.getargspec(sss_to_source)[0],
            output_names=['filename']))
    zstat_filename.inputs.source_format = "sub-{0}_ses-{1}_trial-{2}_zstat.nii.gz"
    pstat_filename = pe.Node(
        name='pstat_filename',
        interface=util.Function(
            function=sss_to_source,
            input_names=inspect.getargspec(sss_to_source)[0],
            output_names=['filename']))
    pstat_filename.inputs.source_format = "sub-{0}_ses-{1}_trial-{2}_pstat.nii.gz"
    pfstat_filename = pe.Node(
        name='pfstat_filename',
        interface=util.Function(
            function=sss_to_source,
            input_names=inspect.getargspec(sss_to_source)[0],
            output_names=['filename']))
    pfstat_filename.inputs.source_format = "sub-{0}_ses-{1}_trial-{2}_pfstat.nii.gz"

    datasink = pe.Node(nio.DataSink(), name='datasink')
    datasink.inputs.base_directory = path.join(l1_dir, workflow_name)
    datasink.inputs.parameterization = False

    workflow_connections = [
        (infosource, datafile_source, [('subject_session_scan',
                                        'subject_session_scan')]),
        (infosource, eventfile_source, [('subject_session_scan',
                                         'subject_session_scan')]),
        (eventfile_source, specify_model, [('out_file', 'event_files')]),
        (datafile_source, specify_model, [('out_file', 'functional_runs')]),
        (specify_model, level1design, [('session_info', 'session_info')]),
        (level1design, modelgen, [('ev_files', 'ev_files')]),
        (level1design, modelgen, [('fsf_files', 'fsf_file')]),
        (datafile_source, glm, [('out_file', 'in_file')]),
        (modelgen, glm, [('design_file', 'design')]),
        (modelgen, glm, [('con_file', 'contrasts')]),
        (infosource, datasink, [(('subject_session_scan', ss_to_path),
                                 'container')]),
        (infosource, cope_filename, [('subject_session_scan',
                                      'subject_session_scan')]),
        (infosource, varcb_filename, [('subject_session_scan',
                                       'subject_session_scan')]),
        (infosource, tstat_filename, [('subject_session_scan',
                                       'subject_session_scan')]),
        (infosource, zstat_filename, [('subject_session_scan',
                                       'subject_session_scan')]),
        (infosource, pstat_filename, [('subject_session_scan',
                                       'subject_session_scan')]),
        (infosource, pfstat_filename, [('subject_session_scan',
                                        'subject_session_scan')]),
        (cope_filename, glm, [('filename', 'out_cope')]),
        (varcb_filename, glm, [('filename', 'out_varcb_name')]),
        (tstat_filename, glm, [('filename', 'out_t_name')]),
        (zstat_filename, glm, [('filename', 'out_z_name')]),
        (pstat_filename, glm, [('filename', 'out_p_name')]),
        (pfstat_filename, glm, [('filename', 'out_pf_name')]),
        (glm, datasink, [('out_pf', '@pfstat')]),
        (glm, datasink, [('out_p', '@pstat')]),
        (glm, datasink, [('out_z', '@zstat')]),
        (glm, datasink, [('out_t', '@tstat')]),
        (glm, datasink, [('out_cope', '@cope')]),
        (glm, datasink, [('out_varcb', '@varcb')]),
    ]

    workdir_name = workflow_name + "_work"
    workflow = pe.Workflow(name=workdir_name)
    workflow.connect(workflow_connections)
    workflow.base_dir = l1_dir
    workflow.config = {
        "execution": {
            "crashdump_dir": path.join(l1_dir, "crashdump")
        }
    }
    workflow.write_graph(dotfilename=path.join(workflow.base_dir, workdir_name,
                                               "graph.dot"),
                         graph2use="hierarchical",
                         format="png")

    workflow.run(plugin="MultiProc", plugin_args={'n_procs': nprocs})
    if not keep_work:
        shutil.rmtree(path.join(l1_dir, workdir_name))
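A hypothetical call showing the include/exclude selectors described in the docstring; all identifiers and paths are placeholders:

l1('~/ni_data/preprocessing/generic',
   include={'subjects': ['4007', '4008']},
   exclude={'sessions': ['ofM_aF']},
   habituation='confound',
   workflow_name='generic',
   )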
Example #7
    def runglmperun(self, subject, trtimeinsec):
        s = SpecifyModel()
        # loop on all runs and models within each run
        modelfiles = subject._modelfiles

        for model in modelfiles:
            # Make directory results to store the results of the model
            results_dir = os.path.join(subject._path, 'model', model[0],
                                       'results', model[1])
            dir_util.mkpath(results_dir)
            os.chdir(results_dir)

            s.inputs.event_files = model[2]
            s.inputs.input_units = 'secs'
            s.inputs.functional_runs = os.path.join(subject._path, 'BOLD',
                                                    model[1],
                                                    'bold_mcf_hp.nii.gz')
            # use nibable to get the tr of from the .nii file
            s.inputs.time_repetition = trtimeinsec
            s.inputs.high_pass_filter_cutoff = 128.
            # find par file that has motion
            motionfiles = glob(
                os.path.join(subject._path, 'BOLD', model[1], "*.par"))
            s.inputs.realignment_parameters = motionfiles
            #info = [Bunch(conditions=['cond1'], onsets=[[2, 50, 100, 180]],                      durations=[[1]]),                 Bunch(conditions=['cond1'], onsets=[[30, 40, 100, 150]],                       durations=[[1]])]
            #s.inputs.subject_info = None

            res = s.run()
            res.runtime.cwd
            print ">>>> preparing evs for model " + model[
                1] + "and run " + model[0]
            sessionInfo = res.outputs.session_info

            level1design = Level1Design()
            level1design.inputs.interscan_interval = trtimeinsec
            level1design.inputs.bases = {'dgamma': {'derivs': False}}
            level1design.inputs.session_info = sessionInfo
            level1design.inputs.model_serial_correlations = True
            #TODO: add contrasts to level 1 design so that I have just condition vs rest for each ev
            #TODO: Look into changign this to FILM instead of FEAT - this also has the option of setting output directory
            # http://fsl.fmrib.ox.ac.uk/fsl/fslwiki/FEAT/UserGuide#Contrasts
            #http://nipy.org/nipype/interfaces/generated/nipype.interfaces.fsl.model.html#filmgls
            resLevel = level1design.run()

            featModel = FEATModel()
            featModel.inputs.fsf_file = resLevel.outputs.fsf_files
            featModel.inputs.ev_files = resLevel.outputs.ev_files
            resFeat = featModel.run()

            print ">>>> creating fsf design files for  " + model[
                1] + "and run " + model[0]
            # TODO: give mask here
            glm = fsl.GLM(in_file=s.inputs.functional_runs[0],
                          design=resFeat.outputs.design_file,
                          output_type='NIFTI')

            print ">>>> running glm for  " + model[1] + "and run " + model[0]
            resGlm = glm.run()

            print ">>>> finished running  glm for  " + model[
                1] + "and run " + model[0]
Example #8
def firstlevel_wf(subject_id, sink_directory, name='wmaze_frstlvl_wf'):
    # Create the frstlvl workflow
    frstlvl_wf = Workflow(name='frstlvl_wf')

    # Dictionary holding the wildcard used in datasource
    info = dict(task_mri_files=[['subject_id', 'wmaze']],
                motion_noise_files=[['subject_id']])

    # Calls the subjectinfo function with the name, onset, duration, and amplitude info
    subject_info = Node(Function(input_names=['subject_id'],
                                 output_names=['output'],
                                 function=subjectinfo),
                        name='subject_info')
    subject_info.inputs.ignore_exception = False
    subject_info.inputs.subject_id = subject_id

    # Create another Function node to define the contrasts for the experiment
    getcontrasts = Node(
        Function(
            input_names=['subject_id', 'info'],
            output_names=['contrasts'],
            # Calls the function 'get_contrasts'
            function=get_contrasts),
        name='getcontrasts')
    getcontrasts.inputs.ignore_exception = False
    # Receives subject_id as input
    getcontrasts.inputs.subject_id = subject_id
    frstlvl_wf.connect(subject_info, 'output', getcontrasts, 'info')

    #### subject_info (output) ----> getcontrasts (info)

    # Create a Function node to substitute names of folders and files created during pipeline
    getsubs = Node(
        Function(
            input_names=['cons'],
            output_names=['subs'],
            # Calls the function 'get_subs'
            function=get_subs),
        name='getsubs')
    getsubs.inputs.ignore_exception = False
    # Receives subject_id as input
    getsubs.inputs.subject_id = subject_id
    frstlvl_wf.connect(subject_info, 'output', getsubs, 'info')
    frstlvl_wf.connect(getcontrasts, 'contrasts', getsubs, 'cons')

    # Create a datasource node to get the task_mri and motion-noise files
    datasource = Node(DataGrabber(infields=['subject_id'],
                                  outfields=list(info.keys())),
                      name='datasource')
    # Indicates the string template to match (in this case, any that match the field template)
    datasource.inputs.template = '*'
    # Receives subject_id as an input
    datasource.inputs.subject_id = subject_id
    # Base directory to allow branching pathways
    datasource.inputs.base_directory = os.path.abspath(
        '/home/data/madlab/data/mri/wmaze/preproc/')
    datasource.inputs.field_template = dict(
        task_mri_files='%s/func/smoothed_fullspectrum/_maskfunc2*/*%s*.nii.gz',
        # Filter regressor noise files
        motion_noise_files='%s/noise/filter_regressor*.txt')
    # Inputs from the infields argument ('subject_id') that satisfy the template
    datasource.inputs.template_args = info
    # Forces DataGrabber to return data in sorted order when using wildcards
    datasource.inputs.sort_filelist = True
    # Do not ignore exceptions
    datasource.inputs.ignore_exception = False
    datasource.inputs.raise_on_empty = True

    # Function to remove last three volumes from functional data
    # Start from the first volume and end on the -3 volume
    fslroi_epi = MapNode(ExtractROI(t_min=0, t_size=197),
                         iterfield=['in_file'],
                         name='fslroi_epi')
    fslroi_epi.inputs.output_type = 'NIFTI_GZ'
    fslroi_epi.inputs.terminal_output = 'stream'
    frstlvl_wf.connect(datasource, 'task_mri_files', fslroi_epi, 'in_file')

    # Function node to modify the motion and noise files to be single regressors
    motionnoise = Node(
        Function(
            input_names=['subjinfo', 'files'],
            output_names=['subjinfo'],
            # Calls the function 'motion_noise'
            function=motion_noise),
        name='motionnoise')
    motionnoise.inputs.ignore_exception = False
    # The bunch from subject_info function containing regressor names, onsets, durations, and amplitudes
    frstlvl_wf.connect(subject_info, 'output', motionnoise, 'subjinfo')
    frstlvl_wf.connect(datasource, 'motion_noise_files', motionnoise, 'files')

    # Makes a model specification compatible with spm/fsl designers
    # Requires subjectinfo to be received in the form of a Bunch or a list of Bunches
    specify_model = Node(SpecifyModel(), name='specify_model')
    # High-pass filter cutoff in seconds
    specify_model.inputs.high_pass_filter_cutoff = -1.0
    specify_model.inputs.ignore_exception = False
    # input units in either 'secs' or 'scans'
    specify_model.inputs.input_units = 'secs'
    # Time between start of one volume and the start of following volume
    specify_model.inputs.time_repetition = 2.0
    # Edited data files for model -- list of 4D files
    frstlvl_wf.connect(fslroi_epi, 'roi_file', specify_model,
                       'functional_runs')
    # List of event description files in 3 column format corresponding to onsets, durations, and amplitudes
    frstlvl_wf.connect(motionnoise, 'subjinfo', specify_model, 'subject_info')

    # Basic interface class generates identity mappings
    modelfit_inputspec = Node(IdentityInterface(fields=[
        'session_info', 'interscan_interval', 'contrasts', 'film_threshold',
        'functional_data', 'bases', 'model_serial_correlations'
    ],
                                                mandatory_inputs=True),
                              name='modelfit_inputspec')
    # Set bases to a dictionary with a second dictionary setting the value of dgamma derivatives as 'False'
    modelfit_inputspec.inputs.bases = {'dgamma': {'derivs': False}}
    # Film threshold
    modelfit_inputspec.inputs.film_threshold = 0.0
    # Interscan_interval
    modelfit_inputspec.inputs.interscan_interval = 2.0
    # Create model serial correlations for Level1Design
    modelfit_inputspec.inputs.model_serial_correlations = True
    frstlvl_wf.connect(fslroi_epi, 'roi_file', modelfit_inputspec,
                       'functional_data')
    frstlvl_wf.connect(getcontrasts, 'contrasts', modelfit_inputspec,
                       'contrasts')
    frstlvl_wf.connect(specify_model, 'session_info', modelfit_inputspec,
                       'session_info')

    # Creates a first level SPM design matrix to demonstrate contrasts and motion/noise regressors
    level1_design = MapNode(Level1Design(),
                            iterfield=['contrasts', 'session_info'],
                            name='level1_design')
    level1_design.inputs.ignore_exception = False
    # Inputs the interscan interval (in secs)
    frstlvl_wf.connect(modelfit_inputspec, 'interscan_interval', level1_design,
                       'interscan_interval')
    # Session specific information generated by ``modelgen.SpecifyModel``
    frstlvl_wf.connect(modelfit_inputspec, 'session_info', level1_design,
                       'session_info')
    # List of contrasts with each contrast being a list of the form -[('name', 'stat', [condition list], [weight list], [session list])].
    # If session list is None or not provided, all sessions are used.
    frstlvl_wf.connect(modelfit_inputspec, 'contrasts', level1_design,
                       'contrasts')
    # Name of basis function and options e.g., {'dgamma': {'derivs': True}}
    frstlvl_wf.connect(modelfit_inputspec, 'bases', level1_design, 'bases')
    # Option to model serial correlations using an autoregressive estimator (order 1)
    # Setting this option is only useful in the context of the fsf file
    frstlvl_wf.connect(modelfit_inputspec, 'model_serial_correlations',
                       level1_design, 'model_serial_correlations')

    # Create a MapNode to generate a design.mat file for each run
    generate_model = MapNode(FEATModel(),
                             iterfield=['fsf_file', 'ev_files'],
                             name='generate_model')
    generate_model.inputs.environ = {'FSLOUTPUTTYPE': 'NIFTI_GZ'}
    generate_model.inputs.ignore_exception = False
    generate_model.inputs.output_type = 'NIFTI_GZ'
    generate_model.inputs.terminal_output = 'stream'
    # File specifying the feat design spec file
    frstlvl_wf.connect(level1_design, 'fsf_files', generate_model, 'fsf_file')
    # Event spec files generated by level1design (condition information files)
    frstlvl_wf.connect(level1_design, 'ev_files', generate_model, 'ev_files')

    # Create a MapNode to estimate the model using FILMGLS -- fits the design matrix to the voxel timeseries
    estimate_model = MapNode(FILMGLS(),
                             iterfield=['design_file', 'in_file', 'tcon_file'],
                             name='estimate_model')
    estimate_model.inputs.environ = {'FSLOUTPUTTYPE': 'NIFTI_GZ'}
    estimate_model.inputs.ignore_exception = False
    # Susan-smooth mask size
    estimate_model.inputs.mask_size = 5
    estimate_model.inputs.output_type = 'NIFTI_GZ'
    estimate_model.inputs.results_dir = 'results'
    # Smooth auto-correlation estimates
    estimate_model.inputs.smooth_autocorr = True
    estimate_model.inputs.terminal_output = 'stream'
    frstlvl_wf.connect(modelfit_inputspec, 'film_threshold', estimate_model,
                       'threshold')
    frstlvl_wf.connect(modelfit_inputspec, 'functional_data', estimate_model,
                       'in_file')
    # Mat file containing ascii matrix for design
    frstlvl_wf.connect(generate_model, 'design_file', estimate_model,
                       'design_file')
    # Contrast file containing contrast vectors
    frstlvl_wf.connect(generate_model, 'con_file', estimate_model, 'tcon_file')

    # Create a merge node to merge the contrasts - necessary for fsl 5.0.7 and greater
    merge_contrasts = MapNode(Merge(2),
                              iterfield=['in1'],
                              name='merge_contrasts')
    frstlvl_wf.connect(estimate_model, 'zstats', merge_contrasts, 'in1')

    # Create a MapNode to transform the z2pval
    z2pval = MapNode(ImageMaths(), iterfield=['in_file'], name='z2pval')
    z2pval.inputs.environ = {'FSLOUTPUTTYPE': 'NIFTI_GZ'}
    # Do not ignore exceptions
    z2pval.inputs.ignore_exception = False
    # Defines the operation used
    z2pval.inputs.op_string = '-ztop'
    # Set the outfile type to nii.gz
    z2pval.inputs.output_type = 'NIFTI_GZ'
    # Out-file suffix
    z2pval.inputs.suffix = '_pval'
    # Set output to stream in terminal
    z2pval.inputs.terminal_output = 'stream'
    frstlvl_wf.connect(merge_contrasts, ('out', pop_lambda), z2pval, 'in_file')

    # Create an outputspec node using IdentityInterface() to receive information from estimate_model,
    # merge_contrasts, z2pval, generate_model, and estimate_model
    modelfit_outputspec = Node(IdentityInterface(fields=[
        'copes', 'varcopes', 'dof_file', 'pfiles', 'parameter_estimates',
        'zstats', 'design_image', 'design_file', 'design_cov', 'sigmasquareds'
    ],
                                                 mandatory_inputs=True),
                               name='modelfit_outputspec')
    # All lvl1 cope files
    frstlvl_wf.connect(estimate_model, 'copes', modelfit_outputspec, 'copes')
    # All lvl1 varcope files
    frstlvl_wf.connect(estimate_model, 'varcopes', modelfit_outputspec,
                       'varcopes')
    # All zstats across runs
    frstlvl_wf.connect(merge_contrasts, 'out', modelfit_outputspec, 'zstats')
    # P-value files computed from the z-stats
    frstlvl_wf.connect(z2pval, 'out_file', modelfit_outputspec, 'pfiles')
    # Graphical representation of design matrix
    frstlvl_wf.connect(generate_model, 'design_image', modelfit_outputspec,
                       'design_image')
    # Mat file containing ascii matrix for design
    frstlvl_wf.connect(generate_model, 'design_file', modelfit_outputspec,
                       'design_file')
    # Graphical representation of design covariance
    frstlvl_wf.connect(generate_model, 'design_cov', modelfit_outputspec,
                       'design_cov')
    # Parameter estimates for each column of the design matrix
    frstlvl_wf.connect(estimate_model, 'param_estimates', modelfit_outputspec,
                       'parameter_estimates')
    # Degrees of freedom
    frstlvl_wf.connect(estimate_model, 'dof_file', modelfit_outputspec,
                       'dof_file')
    # Summary of residuals
    frstlvl_wf.connect(estimate_model, 'sigmasquareds', modelfit_outputspec,
                       'sigmasquareds')

    # Create a datasink node to save output from multiple points in the pipeline
    sinkd = MapNode(DataSink(),
                    iterfield=[
                        'substitutions', 'modelfit.contrasts.@copes',
                        'modelfit.contrasts.@varcopes', 'modelfit.estimates',
                        'modelfit.contrasts.@zstats'
                    ],
                    name='sinkd')
    sinkd.inputs.base_directory = sink_directory
    sinkd.inputs.container = subject_id
    frstlvl_wf.connect(getsubs, 'subs', sinkd, 'substitutions')
    frstlvl_wf.connect(modelfit_outputspec, 'parameter_estimates', sinkd,
                       'modelfit.estimates')
    frstlvl_wf.connect(modelfit_outputspec, 'sigmasquareds', sinkd,
                       'modelfit.estimates.@sigsq')
    frstlvl_wf.connect(modelfit_outputspec, 'dof_file', sinkd, 'modelfit.dofs')
    frstlvl_wf.connect(modelfit_outputspec, 'copes', sinkd,
                       'modelfit.contrasts.@copes')
    frstlvl_wf.connect(modelfit_outputspec, 'varcopes', sinkd,
                       'modelfit.contrasts.@varcopes')
    frstlvl_wf.connect(modelfit_outputspec, 'zstats', sinkd,
                       'modelfit.contrasts.@zstats')
    frstlvl_wf.connect(modelfit_outputspec, 'design_image', sinkd,
                       'modelfit.design')
    frstlvl_wf.connect(modelfit_outputspec, 'design_cov', sinkd,
                       'modelfit.design.@cov')
    frstlvl_wf.connect(modelfit_outputspec, 'design_file', sinkd,
                       'modelfit.design.@matrix')
    frstlvl_wf.connect(modelfit_outputspec, 'pfiles', sinkd,
                       'modelfit.contrasts.@pstats')

    return frstlvl_wf
Example #9
datasource.inputs.sort_filelist = False
results = datasource.run()

print(results.outputs)

cont1 = ['Bundling-Control', 'T', ['Bundling', 'Control'], [1, -1]]

s = SpecifyModel()
s.inputs.input_units = 'secs'
s.inputs.functional_runs = results.outputs.func
s.inputs.time_repetition = 2
s.inputs.high_pass_filter_cutoff = 128.
s.inputs.event_files = results.outputs.evs
model = s.run()

level1design = Level1Design()
level1design.inputs.interscan_interval = 2.5
level1design.inputs.bases = {'dgamma': {'derivs': False}}
level1design.inputs.model_serial_correlations = False
level1design.inputs.session_info = model.outputs.session_info
level1design.inputs.contrasts = [cont1]
l1d = level1design.run()

print(l1d.outputs.ev_files)
modelgen = FEATModel()
modelgen.inputs.ev_files = l1d.outputs.ev_files
modelgen.inputs.fsf_file = l1d.outputs.fsf_files
model = modelgen.run()

fgls = fsl.FILMGLS()
fgls.inputs.in_file = results.outputs.func
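The excerpt stops before wiring the design into FILMGLS; a hedged completion using the FEATModel outputs generated above:

fgls.inputs.design_file = model.outputs.design_file
fgls.inputs.tcon_file = model.outputs.con_file  # contrast vectors
fgls.inputs.results_dir = 'results'
# fgls_res = fgls.run()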
Example #10
def seed_fc(
    preprocessing_dir,
    exclude={},
    habituation='confound',
    highpass_sigma=225,
    lowpass_sigma=False,
    include={},
    keep_work=False,
    out_dir="",
    mask="",
    match_regex='sub-(?P<sub>[a-zA-Z0-9]+)/ses-(?P<ses>[a-zA-Z0-9]+)/func/.*?_task-(?P<task>[a-zA-Z0-9]+)_acq-(?P<acq>[a-zA-Z0-9]+)_(?P<mod>[a-zA-Z0-9]+)\.(?:tsv|nii|nii\.gz)',
    nprocs=N_PROCS,
    tr=1,
    workflow_name="generic",
    modality="cbv",
):
    """Calculate subject level seed-based functional connectivity via the `fsl_glm` command.

	Parameters
	----------

	exclude : dict
		A dictionary with any combination of "sessions", "subjects", "tasks" as keys and corresponding identifiers as values.
		If this is specified matching entries will be excluded in the analysis.
	habituation : {"", "confound", "separate_contrast", "in_main_contrast"}, optional
		How the habituation regressor should be handled.
		Anything which evaluates as False (though we recommend "") means no habituation regressor will be introduced.
	highpass_sigma : int, optional
		Highpass threshold (in seconds).
	include : dict
		A dictionary with any combination of "sessions", "subjects", "tasks" as keys and corresponding identifiers as values.
		If this is specified only matching entries will be included in the analysis.
	keep_work : bool, optional
		Whether to keep the work directory (containing all the intermediary workflow steps, as managed by nipypye).
		This is useful for debugging and quality control.
	out_dir : str, optional
		Path to the directory inside which both the working directory and the output directory will be created.
	mask : str, optional
		Path to the brain mask which shall be used to define the brain volume in the analysis.
		This has to point to an existing NIfTI file containing zero and one values only.
	match_regex : str, optional
		Regex matching pattern by which to select input files. Has to contain groups named "sub", "ses", "acq", "task", and "mod".
	n_procs : int, optional
		Maximum number of processes which to simultaneously spawn for the workflow.
		If not explicitly defined, this is automatically calculated from the number of available cores and under the assumption that the workflow will be the main process running for the duration that it is running.
	tr : int, optional
		Repetition time, in seconds.
	workflow_name : str, optional
		Name of the workflow; this will also be the name of the final output directory produced under `out_dir`.
	"""

    preprocessing_dir = path.abspath(path.expanduser(preprocessing_dir))
    if not out_dir:
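        # NOTE: `bids_base` is not defined in this excerpt; it is assumed to be a
        # module-level BIDS root path.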
        out_dir = path.join(bids_base, 'l1')
    else:
        out_dir = path.abspath(path.expanduser(out_dir))

    datafind = nio.DataFinder()
    datafind.inputs.root_paths = preprocessing_dir
    datafind.inputs.match_regex = match_regex
    datafind_res = datafind.run()
    out_paths = [
        path.abspath(path.expanduser(i))
        for i in datafind_res.outputs.out_paths
    ]
    data_selection = zip(*[
        datafind_res.outputs.sub, datafind_res.outputs.ses,
        datafind_res.outputs.acq, datafind_res.outputs.task,
        datafind_res.outputs.mod, out_paths
    ])
    data_selection = [list(i) for i in data_selection]
    data_selection = pd.DataFrame(data_selection,
                                  columns=('subject', 'session', 'acquisition',
                                           'task', 'modality', 'path'))
    if exclude:
        for key in exclude:
            data_selection = data_selection[~data_selection[key].
                                            isin(exclude[key])]
    if include:
        for key in include:
            data_selection = data_selection[data_selection[key].isin(
                include[key])]
    bids_dictionary = data_selection[
        data_selection['modality'] ==
        modality].drop_duplicates().T.to_dict().values()

    infosource = pe.Node(
        interface=util.IdentityInterface(fields=['bids_dictionary']),
        name="infosource")
    infosource.iterables = [('bids_dictionary', bids_dictionary)]

    datafile_source = pe.Node(
        name='datafile_source',
        interface=util.Function(
            function=select_from_datafind_df,
            input_names=inspect.getargspec(select_from_datafind_df)[0],
            output_names=['out_file']))
    datafile_source.inputs.bids_dictionary_override = {'modality': modality}
    datafile_source.inputs.df = data_selection

    seed_timecourse = pe.Node(
        name='seed_timecourse',
        interface=util.Function(
            function=select_from_datafind_df,
            input_names=inspect.getargspec(select_from_datafind_df)[0],
            output_names=['out_file']))
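    # NOTE: the connection list below references an `eventfile_source` node that is
    # not defined in this excerpt; it presumably mirrors `datafile_source` with an
    # events-file source_format.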

    specify_model = pe.Node(interface=SpecifyModel(), name="specify_model")
    specify_model.inputs.input_units = 'secs'
    specify_model.inputs.time_repetition = tr
    specify_model.inputs.high_pass_filter_cutoff = highpass_sigma
    specify_model.inputs.habituation_regressor = bool(habituation)

    level1design = pe.Node(interface=Level1Design(), name="level1design")
    level1design.inputs.interscan_interval = tr
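    # NOTE: `bf_path` is not defined in this excerpt; in the related `l1` workflow
    # (Example #12) it is a parameter defaulting to a FEAT/FSL "#2" basis-set file.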
    if bf_path:
        bf_path = path.abspath(path.expanduser(bf_path))
        level1design.inputs.bases = {"custom": {"bfcustompath": bf_path}}
    # level1design.inputs.bases = {'gamma': {'derivs':False, 'gammasigma':10, 'gammadelay':5}}
    level1design.inputs.orthogonalization = {
        1: {0: 0, 1: 0, 2: 0},
        2: {0: 1, 1: 1, 2: 0},
    }
    level1design.inputs.model_serial_correlations = True
    if habituation == "separate_contrast":
        level1design.inputs.contrasts = [
            ('allStim', 'T', ["e0"], [1]), ('allStim', 'T', ["e1"], [1])
        ]  #condition names as defined in specify_model
    elif habituation == "in_main_contrast":
        level1design.inputs.contrasts = [
            ('allStim', 'T', ["e0", "e1"], [1, 1])
        ]  #condition names as defined in specify_model
    elif habituation == "confound" or not habituation:
        level1design.inputs.contrasts = [
            ('allStim', 'T', ["e0"], [1])
        ]  #condition names as defined in specify_model
    else:
        raise ValueError(
            'The value you have provided for the `habituation` parameter, namely "{}", is invalid. Please choose one of: "confound", "in_main_contrast", "separate_contrast".'
            .format(habituation))

    modelgen = pe.Node(interface=fsl.FEATModel(), name='modelgen')
    modelgen.inputs.ignore_exception = True

    glm = pe.Node(interface=fsl.GLM(), name='glm')
    glm.inputs.out_cope = "cope.nii.gz"
    glm.inputs.out_varcb_name = "varcb.nii.gz"
    # Not setting a betas output file might lead to beta export in lieu of COPEs
    glm.inputs.out_file = "betas.nii.gz"
    glm.inputs.out_t_name = "t_stat.nii.gz"
    glm.inputs.out_p_name = "p_stat.nii.gz"
    if mask:
        glm.inputs.mask = path.abspath(path.expanduser(mask))
    glm.inputs.ignore_exception = True

    cope_filename = pe.Node(
        name='cope_filename',
        interface=util.Function(
            function=bids_dict_to_source,
            input_names=inspect.getargspec(bids_dict_to_source)[0],
            output_names=['filename']))
    cope_filename.inputs.source_format = "sub-{subject}_ses-{session}_task-{task}_acq-{acquisition}_{modality}_cope.nii.gz"
    varcb_filename = pe.Node(
        name='varcb_filename',
        interface=util.Function(
            function=bids_dict_to_source,
            input_names=inspect.getargspec(bids_dict_to_source)[0],
            output_names=['filename']))
    varcb_filename.inputs.source_format = "sub-{subject}_ses-{session}_task-{task}_acq-{acquisition}_{modality}_varcb.nii.gz"
    tstat_filename = pe.Node(
        name='tstat_filename',
        interface=util.Function(
            function=bids_dict_to_source,
            input_names=inspect.getargspec(bids_dict_to_source)[0],
            output_names=['filename']))
    tstat_filename.inputs.source_format = "sub-{subject}_ses-{session}_task-{task}_acq-{acquisition}_{modality}_tstat.nii.gz"
    zstat_filename = pe.Node(
        name='zstat_filename',
        interface=util.Function(
            function=bids_dict_to_source,
            input_names=inspect.getargspec(bids_dict_to_source)[0],
            output_names=['filename']))
    zstat_filename.inputs.source_format = "sub-{subject}_ses-{session}_task-{task}_acq-{acquisition}_{modality}_zstat.nii.gz"
    pstat_filename = pe.Node(
        name='pstat_filename',
        interface=util.Function(
            function=bids_dict_to_source,
            input_names=inspect.getargspec(bids_dict_to_source)[0],
            output_names=['filename']))
    pstat_filename.inputs.source_format = "sub-{subject}_ses-{session}_task-{task}_acq-{acquisition}_{modality}_pstat.nii.gz"
    pfstat_filename = pe.Node(
        name='pfstat_filename',
        interface=util.Function(
            function=bids_dict_to_source,
            input_names=inspect.getargspec(bids_dict_to_source)[0],
            output_names=['filename']))
    pfstat_filename.inputs.source_format = "sub-{subject}_ses-{session}_task-{task}_acq-{acquisition}_{modality}_pfstat.nii.gz"

    datasink = pe.Node(nio.DataSink(), name='datasink')
    datasink.inputs.base_directory = path.join(out_dir, workflow_name)
    datasink.inputs.parameterization = False

    workflow_connections = [
        (infosource, datafile_source, [('bids_dictionary', 'bids_dictionary')
                                       ]),
        (infosource, eventfile_source, [('bids_dictionary', 'bids_dictionary')
                                        ]),
        (eventfile_source, specify_model, [('out_file', 'event_files')]),
        (specify_model, level1design, [('session_info', 'session_info')]),
        (level1design, modelgen, [('ev_files', 'ev_files')]),
        (level1design, modelgen, [('fsf_files', 'fsf_file')]),
        (modelgen, glm, [('design_file', 'design')]),
        (modelgen, glm, [('con_file', 'contrasts')]),
        (infosource, datasink, [(('bids_dictionary', bids_dict_to_dir),
                                 'container')]),
        (infosource, cope_filename, [('bids_dictionary', 'bids_dictionary')]),
        (infosource, varcb_filename, [('bids_dictionary', 'bids_dictionary')]),
        (infosource, tstat_filename, [('bids_dictionary', 'bids_dictionary')]),
        (infosource, zstat_filename, [('bids_dictionary', 'bids_dictionary')]),
        (infosource, pstat_filename, [('bids_dictionary', 'bids_dictionary')]),
        (infosource, pfstat_filename, [('bids_dictionary', 'bids_dictionary')
                                       ]),
        (cope_filename, glm, [('filename', 'out_cope')]),
        (varcb_filename, glm, [('filename', 'out_varcb_name')]),
        (tstat_filename, glm, [('filename', 'out_t_name')]),
        (zstat_filename, glm, [('filename', 'out_z_name')]),
        (pstat_filename, glm, [('filename', 'out_p_name')]),
        (pfstat_filename, glm, [('filename', 'out_pf_name')]),
        (glm, datasink, [('out_pf', '@pfstat')]),
        (glm, datasink, [('out_p', '@pstat')]),
        (glm, datasink, [('out_z', '@zstat')]),
        (glm, datasink, [('out_t', '@tstat')]),
        (glm, datasink, [('out_cope', '@cope')]),
        (glm, datasink, [('out_varcb', '@varcb')]),
    ]

    if highpass_sigma or lowpass_sigma:
        bandpass = pe.Node(interface=fsl.maths.TemporalFilter(),
                           name="bandpass")
        bandpass.inputs.highpass_sigma = highpass_sigma
        if lowpass_sigma:
            bandpass.inputs.lowpass_sigma = lowpass_sigma
        else:
            bandpass.inputs.lowpass_sigma = tr
        workflow_connections.extend([
            (datafile_source, bandpass, [('out_file', 'in_file')]),
            (bandpass, specify_model, [('out_file', 'functional_runs')]),
            (bandpass, glm, [('out_file', 'in_file')]),
        ])
    else:
        workflow_connections.extend([
            (datafile_source, specify_model, [('out_file', 'functional_runs')
                                              ]),
            (datafile_source, glm, [('out_file', 'in_file')]),
        ])

    workdir_name = workflow_name + "_work"
    workflow = pe.Workflow(name=workdir_name)
    workflow.connect(workflow_connections)
    workflow.base_dir = out_dir
    workflow.config = {
        "execution": {
            "crashdump_dir": path.join(out_dir, "crashdump")
        }
    }
    workflow.write_graph(dotfilename=path.join(workflow.base_dir, workdir_name,
                                               "graph.dot"),
                         graph2use="hierarchical",
                         format="png")

    workflow.run(plugin="MultiProc", plugin_args={'n_procs': nprocs})
    if not keep_work:
        shutil.rmtree(path.join(out_dir, workdir_name))
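A hypothetical invocation of the connectivity workflow above; paths and the workflow name are placeholders:

seed_fc('~/ni_data/preprocessing',
        mask='~/ni_data/templates/mask.nii.gz',
        workflow_name='seed_fc_generic',
        )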
Example #11
def create_firstlevel_workflow_FEAT(name='level1feat'):

    input_node = pe.Node(IdentityInterface(fields=[
        'events_file', 'single_trial', 'sort_by_onset', 'exclude', 'func_file',
        'TR', 'confound_file', 'which_confounds', 'extend_motion_pars',
        'model_serial_correlations', 'hrf_base', 'hp_filter', 'contrasts'
    ]),
                         name='inputspec')

    output_node = pe.Node(
        IdentityInterface(fields=['fsf_file', 'ev_file', 'feat_dir']),
        name='outputspec')

    level1_design = pe.MapNode(interface=Level1Design(
        bases={'dgamma': {
            'derivs': True
        }},
        interscan_interval=2.0,
        model_serial_correlations=True),
                               iterfield=['contrasts', 'session_info'],
                               name='level1_design')

    feat = pe.MapNode(interface=FEAT(), iterfield=['fsf_file'], name='FEAT')
    extract_task = pe.MapNode(interface=Extract_task,
                              iterfield=['in_file'],
                              name='extract_task')
    rename_feat_dir = pe.MapNode(interface=Rename_feat_dir,
                                 iterfield=['feat_dir', 'task'],
                                 name='rename_feat_dir')

    firstlevel_wf = pe.Workflow(name=name)

    modelgen_wf = create_modelgen_workflow()

    firstlevel_wf.connect(input_node, 'events_file', modelgen_wf,
                          'inputspec.events_file')
    firstlevel_wf.connect(input_node, 'func_file', modelgen_wf,
                          'inputspec.func_file')
    firstlevel_wf.connect(input_node, 'TR', modelgen_wf, 'inputspec.TR')
    firstlevel_wf.connect(input_node, 'single_trial', modelgen_wf,
                          'inputspec.single_trial')
    firstlevel_wf.connect(input_node, 'sort_by_onset', modelgen_wf,
                          'inputspec.sort_by_onset')
    firstlevel_wf.connect(input_node, 'extend_motion_pars', modelgen_wf,
                          'inputspec.extend_motion_pars')
    firstlevel_wf.connect(input_node, 'exclude', modelgen_wf,
                          'inputspec.exclude')
    firstlevel_wf.connect(input_node, 'confound_file', modelgen_wf,
                          'inputspec.confound_file')
    firstlevel_wf.connect(input_node, 'which_confounds', modelgen_wf,
                          'inputspec.which_confounds')
    firstlevel_wf.connect(input_node, 'hp_filter', modelgen_wf,
                          'inputspec.hp_filter')

    firstlevel_wf.connect(input_node, 'TR', level1_design,
                          'interscan_interval')
    firstlevel_wf.connect(input_node, 'model_serial_correlations',
                          level1_design, 'model_serial_correlations')
    firstlevel_wf.connect(input_node, 'hrf_base', level1_design, 'bases')
    firstlevel_wf.connect(input_node, 'contrasts', level1_design, 'contrasts')

    firstlevel_wf.connect(modelgen_wf, 'outputspec.session_info',
                          level1_design, 'session_info')
    firstlevel_wf.connect(level1_design, 'fsf_files', feat, 'fsf_file')
    firstlevel_wf.connect(level1_design, 'fsf_files', output_node, 'fsf_file')
    firstlevel_wf.connect(level1_design, 'ev_files', output_node, 'ev_file')

    firstlevel_wf.connect(input_node, 'func_file', extract_task, 'in_file')
    firstlevel_wf.connect(extract_task, 'task_name', rename_feat_dir, 'task')
    firstlevel_wf.connect(feat, 'feat_dir', rename_feat_dir, 'feat_dir')
    firstlevel_wf.connect(rename_feat_dir, 'feat_dir', output_node, 'feat_dir')

    return firstlevel_wf
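A hypothetical driver for the workflow above; all paths and parameter values are placeholders, and the remaining inputspec fields (contrasts, confound handling, trial options) would be set analogously. Since level1_design and FEAT are MapNodes, the file inputs are lists.

wf = create_firstlevel_workflow_FEAT(name='level1feat')
wf.inputs.inputspec.func_file = ['/data/sub-01_task-x_bold.nii.gz']    # placeholder
wf.inputs.inputspec.events_file = ['/data/sub-01_task-x_events.tsv']   # placeholder
wf.inputs.inputspec.TR = 2.0
wf.inputs.inputspec.hrf_base = {'dgamma': {'derivs': True}}
wf.inputs.inputspec.model_serial_correlations = True
wf.base_dir = '/scratch/work'                                          # placeholder
wf.run()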
Example #12
File: glm.py Project: setina42/SAMRI
def l1(preprocessing_dir,
	bf_path = '~/ni_data/irfs/chr_beta1.txt',
	debug=False,
	exclude={},
	habituation='confound',
	highpass_sigma=225,
	lowpass_sigma=False,
	include={},
	keep_work=False,
	out_base="",
	mask="",
	match={},
	tr=1,
	workflow_name="generic",
	modality="cbv",
	n_jobs_percentage=1,
	invert=False,
	):
	"""Calculate subject level GLM statistic scores.

	Parameters
	----------

	bf_path : str, optional
		Basis set path. It should point to a text file in the so-called FEAT/FSL "#2" format (1 entry per volume).
	exclude : dict
		A dictionary with any combination of "sessions", "subjects", "tasks" as keys and corresponding identifiers as values.
		If specified, matching entries will be excluded from the analysis.
	debug : bool, optional
		Whether to enable nipype debug mode.
		This increases logging.
	habituation : {"", "confound", "separate_contrast", "in_main_contrast"}, optional
		How the habituation regressor should be handled.
		Anything which evaluates as False (though we recommend "") means no habituation regressor will be introduced.
	highpass_sigma : int, optional
		Highpass threshold (in seconds).
	lowpass_sigma : int, optional
		Lowpass threshold (in seconds); anything which evaluates as False makes the lowpass sigma fall back to one repetition time.
	include : dict
		A dictionary with any combination of "sessions", "subjects", "tasks" as keys and corresponding identifiers as values.
		If specified, only matching entries will be included in the analysis.
	invert : bool, optional
		If `True`, the values will be inverted with respect to zero.
		This is commonly used for iron nano-particle Cerebral Blood Volume (CBV) measurements.
	keep_work : bool, optional
		Whether to keep the work directory (containing all the intermediary workflow steps, as managed by nipype).
		This is useful for debugging and quality control.
	out_base : str, optional
		Path to the directory inside which both the working directory and the output directory will be created.
	mask : str, optional
		Path to the brain mask which shall be used to define the brain volume in the analysis.
		This has to point to an existing NIfTI file containing zero and one values only.
	n_jobs_percentage : float, optional
		Fraction (between 0 and 1) of the cores present on the machine to use, at most, for deploying jobs in parallel.
	tr : int, optional
		Repetition time, in seconds.
	workflow_name : str, optional
		Name of the workflow; this will also be the name of the final output directory produced under `out_base`.
	"""

	from samri.pipelines.utils import bids_data_selection

	preprocessing_dir = path.abspath(path.expanduser(preprocessing_dir))
	out_base = path.abspath(path.expanduser(out_base))

	data_selection = bids_data_selection(preprocessing_dir, structural_match=False, functional_match=match, subjects=False, sessions=False)
	ind = data_selection.index.tolist()

	out_dir = path.join(out_base,workflow_name)
	workdir_name = workflow_name+'_work'
	workdir = path.join(out_base,workdir_name)
	if not os.path.exists(workdir):
		os.makedirs(workdir)
	data_selection.to_csv(path.join(workdir,'data_selection.csv'))

	get_scan = pe.Node(name='get_scan', interface=util.Function(function=get_bids_scan,input_names=inspect.getargspec(get_bids_scan)[0], output_names=['scan_path','scan_type','task', 'nii_path', 'nii_name', 'events_name', 'subject_session', 'metadata_filename', 'dict_slice']))
	get_scan.inputs.ignore_exception = True
	get_scan.inputs.data_selection = data_selection
	get_scan.inputs.bids_base = preprocessing_dir
	get_scan.iterables = ("ind_type", ind)

	eventfile = pe.Node(name='eventfile', interface=util.Function(function=corresponding_eventfile,input_names=inspect.getargspec(corresponding_eventfile)[0], output_names=['eventfile']))

	if invert:
		invert = pe.Node(interface=fsl.ImageMaths(), name="invert")
		invert.inputs.op_string = '-mul -1'

	specify_model = pe.Node(interface=SpecifyModel(), name="specify_model")
	specify_model.inputs.input_units = 'secs'
	specify_model.inputs.time_repetition = tr
	specify_model.inputs.high_pass_filter_cutoff = highpass_sigma

	level1design = pe.Node(interface=Level1Design(), name="level1design")
	level1design.inputs.interscan_interval = tr
	if bf_path:
		bf_path = path.abspath(path.expanduser(bf_path))
		level1design.inputs.bases = {"custom": {"bfcustompath":bf_path}}
	else:
		# We are not adding derivatives here, as these conflict with the habituation option.
		# !!! This is not difficult to solve, and would only require the addition of an elif condition to the habituator definition, which would add multiple column copies for each of the derivs.
		level1design.inputs.bases = {'gamma': {'derivs':True, 'gammasigma':30, 'gammadelay':10}}
	level1design.inputs.model_serial_correlations = True

	modelgen = pe.Node(interface=fsl.FEATModel(), name='modelgen')
	#modelgen.inputs.ignore_exception = True

	glm = pe.Node(interface=fsl.GLM(), name='glm')
#	glm.inputs.out_cope = "cope.nii.gz"
#	glm.inputs.out_varcb_name = "varcb.nii.gz"
#	#not setting a betas output file might lead to beta export in lieu of COPEs
#	glm.inputs.out_file = "betas.nii.gz"
#	glm.inputs.out_t_name = "t_stat.nii.gz"
#	glm.inputs.out_p_name = "p_stat.nii.gz"
	if mask == 'mouse':
		mask = '/usr/share/mouse-brain-atlases/dsurqec_200micron_mask.nii'
	glm.inputs.mask = path.abspath(path.expanduser(mask))
	glm.interface.mem_gb = 6
	#glm.inputs.ignore_exception = True

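	# The doubled braces survive the .format() calls below, leaving {subject}, {session}, etc. as placeholders in the source_format strings consumed by bids_dict_to_source.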
	out_file_name_base = 'sub-{{subject}}_ses-{{session}}_task-{{task}}_acq-{{acquisition}}_run-{{run}}_{{modality}}_{}.{}'

	betas_filename = pe.Node(name='betas_filename', interface=util.Function(function=bids_dict_to_source,input_names=inspect.getargspec(bids_dict_to_source)[0], output_names=['filename']))
	betas_filename.inputs.source_format = out_file_name_base.format('betas','nii.gz')
	cope_filename = pe.Node(name='cope_filename', interface=util.Function(function=bids_dict_to_source,input_names=inspect.getargspec(bids_dict_to_source)[0], output_names=['filename']))
	cope_filename.inputs.source_format = out_file_name_base.format('cope','nii.gz')
	varcb_filename = pe.Node(name='varcb_filename', interface=util.Function(function=bids_dict_to_source,input_names=inspect.getargspec(bids_dict_to_source)[0], output_names=['filename']))
	varcb_filename.inputs.source_format = out_file_name_base.format('varcb','nii.gz')
	tstat_filename = pe.Node(name='tstat_filename', interface=util.Function(function=bids_dict_to_source,input_names=inspect.getargspec(bids_dict_to_source)[0], output_names=['filename']))
	tstat_filename.inputs.source_format = out_file_name_base.format('tstat','nii.gz')
	zstat_filename = pe.Node(name='zstat_filename', interface=util.Function(function=bids_dict_to_source,input_names=inspect.getargspec(bids_dict_to_source)[0], output_names=['filename']))
	zstat_filename.inputs.source_format = out_file_name_base.format('zstat','nii.gz')
	pstat_filename = pe.Node(name='pstat_filename', interface=util.Function(function=bids_dict_to_source,input_names=inspect.getargspec(bids_dict_to_source)[0], output_names=['filename']))
	pstat_filename.inputs.source_format = out_file_name_base.format('pstat','nii.gz')
	pfstat_filename = pe.Node(name='pfstat_filename', interface=util.Function(function=bids_dict_to_source,input_names=inspect.getargspec(bids_dict_to_source)[0], output_names=['filename']))
	pfstat_filename.inputs.source_format = out_file_name_base.format('pfstat','nii.gz')
	design_filename = pe.Node(name='design', interface=util.Function(function=bids_dict_to_source,input_names=inspect.getargspec(bids_dict_to_source)[0], output_names=['filename']))
	design_filename.inputs.source_format = out_file_name_base.format('design','mat')

	design_rename = pe.Node(interface=util.Rename(), name='design_rename')

	datasink = pe.Node(nio.DataSink(), name='datasink')
	datasink.inputs.base_directory = path.join(out_base,workflow_name)
	datasink.inputs.parameterization = False

	workflow_connections = [
		(get_scan, eventfile, [('nii_path', 'timecourse_file')]),
		(specify_model, level1design, [('session_info', 'session_info')]),
		(level1design, modelgen, [('ev_files', 'ev_files')]),
		(level1design, modelgen, [('fsf_files', 'fsf_file')]),
		(modelgen, glm, [('design_file', 'design')]),
		(modelgen, glm, [('con_file', 'contrasts')]),
		(get_scan, datasink, [(('dict_slice',bids_dict_to_dir), 'container')]),
		(get_scan, betas_filename, [('dict_slice', 'bids_dictionary')]),
		(get_scan, cope_filename, [('dict_slice', 'bids_dictionary')]),
		(get_scan, varcb_filename, [('dict_slice', 'bids_dictionary')]),
		(get_scan, tstat_filename, [('dict_slice', 'bids_dictionary')]),
		(get_scan, zstat_filename, [('dict_slice', 'bids_dictionary')]),
		(get_scan, pstat_filename, [('dict_slice', 'bids_dictionary')]),
		(get_scan, pfstat_filename, [('dict_slice', 'bids_dictionary')]),
		(get_scan, design_filename, [('dict_slice', 'bids_dictionary')]),
		(betas_filename, glm, [('filename', 'out_file')]),
		(cope_filename, glm, [('filename', 'out_cope')]),
		(varcb_filename, glm, [('filename', 'out_varcb_name')]),
		(tstat_filename, glm, [('filename', 'out_t_name')]),
		(zstat_filename, glm, [('filename', 'out_z_name')]),
		(pstat_filename, glm, [('filename', 'out_p_name')]),
		(pfstat_filename, glm, [('filename', 'out_pf_name')]),
		(modelgen, design_rename, [('design_file', 'in_file')]),
		(design_filename, design_rename, [('filename', 'format_string')]),
		(glm, datasink, [('out_pf', '@pfstat')]),
		(glm, datasink, [('out_p', '@pstat')]),
		(glm, datasink, [('out_z', '@zstat')]),
		(glm, datasink, [('out_t', '@tstat')]),
		(glm, datasink, [('out_cope', '@cope')]),
		(glm, datasink, [('out_varcb', '@varcb')]),
		(glm, datasink, [('out_file', '@betas')]),
		(design_rename, datasink, [('out_file', '@design')]),
		]

	if habituation:
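		# Per the nipype Level1Design convention, this dict makes the second regressor (habituation) orthogonal to the first (stimulation).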
		level1design.inputs.orthogonalization = {1: {0:0,1:0,2:0}, 2: {0:1,1:1,2:0}}
		specify_model.inputs.bids_condition_column = 'samri_l1_regressors'
		specify_model.inputs.bids_amplitude_column = 'samri_l1_amplitude'
		add_habituation = pe.Node(name='add_habituation', interface=util.Function(function=eventfile_add_habituation,input_names=inspect.getargspec(eventfile_add_habituation)[0], output_names=['out_file']))
		# Regressor names need to be prefixed with "e" plus a numerator so that Level1Design will be certain to conserve the order.
		add_habituation.inputs.original_stimulation_value='1stim'
		add_habituation.inputs.habituation_value='2habituation'
		workflow_connections.extend([
			(eventfile, add_habituation, [('eventfile', 'in_file')]),
			(add_habituation, specify_model, [('out_file', 'bids_event_file')]),
			])
	if not habituation:
		specify_model.inputs.bids_condition_column = ''
		level1design.inputs.contrasts = [('allStim','T', ['ev0'],[1])]
		workflow_connections.extend([
			(eventfile, specify_model, [('eventfile', 'bids_event_file')]),
			])
	#condition names as defined in eventfile_add_habituation:
	elif habituation=="separate_contrast":
		level1design.inputs.contrasts = [('stim','T', ['1stim','2habituation'],[1,0]),('hab','T', ['1stim','2habituation'],[0,1])]
	elif habituation=="in_main_contrast":
		level1design.inputs.contrasts = [('all','T', ['1stim','2habituation'],[1,1])]
	elif habituation=="confound":
		level1design.inputs.contrasts = [('stim','T', ["1stim", "2habituation"],[1,0])]
	else:
		print(habituation)
		raise ValueError('The value you have provided for the `habituation` parameter, namely "{}", is invalid. Please choose one of: {{None, False,"","confound","in_main_contrast","separate_contrast"}}'.format(habituation))

	if highpass_sigma or lowpass_sigma:
		bandpass = pe.Node(interface=fsl.maths.TemporalFilter(), name="bandpass")
		bandpass.inputs.highpass_sigma = highpass_sigma
		bandpass.interface.mem_gb = 16
		if lowpass_sigma:
			bandpass.inputs.lowpass_sigma = lowpass_sigma
		else:
			bandpass.inputs.lowpass_sigma = tr
		if invert:
			workflow_connections.extend([
				(get_scan, invert, [('nii_path', 'in_file')]),
				(invert, bandpass, [('out_file', 'in_file')]),
				(bandpass, specify_model, [('out_file', 'functional_runs')]),
				(bandpass, glm, [('out_file', 'in_file')]),
				(bandpass, datasink, [('out_file', '@ts_file')]),
				(get_scan, bandpass, [('nii_name', 'out_file')]),
				])
		else:
			workflow_connections.extend([
				(get_scan, bandpass, [('nii_path', 'in_file')]),
				(bandpass, specify_model, [('out_file', 'functional_runs')]),
				(bandpass, glm, [('out_file', 'in_file')]),
				(bandpass, datasink, [('out_file', '@ts_file')]),
				(get_scan, bandpass, [('nii_name', 'out_file')]),
				])
	else:
		if invert:
			workflow_connections.extend([
				(get_scan, invert, [('nii_path', 'in_file')]),
				(invert, specify_model, [('out_file', 'functional_runs')]),
				(invert, glm, [('out_file', 'in_file')]),
				(invert, datasink, [('out_file', '@ts_file')]),
				(get_scan, invert, [('nii_name', 'out_file')]),
				])
		else:
			workflow_connections.extend([
				(get_scan, specify_model, [('nii_path', 'functional_runs')]),
				(get_scan, glm, [('nii_path', 'in_file')]),
				(get_scan, datasink, [('nii_path', '@ts_file')]),
				])


	workflow_config = {'execution': {'crashdump_dir': path.join(out_base,'crashdump'),}}
	if debug:
		workflow_config['logging'] = {
			'workflow_level':'DEBUG',
			'utils_level':'DEBUG',
			'interface_level':'DEBUG',
			'filemanip_level':'DEBUG',
			'log_to_file':'true',
			}

	workflow = pe.Workflow(name=workdir_name)
	workflow.connect(workflow_connections)
	workflow.base_dir = out_base
	workflow.config = workflow_config
	workflow.write_graph(dotfilename=path.join(workflow.base_dir,workdir_name,"graph.dot"), graph2use="hierarchical", format="png")

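	# Use at most the requested fraction of the available cores, but never fewer than two parallel processes.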
	n_jobs = max(int(round(mp.cpu_count()*n_jobs_percentage)),2)
	workflow.run(plugin="MultiProc", plugin_args={'n_procs' : n_jobs})
	if not keep_work:
		shutil.rmtree(path.join(out_base,workdir_name))
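A hypothetical invocation of the function above; the preprocessing directory, output base, and mask are placeholders chosen to mirror the defaults documented in the docstring.

l1('~/ni_data/preprocessing/generic',
	out_base='~/ni_data/l1',
	workflow_name='generic',
	habituation='confound',
	mask='mouse',
	tr=1,
	)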
Example #13
def firstlevel_wf(subject_id, sink_directory, name='ds008_R2_frstlvl_wf'):

    frstlvl_wf = Workflow(name='frstlvl_wf')

    info = dict(task_mri_files=[['subject_id', 'stopsignal']],
                motion_noise_files=[['subject_id', 'filter_regressor']])

    # Create a Function node to define stimulus onsets, etc... for each subject
    subject_info = Node(Function(input_names=['subject_id'],
                                 output_names=['output'],
                                 function=subjectinfo),
                        name='subject_info')
    subject_info.inputs.ignore_exception = False
    subject_info.inputs.subject_id = subject_id

    # Create another Function node to define the contrasts for the experiment
    getcontrasts = Node(Function(input_names=['subject_id'],
                                 output_names=['contrasts'],
                                 function=get_contrasts),
                        name='getcontrasts')
    getcontrasts.inputs.ignore_exception = False
    getcontrasts.inputs.subject_id = subject_id

    # Create a Function node to substitute names of files created during pipeline
    getsubs = Node(Function(input_names=['subject_id', 'cons', 'info'],
                            output_names=['subs'],
                            function=get_subs),
                   name='getsubs')
    getsubs.inputs.ignore_exception = False
    getsubs.inputs.subject_id = subject_id
    frstlvl_wf.connect(subject_info, 'output', getsubs, 'info')
    frstlvl_wf.connect(getcontrasts, 'contrasts', getsubs, 'cons')

    # Create a datasource node to get the task_mri and motion-noise files
    datasource = Node(DataGrabber(infields=['subject_id'],
                                  outfields=info.keys()),
                      name='datasource')
    datasource.inputs.template = '*'
    datasource.inputs.subject_id = subject_id
    #datasource.inputs.base_directory = os.path.abspath('/scratch/PSB6351_2017/ds008_R2.0.0/preproc/')
    #datasource.inputs.field_template = dict(task_mri_files='%s/func/realigned/*%s*.nii.gz',
    #                                        motion_noise_files='%s/noise/%s*.txt')
    datasource.inputs.base_directory = os.path.abspath(
        '/scratch/PSB6351_2017/students/salo/data/preproc/')
    datasource.inputs.field_template = dict(
        task_mri_files='%s/preproc/func/smoothed/corr_*_task-%s_*_bold_bet_smooth_mask.nii.gz',
        motion_noise_files='%s/preproc/noise/%s*.txt')
    datasource.inputs.template_args = info
    datasource.inputs.sort_filelist = True
    datasource.inputs.ignore_exception = False
    datasource.inputs.raise_on_empty = True

    # Create a Function node to modify the motion and noise files to be single regressors
    motionnoise = Node(Function(input_names=['subjinfo', 'files'],
                                output_names=['subjinfo'],
                                function=motion_noise),
                       name='motionnoise')
    motionnoise.inputs.ignore_exception = False
    frstlvl_wf.connect(subject_info, 'output', motionnoise, 'subjinfo')
    frstlvl_wf.connect(datasource, 'motion_noise_files', motionnoise, 'files')

    # Create a specify model node
    specify_model = Node(SpecifyModel(), name='specify_model')
    specify_model.inputs.high_pass_filter_cutoff = 128.
    specify_model.inputs.ignore_exception = False
    specify_model.inputs.input_units = 'secs'
    specify_model.inputs.time_repetition = 2.
    frstlvl_wf.connect(datasource, 'task_mri_files', specify_model,
                       'functional_runs')
    frstlvl_wf.connect(motionnoise, 'subjinfo', specify_model, 'subject_info')

    # Create an InputSpec node for the modelfit node
    modelfit_inputspec = Node(IdentityInterface(fields=[
        'session_info', 'interscan_interval', 'contrasts', 'film_threshold',
        'functional_data', 'bases', 'model_serial_correlations'
    ],
                                                mandatory_inputs=True),
                              name='modelfit_inputspec')
    modelfit_inputspec.inputs.bases = {'dgamma': {'derivs': False}}
    modelfit_inputspec.inputs.film_threshold = 0.0
    modelfit_inputspec.inputs.interscan_interval = 2.0
    modelfit_inputspec.inputs.model_serial_correlations = True
    frstlvl_wf.connect(datasource, 'task_mri_files', modelfit_inputspec,
                       'functional_data')
    frstlvl_wf.connect(getcontrasts, 'contrasts', modelfit_inputspec,
                       'contrasts')
    frstlvl_wf.connect(specify_model, 'session_info', modelfit_inputspec,
                       'session_info')

    # Create a level1 design node
    level1_design = Node(Level1Design(), name='level1_design')
    level1_design.inputs.ignore_exception = False
    frstlvl_wf.connect(modelfit_inputspec, 'interscan_interval', level1_design,
                       'interscan_interval')
    frstlvl_wf.connect(modelfit_inputspec, 'session_info', level1_design,
                       'session_info')
    frstlvl_wf.connect(modelfit_inputspec, 'contrasts', level1_design,
                       'contrasts')
    frstlvl_wf.connect(modelfit_inputspec, 'bases', level1_design, 'bases')
    frstlvl_wf.connect(modelfit_inputspec, 'model_serial_correlations',
                       level1_design, 'model_serial_correlations')

    # Create a MapNode to generate a model for each run
    generate_model = MapNode(FEATModel(),
                             iterfield=['fsf_file', 'ev_files'],
                             name='generate_model')
    generate_model.inputs.environ = {'FSLOUTPUTTYPE': 'NIFTI_GZ'}
    generate_model.inputs.ignore_exception = False
    generate_model.inputs.output_type = 'NIFTI_GZ'
    generate_model.inputs.terminal_output = 'stream'
    frstlvl_wf.connect(level1_design, 'fsf_files', generate_model, 'fsf_file')
    frstlvl_wf.connect(level1_design, 'ev_files', generate_model, 'ev_files')

    # Create a MapNode to estimate the model using FILMGLS
    estimate_model = MapNode(FILMGLS(),
                             iterfield=['design_file', 'in_file', 'tcon_file'],
                             name='estimate_model')
    frstlvl_wf.connect(generate_model, 'design_file', estimate_model,
                       'design_file')
    frstlvl_wf.connect(generate_model, 'con_file', estimate_model, 'tcon_file')
    frstlvl_wf.connect(modelfit_inputspec, 'functional_data', estimate_model,
                       'in_file')

    # Create a merge node to merge the contrasts - necessary for fsl 5.0.7 and greater
    merge_contrasts = MapNode(Merge(2),
                              iterfield=['in1'],
                              name='merge_contrasts')
    frstlvl_wf.connect(estimate_model, 'zstats', merge_contrasts, 'in1')

    # Create a MapNode to transform the z2pval
    z2pval = MapNode(ImageMaths(), iterfield=['in_file'], name='z2pval')
    z2pval.inputs.environ = {'FSLOUTPUTTYPE': 'NIFTI_GZ'}
    z2pval.inputs.ignore_exception = False
    z2pval.inputs.op_string = '-ztop'
    z2pval.inputs.output_type = 'NIFTI_GZ'
    z2pval.inputs.suffix = '_pval'
    z2pval.inputs.terminal_output = 'stream'
    frstlvl_wf.connect(merge_contrasts, ('out', pop_lambda), z2pval, 'in_file')

    # Create an outputspec node
    modelfit_outputspec = Node(IdentityInterface(fields=[
        'copes', 'varcopes', 'dof_file', 'pfiles', 'parameter_estimates',
        'zstats', 'design_image', 'design_file', 'design_cov', 'sigmasquareds'
    ],
                                                 mandatory_inputs=True),
                               name='modelfit_outputspec')
    frstlvl_wf.connect(estimate_model, 'copes', modelfit_outputspec, 'copes')
    frstlvl_wf.connect(estimate_model, 'varcopes', modelfit_outputspec,
                       'varcopes')
    frstlvl_wf.connect(merge_contrasts, 'out', modelfit_outputspec, 'zstats')
    frstlvl_wf.connect(z2pval, 'out_file', modelfit_outputspec, 'pfiles')
    frstlvl_wf.connect(generate_model, 'design_image', modelfit_outputspec,
                       'design_image')
    frstlvl_wf.connect(generate_model, 'design_file', modelfit_outputspec,
                       'design_file')
    frstlvl_wf.connect(generate_model, 'design_cov', modelfit_outputspec,
                       'design_cov')
    frstlvl_wf.connect(estimate_model, 'param_estimates', modelfit_outputspec,
                       'parameter_estimates')
    frstlvl_wf.connect(estimate_model, 'dof_file', modelfit_outputspec,
                       'dof_file')
    frstlvl_wf.connect(estimate_model, 'sigmasquareds', modelfit_outputspec,
                       'sigmasquareds')

    # Create a datasink node
    sinkd = Node(DataSink(), name='sinkd')
    sinkd.inputs.base_directory = sink_directory
    sinkd.inputs.container = subject_id
    frstlvl_wf.connect(getsubs, 'subs', sinkd, 'substitutions')
    frstlvl_wf.connect(modelfit_outputspec, 'parameter_estimates', sinkd,
                       'modelfit.estimates')
    frstlvl_wf.connect(modelfit_outputspec, 'sigmasquareds', sinkd,
                       'modelfit.estimates.@sigsq')
    frstlvl_wf.connect(modelfit_outputspec, 'dof_file', sinkd, 'modelfit.dofs')
    frstlvl_wf.connect(modelfit_outputspec, 'copes', sinkd,
                       'modelfit.contrasts.@copes')
    frstlvl_wf.connect(modelfit_outputspec, 'varcopes', sinkd,
                       'modelfit.contrasts.@varcopes')
    frstlvl_wf.connect(modelfit_outputspec, 'zstats', sinkd,
                       'modelfit.contrasts.@zstats')
    frstlvl_wf.connect(modelfit_outputspec, 'design_image', sinkd,
                       'modelfit.design')
    frstlvl_wf.connect(modelfit_outputspec, 'design_cov', sinkd,
                       'modelfit.design.@cov')
    frstlvl_wf.connect(modelfit_outputspec, 'design_file', sinkd,
                       'modelfit.design.@matrix')
    frstlvl_wf.connect(modelfit_outputspec, 'pfiles', sinkd,
                       'modelfit.contrasts.@pstats')

    return frstlvl_wf
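A hypothetical driver for the workflow above; the subject identifier and sink path are placeholders.

wf = firstlevel_wf(subject_id='sub-01',
                   sink_directory='/scratch/PSB6351_2017/frstlvl_out')
wf.base_dir = '/scratch/work'  # placeholder working directory
wf.run(plugin='MultiProc', plugin_args={'n_procs': 4})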