Example #1
def model_fitting(source_img, prepped_img, subject_info, task):
    taskdir = os.path.join(outputdir, task)
    if not os.path.exists(taskdir):
        os.mkdir(taskdir)

    # skull strip the preprocessed BOLD
    bet = fsl.BET()
    bet.inputs.in_file = prepped_img
    bet.inputs.frac = 0.7
    bet.inputs.functional = True
    bet.inputs.out_file = os.path.join(taskdir, task + "_input_functional_bet.nii.gz")
    bet_res = bet.run()
    bettedinput = bet_res.outputs.out_file

    task_vs_baseline = [task + " vs baseline", 'T', [task], [1]]  # contrast spec: [name, stat type, condition names, weights]
    contrasts = [task_vs_baseline]

    modelfit = pe.Workflow(name='modelfit', base_dir=taskdir)  # generate the model fitting workflow
    modelspec = pe.Node(interface=model.SpecifyModel(), name="modelspec")  # generate design info
    level1design = pe.Node(interface=fsl.Level1Design(), name="level1design")  # generate fsf file
    modelgen = pe.MapNode(  # generate .mat file
        interface=fsl.FEATModel(),
        name='modelgen',
        iterfield=['fsf_file', 'ev_files'])
    feat = pe.Node(  # FEAT statistics
        interface=fsl.FEAT(),
        name='feat')

    # put it all together
    modelfit.connect([
        (modelspec, level1design, [('session_info', 'session_info')]),
        (level1design, modelgen, [('fsf_files', 'fsf_file'), ('ev_files', 'ev_files')]),
        (level1design, feat, [('fsf_files', 'fsf_file')])])

    # define inputs to workflow
    modelspec.inputs.input_units = 'secs'
    modelspec.inputs.functional_runs = bettedinput
    modelspec.inputs.time_repetition = source_img.entities['RepetitionTime']
    modelspec.inputs.high_pass_filter_cutoff = 90
    modelspec.inputs.subject_info = subject_info

    level1design.inputs.interscan_interval = source_img.entities['RepetitionTime']
    level1design.inputs.bases = {'gamma': {'gammasigma': 3, 'gammadelay': 6, 'derivs': True}}
    level1design.inputs.contrasts = contrasts
    level1design.inputs.model_serial_correlations = True

    # Run the model-fitting pipeline. Main outputs are a FEAT directory (with the functional img) and a design.mat file
    res = modelfit.run()

    # outputs: grab the FEAT node's result (this relies on the node order of the executed graph; the FEAT node is expected at index 3)
    feat_dir = list(res.nodes)[3].result.outputs.feat_dir
    thresh_img = os.path.join(feat_dir, "thresh_zstat1.nii.gz")

    return thresh_img
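
# A minimal usage sketch (hypothetical inputs: `outputdir` must be defined in the
# enclosing module, `source_img` is assumed to be a pybids image file exposing a
# `RepetitionTime` entity, and `subject_info` a nipype Bunch):
from nipype.interfaces.base import Bunch

info = Bunch(conditions=['motor'], onsets=[[0, 30, 60]], durations=[[15, 15, 15]])
thresh = model_fitting(source_img, 'prepped_bold.nii.gz', info, 'motor')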
Example #2
File: glm.py  Project: Doeme/SAMRI
def l1(
    preprocessing_dir,
    highpass_sigma=225,
    include={},
    exclude={},
    keep_work=False,
    l1_dir="",
    nprocs=10,
    mask="/home/chymera/ni_data/templates/ds_QBI_chr_bin.nii.gz",
    per_stimulus_contrast=False,
    habituation="",
    tr=1,
    workflow_name="generic",
):
    """Calculate subject level GLM statistics.

	Parameters
	----------

	include : dict
	A dictionary with any combination of "sessions", "subjects", "trials" as keys and corresponding identifiers as values.
	If this is specified ony matching entries will be included in the analysis.

	exclude : dict
	A dictionary with any combination of "sessions", "subjects", "trials" as keys and corresponding identifiers as values.
	If this is specified ony non-matching entries will be included in the analysis.

	habituation : string
	One value of "confound", "in_main_contrast", "separate_contrast", "" indicating how the habituation regressor should be handled.
	"" or any other value which evaluates to False will mean no habituation regressor is used int he model
	"""

    preprocessing_dir = path.expanduser(preprocessing_dir)
    if not l1_dir:
        l1_dir = path.abspath(path.join(preprocessing_dir, "..", "..", "l1"))

    datafind = nio.DataFinder()
    datafind.inputs.root_paths = preprocessing_dir
    datafind.inputs.match_regex = r'.+/sub-(?P<sub>.+)/ses-(?P<ses>.+)/func/.*?_trial-(?P<scan>.+)\.nii\.gz'
    datafind_res = datafind.run()
    iterfields = list(zip(*[
        datafind_res.outputs.sub, datafind_res.outputs.ses,
        datafind_res.outputs.scan
    ]))  # materialize, so the pairing survives reuse as node iterables

    if include:
        iterfields = iterfield_selector(iterfields, include, "include")
    if exclude:
        iterfields = iterfield_selector(iterfields, exclude, "exclude")

    infosource = pe.Node(
        interface=util.IdentityInterface(fields=['subject_session_scan']),
        name="infosource")
    infosource.iterables = [('subject_session_scan', iterfields)]

    datafile_source = pe.Node(
        name='datafile_source',
        interface=util.Function(
            function=sss_to_source,
            input_names=inspect.getargspec(sss_to_source)[0],
            output_names=['out_file']))
    datafile_source.inputs.base_directory = preprocessing_dir
    datafile_source.inputs.source_format = "sub-{0}/ses-{1}/func/sub-{0}_ses-{1}_trial-{2}.nii.gz"

    eventfile_source = pe.Node(
        name='eventfile_source',
        interface=util.Function(
            function=sss_to_source,
            input_names=inspect.getargspec(sss_to_source)[0],
            output_names=['out_file']))
    eventfile_source.inputs.base_directory = preprocessing_dir
    eventfile_source.inputs.source_format = "sub-{0}/ses-{1}/func/sub-{0}_ses-{1}_trial-{2}_events.tsv"

    specify_model = pe.Node(interface=SpecifyModel(), name="specify_model")
    specify_model.inputs.input_units = 'secs'
    specify_model.inputs.time_repetition = tr
    specify_model.inputs.high_pass_filter_cutoff = highpass_sigma
    specify_model.inputs.one_condition_file = not per_stimulus_contrast
    specify_model.inputs.habituation_regressor = bool(habituation)

    level1design = pe.Node(interface=Level1Design(), name="level1design")
    level1design.inputs.interscan_interval = tr
    level1design.inputs.bases = {
        "custom": {
            "bfcustompath": "/mnt/data/ni_data/irfs/chr_beta1.txt"
        }
    }
    # level1design.inputs.bases = {'gamma': {'derivs':False, 'gammasigma':10, 'gammadelay':5}}
    level1design.inputs.orthogonalization = {
        1: {
            0: 0,
            1: 0,
            2: 0
        },
        2: {
            0: 1,
            1: 1,
            2: 0
        }
    }
    level1design.inputs.model_serial_correlations = True
    if per_stimulus_contrast:
        level1design.inputs.contrasts = [
            ('allStim', 'T', ["e0", "e1", "e2", "e3", "e4",
                              "e5"], [1, 1, 1, 1, 1, 1])
        ]  #condition names as defined in specify_model
    elif habituation == "separate_contrast":
        level1design.inputs.contrasts = [
            ('allStim', 'T', ["e0"], [1]), ('allStim', 'T', ["e1"], [1])
        ]  #condition names as defined in specify_model
    elif habituation == "in_main_contrast":
        level1design.inputs.contrasts = [
            ('allStim', 'T', ["e0", "e1"], [1, 1])
        ]  #condition names as defined in specify_model
    elif habituation == "confound":
        level1design.inputs.contrasts = [
            ('allStim', 'T', ["e0"], [1])
        ]  #condition names as defined in specify_model
    else:
        level1design.inputs.contrasts = [
            ('allStim', 'T', ["e0"], [1])
        ]  #condition names as defined in specify_model

    modelgen = pe.Node(interface=fsl.FEATModel(), name='modelgen')

    glm = pe.Node(interface=fsl.GLM(), name='glm')
    glm.inputs.out_cope = "cope.nii.gz"
    glm.inputs.out_varcb_name = "varcb.nii.gz"
    # if no betas output file is set, betas might be exported in lieu of COPEs
    glm.inputs.out_file = "betas.nii.gz"
    glm.inputs.out_t_name = "t_stat.nii.gz"
    glm.inputs.out_p_name = "p_stat.nii.gz"
    if mask:
        glm.inputs.mask = mask

    cope_filename = pe.Node(
        name='cope_filename',
        interface=util.Function(
            function=sss_to_source,
            input_names=inspect.getargspec(sss_to_source)[0],
            output_names=['filename']))
    cope_filename.inputs.source_format = "sub-{0}_ses-{1}_trial-{2}_cope.nii.gz"
    varcb_filename = pe.Node(
        name='varcb_filename',
        interface=util.Function(
            function=sss_to_source,
            input_names=inspect.getargspec(sss_to_source)[0],
            output_names=['filename']))
    varcb_filename.inputs.source_format = "sub-{0}_ses-{1}_trial-{2}_varcb.nii.gz"
    tstat_filename = pe.Node(
        name='tstat_filename',
        interface=util.Function(
            function=sss_to_source,
            input_names=inspect.getargspec(sss_to_source)[0],
            output_names=['filename']))
    tstat_filename.inputs.source_format = "sub-{0}_ses-{1}_trial-{2}_tstat.nii.gz"
    zstat_filename = pe.Node(
        name='zstat_filename',
        interface=util.Function(
            function=sss_to_source,
            input_names=inspect.getargspec(sss_to_source)[0],
            output_names=['filename']))
    zstat_filename.inputs.source_format = "sub-{0}_ses-{1}_trial-{2}_zstat.nii.gz"
    pstat_filename = pe.Node(
        name='pstat_filename',
        interface=util.Function(
            function=sss_to_source,
            input_names=inspect.getargspec(sss_to_source)[0],
            output_names=['filename']))
    pstat_filename.inputs.source_format = "sub-{0}_ses-{1}_trial-{2}_pstat.nii.gz"
    pfstat_filename = pe.Node(
        name='pfstat_filename',
        interface=util.Function(
            function=sss_to_source,
            input_names=inspect.getargspec(sss_to_source)[0],
            output_names=['filename']))
    pfstat_filename.inputs.source_format = "sub-{0}_ses-{1}_trial-{2}_pfstat.nii.gz"

    datasink = pe.Node(nio.DataSink(), name='datasink')
    datasink.inputs.base_directory = path.join(l1_dir, workflow_name)
    datasink.inputs.parameterization = False

    workflow_connections = [
        (infosource, datafile_source, [('subject_session_scan',
                                        'subject_session_scan')]),
        (infosource, eventfile_source, [('subject_session_scan',
                                         'subject_session_scan')]),
        (eventfile_source, specify_model, [('out_file', 'event_files')]),
        (datafile_source, specify_model, [('out_file', 'functional_runs')]),
        (specify_model, level1design, [('session_info', 'session_info')]),
        (level1design, modelgen, [('ev_files', 'ev_files')]),
        (level1design, modelgen, [('fsf_files', 'fsf_file')]),
        (datafile_source, glm, [('out_file', 'in_file')]),
        (modelgen, glm, [('design_file', 'design')]),
        (modelgen, glm, [('con_file', 'contrasts')]),
        (infosource, datasink, [(('subject_session_scan', ss_to_path),
                                 'container')]),
        (infosource, cope_filename, [('subject_session_scan',
                                      'subject_session_scan')]),
        (infosource, varcb_filename, [('subject_session_scan',
                                       'subject_session_scan')]),
        (infosource, tstat_filename, [('subject_session_scan',
                                       'subject_session_scan')]),
        (infosource, zstat_filename, [('subject_session_scan',
                                       'subject_session_scan')]),
        (infosource, pstat_filename, [('subject_session_scan',
                                       'subject_session_scan')]),
        (infosource, pfstat_filename, [('subject_session_scan',
                                        'subject_session_scan')]),
        (cope_filename, glm, [('filename', 'out_cope')]),
        (varcb_filename, glm, [('filename', 'out_varcb_name')]),
        (tstat_filename, glm, [('filename', 'out_t_name')]),
        (zstat_filename, glm, [('filename', 'out_z_name')]),
        (pstat_filename, glm, [('filename', 'out_p_name')]),
        (pfstat_filename, glm, [('filename', 'out_pf_name')]),
        (glm, datasink, [('out_pf', '@pfstat')]),
        (glm, datasink, [('out_p', '@pstat')]),
        (glm, datasink, [('out_z', '@zstat')]),
        (glm, datasink, [('out_t', '@tstat')]),
        (glm, datasink, [('out_cope', '@cope')]),
        (glm, datasink, [('out_varcb', '@varcb')]),
    ]

    workdir_name = workflow_name + "_work"
    workflow = pe.Workflow(name=workdir_name)
    workflow.connect(workflow_connections)
    workflow.base_dir = l1_dir
    workflow.config = {
        "execution": {
            "crashdump_dir": path.join(l1_dir, "crashdump")
        }
    }
    workflow.write_graph(dotfilename=path.join(workflow.base_dir, workdir_name,
                                               "graph.dot"),
                         graph2use="hierarchical",
                         format="png")

    workflow.run(plugin="MultiProc", plugin_args={'n_procs': nprocs})
    if not keep_work:
        shutil.rmtree(path.join(l1_dir, workdir_name))
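
# A hedged usage sketch (paths and identifiers are hypothetical; the `include`
# keys follow the docstring above):
l1('~/ni_data/preprocessing/composite',
   include={'subjects': ['4007', '4011']},
   habituation='confound',
   tr=1,
   workflow_name='composite',
   )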
Example #3
def init_taskbased_wf(
    workdir=None,
    feature=None,
    condition_files=None,
    condition_units=None,
    memcalc=MemoryCalculator(),
):
    """
    create workflow to calculate a first level glm for task functional data
    """
    if feature is not None:
        name = f"{formatlikebids(feature.name)}_wf"
    else:
        name = "taskbased_wf"
    workflow = pe.Workflow(name=name)

    #
    inputnode = pe.Node(
        niu.IdentityInterface(
            fields=[
                "tags",
                "vals",
                "metadata",
                "bold",
                "mask",
                "repetition_time",
                "confounds_selected",
                "condition_names",
                "condition_files",
                "condition_units",
            ]
        ),
        name="inputnode",
    )
    outputnode = pe.Node(niu.IdentityInterface(fields=["resultdicts"]), name="outputnode")

    if feature is not None:
        inputnode.inputs.condition_names = feature.conditions

    if condition_files is not None:
        inputnode.inputs.condition_files = condition_files

    if condition_units is not None:
        inputnode.inputs.condition_units = condition_units

    #
    make_resultdicts_a = pe.Node(
        MakeResultdicts(tagkeys=["feature"], imagekeys=["design_matrix", "contrast_matrix"]),
        name="make_resultdicts_a",
    )
    if feature is not None:
        make_resultdicts_a.inputs.feature = feature.name
    workflow.connect(inputnode, "tags", make_resultdicts_a, "tags")
    workflow.connect(inputnode, "vals", make_resultdicts_a, "vals")
    workflow.connect(inputnode, "metadata", make_resultdicts_a, "metadata")
    make_resultdicts_b = pe.Node(
        MakeResultdicts(
            tagkeys=["feature", "taskcontrast"],
            imagekeys=["effect", "variance", "z", "dof", "mask"],
            metadatakeys=["sources"],
        ),
        name="make_resultdicts_b",
    )
    if feature is not None:
        make_resultdicts_b.inputs.feature = feature.name
    workflow.connect(inputnode, "tags", make_resultdicts_b, "tags")
    workflow.connect(inputnode, "vals", make_resultdicts_b, "vals")
    workflow.connect(inputnode, "metadata", make_resultdicts_b, "metadata")
    workflow.connect(inputnode, "mask", make_resultdicts_b, "mask")

    workflow.connect(make_resultdicts_b, "resultdicts", outputnode, "resultdicts")

    #
    merge_resultdicts = pe.Node(niu.Merge(2), name="merge_resultdicts")
    workflow.connect(make_resultdicts_a, "resultdicts", merge_resultdicts, "in1")
    workflow.connect(make_resultdicts_b, "resultdicts", merge_resultdicts, "in2")
    resultdict_datasink = pe.Node(
        ResultdictDatasink(base_directory=workdir), name="resultdict_datasink"
    )
    workflow.connect(merge_resultdicts, "out", resultdict_datasink, "indicts")

    # parse condition files into three (ordered) lists
    parseconditionfile = pe.Node(ParseConditionFile(), name="parseconditionfile")
    workflow.connect(inputnode, "condition_names", parseconditionfile, "condition_names")
    workflow.connect(inputnode, "condition_files", parseconditionfile, "in_any")

    fillna = pe.Node(FillNA(), name="fillna")
    workflow.connect(inputnode, "confounds_selected", fillna, "in_tsv")

    # first level model specification
    modelspec = pe.Node(model.SpecifyModel(), name="modelspec")
    if hasattr(feature, "high_pass_filter_cutoff"):
        modelspec.inputs.high_pass_filter_cutoff = feature.high_pass_filter_cutoff
    else:
        modelspec.inputs.high_pass_filter_cutoff = np.inf
    workflow.connect(inputnode, "bold", modelspec, "functional_runs")
    workflow.connect(inputnode, "condition_units", modelspec, "input_units")
    workflow.connect(inputnode, "repetition_time", modelspec, "time_repetition")
    workflow.connect(fillna, "out_no_header", modelspec, "realignment_parameters")
    workflow.connect(parseconditionfile, "subject_info", modelspec, "subject_info")

    # transform contrasts dictionary to nipype list data structure
    contrasts = []
    condition_names = []
    if feature is not None:
        condition_names = feature.conditions
        for contrast in feature.contrasts:
            contrast_values = [contrast["values"].get(c, 0.0) for c in condition_names]
            contrasts.append(
                [contrast["name"], contrast["type"].upper(), condition_names, contrast_values]
            )
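    # a hypothetical example: conditions ["go", "stop"] with a contrast
    # {"name": "go>stop", "type": "t", "values": {"go": 1, "stop": -1}}
    # yield the nipype tuple ["go>stop", "T", ["go", "stop"], [1, -1]]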
    contrast_names = list(map(firststr, contrasts))
    make_resultdicts_b.inputs.taskcontrast = contrast_names

    # generate design from first level specification
    level1design = pe.Node(
        fsl.Level1Design(
            contrasts=contrasts,
            model_serial_correlations=True,
            bases={"dgamma": {"derivs": False}},
        ),
        name="level1design",
    )
    workflow.connect(inputnode, "repetition_time", level1design, "interscan_interval")
    workflow.connect(modelspec, "session_info", level1design, "session_info")

    # generate required input files for FILMGLS from design
    modelgen = pe.Node(fsl.FEATModel(), name="modelgen")
    workflow.connect([(level1design, modelgen, [(("fsf_files", firststr), "fsf_file")])])
    workflow.connect([(level1design, modelgen, [(("ev_files", ravel), "ev_files")])])

    # calculate range of image values to determine cutoff value
    stats = pe.Node(fsl.ImageStats(op_string="-R"), name="stats")
    workflow.connect(inputnode, "bold", stats, "in_file")
    cutoff = pe.Node(
        niu.Function(input_names=["obj"], output_names=["min_val"], function=firstfloat),
        name="cutoff",
    )
    workflow.connect(stats, "out_stat", cutoff, "obj")
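    # fslstats `-R` reports [min, max]; `firstfloat` (a helper assumed to be
    # defined in this module) extracts the minimum, which is then passed to
    # FILMGLS as its intensity threshold below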

    # actually estimate the first level model
    modelestimate = pe.Node(
        fsl.FILMGLS(smooth_autocorr=True, mask_size=5),
        name="modelestimate"
    )
    workflow.connect(inputnode, "bold", modelestimate, "in_file")
    workflow.connect(cutoff, "min_val", modelestimate, "threshold")
    workflow.connect(modelgen, "design_file", modelestimate, "design_file")
    workflow.connect(modelgen, "con_file", modelestimate, "tcon_file")

    # make dof volume
    makedofvolume = pe.Node(
        MakeDofVolume(), name="makedofvolume"
    )
    workflow.connect(modelestimate, "copes", makedofvolume, "copes")
    workflow.connect(modelestimate, "dof_file", makedofvolume, "dof_file")

    workflow.connect(modelestimate, "copes", make_resultdicts_b, "effect")
    workflow.connect(modelestimate, "varcopes", make_resultdicts_b, "variance")
    workflow.connect(modelestimate, "zstats", make_resultdicts_b, "z")
    workflow.connect(makedofvolume, "out_file", make_resultdicts_b, "dof")

    #
    mergecolumnnames = pe.Node(niu.Merge(2), name="mergecolumnnames")
    mergecolumnnames.inputs.in1 = condition_names
    workflow.connect(fillna, "column_names", mergecolumnnames, "in2")

    design_unvest = pe.Node(Unvest(), name="design_unvest")
    workflow.connect(modelgen, "design_file", design_unvest, "in_vest")

    design_tsv = pe.Node(MergeColumns(1), name="design_tsv")
    workflow.connect(design_unvest, "out_no_header", design_tsv, "in1")
    workflow.connect(mergecolumnnames, "out", design_tsv, "column_names1")

    contrast_unvest = pe.Node(Unvest(), name="contrast_unvest")
    workflow.connect(modelgen, "con_file", contrast_unvest, "in_vest")

    contrast_tsv = pe.Node(MergeColumns(1), name="contrast_tsv")
    contrast_tsv.inputs.row_index = contrast_names
    workflow.connect(contrast_unvest, "out_no_header", contrast_tsv, "in1")
    workflow.connect(mergecolumnnames, "out", contrast_tsv, "column_names1")

    workflow.connect(design_tsv, "out_with_header", make_resultdicts_a, "design_matrix")
    workflow.connect(contrast_tsv, "out_with_header", make_resultdicts_a, "contrast_matrix")

    return workflow
Example #4
def seed_fc(
    preprocessing_dir,
    exclude={},
    habituation='confound',
    highpass_sigma=225,
    lowpass_sigma=False,
    include={},
    keep_work=False,
    out_dir="",
    mask="",
    match_regex=r'sub-(?P<sub>[a-zA-Z0-9]+)/ses-(?P<ses>[a-zA-Z0-9]+)/func/.*?_task-(?P<task>[a-zA-Z0-9]+)_acq-(?P<acq>[a-zA-Z0-9]+)_(?P<mod>[a-zA-Z0-9]+)\.(?:tsv|nii|nii\.gz)',
    nprocs=N_PROCS,
    tr=1,
    workflow_name="generic",
    modality="cbv",
    bf_path="",
):
    """Calculate subject level seed-based functional connectivity via the `fsl_glm` command.

	Parameters
	----------

	exclude : dict
		A dictionary with any combination of "sessions", "subjects", "tasks" as keys and corresponding identifiers as values.
		If this is specified matching entries will be excluded in the analysis.
	habituation : {"", "confound", "separate_contrast", "in_main_contrast"}, optional
		How the habituation regressor should be handled.
		Anything which evaluates as False (though we recommend "") means no habituation regressor will be introduced.
	highpass_sigma : int, optional
		Highpass threshold (in seconds).
	include : dict
		A dictionary with any combination of "sessions", "subjects", "tasks" as keys and corresponding identifiers as values.
		If this is specified only matching entries will be included in the analysis.
	keep_work : bool, optional
		Whether to keep the work directory (containing all the intermediary workflow steps, as managed by nipypye).
		This is useful for debugging and quality control.
	out_dir : str, optional
		Path to the directory inside which both the working directory and the output directory will be created.
	mask : str, optional
		Path to the brain mask which shall be used to define the brain volume in the analysis.
		This has to point to an existing NIfTI file containing zero and one values only.
	match_regex : str, optional
		Regex matching pattern by which to select input files. Has to contain groups named "sub", "ses", "acq", "task", and "mod".
	n_procs : int, optional
		Maximum number of processes which to simultaneously spawn for the workflow.
		If not explicitly defined, this is automatically calculated from the number of available cores and under the assumption that the workflow will be the main process running for the duration that it is running.
	tr : int, optional
		Repetition time, in seconds.
	workflow_name : str, optional
		Name of the workflow; this will also be the name of the final output directory produced under `out_dir`.
	"""

    preprocessing_dir = path.abspath(path.expanduser(preprocessing_dir))
    if not out_dir:
        out_dir = path.abspath(path.join(preprocessing_dir, "..", "..", "l1"))
    else:
        out_dir = path.abspath(path.expanduser(out_dir))

    datafind = nio.DataFinder()
    datafind.inputs.root_paths = preprocessing_dir
    datafind.inputs.match_regex = match_regex
    datafind_res = datafind.run()
    out_paths = [
        path.abspath(path.expanduser(i))
        for i in datafind_res.outputs.out_paths
    ]
    data_selection = zip(*[
        datafind_res.outputs.sub, datafind_res.outputs.ses,
        datafind_res.outputs.acq, datafind_res.outputs.task,
        datafind_res.outputs.mod, out_paths
    ])
    data_selection = [list(i) for i in data_selection]
    data_selection = pd.DataFrame(data_selection,
                                  columns=('subject', 'session', 'acquisition',
                                           'task', 'modality', 'path'))
    if exclude:
        for key in exclude:
            data_selection = data_selection[~data_selection[key].
                                            isin(exclude[key])]
    if include:
        for key in include:
            data_selection = data_selection[data_selection[key].isin(
                include[key])]
    bids_dictionary = list(data_selection[
        data_selection['modality'] ==
        modality].drop_duplicates().T.to_dict().values())

    infosource = pe.Node(
        interface=util.IdentityInterface(fields=['bids_dictionary']),
        name="infosource")
    infosource.iterables = [('bids_dictionary', bids_dictionary)]

    datafile_source = pe.Node(
        name='datafile_source',
        interface=util.Function(
            function=select_from_datafind_df,
            input_names=inspect.getargspec(select_from_datafind_df)[0],
            output_names=['out_file']))
    datafile_source.inputs.bids_dictionary_override = {'modality': modality}
    datafile_source.inputs.df = data_selection

    seed_timecourse = pe.Node(
        name='seed_timecourse',
        interface=util.Function(
            function=select_from_datafind_df,
            input_names=inspect.getargspec(select_from_datafind_df)[0],
            output_names=['out_file']))

    specify_model = pe.Node(interface=SpecifyModel(), name="specify_model")
    specify_model.inputs.input_units = 'secs'
    specify_model.inputs.time_repetition = tr
    specify_model.inputs.high_pass_filter_cutoff = highpass_sigma
    specify_model.inputs.habituation_regressor = bool(habituation)

    level1design = pe.Node(interface=Level1Design(), name="level1design")
    level1design.inputs.interscan_interval = tr
    if bf_path:
        bf_path = path.abspath(path.expanduser(bf_path))
        level1design.inputs.bases = {"custom": {"bfcustompath": bf_path}}
    else:
        # fall back to FSL's default double-gamma HRF when no custom basis file is given
        level1design.inputs.bases = {'dgamma': {'derivs': False}}
    level1design.inputs.orthogonalization = {
        1: {
            0: 0,
            1: 0,
            2: 0
        },
        2: {
            0: 1,
            1: 1,
            2: 0
        }
    }
    level1design.inputs.model_serial_correlations = True
    if habituation == "separate_contrast":
        level1design.inputs.contrasts = [
            ('allStim', 'T', ["e0"], [1]), ('allStim', 'T', ["e1"], [1])
        ]  #condition names as defined in specify_model
    elif habituation == "in_main_contrast":
        level1design.inputs.contrasts = [
            ('allStim', 'T', ["e0", "e1"], [1, 1])
        ]  #condition names as defined in specify_model
    elif habituation == "confound" or not habituation:
        level1design.inputs.contrasts = [
            ('allStim', 'T', ["e0"], [1])
        ]  #condition names as defined in specify_model
    else:
        raise ValueError(
            'The value you have provided for the `habituation` parameter, namely "{}", is invalid. Please choose one of: "confound", "in_main_contrast", "separate_contrast".'
            .format(habituation))

    modelgen = pe.Node(interface=fsl.FEATModel(), name='modelgen')
    modelgen.inputs.ignore_exception = True

    glm = pe.Node(interface=fsl.GLM(), name='glm')
    glm.inputs.out_cope = "cope.nii.gz"
    glm.inputs.out_varcb_name = "varcb.nii.gz"
    # if no betas output file is set, betas might be exported in lieu of COPEs
    glm.inputs.out_file = "betas.nii.gz"
    glm.inputs.out_t_name = "t_stat.nii.gz"
    glm.inputs.out_p_name = "p_stat.nii.gz"
    if mask:
        glm.inputs.mask = path.abspath(path.expanduser(mask))
    glm.inputs.ignore_exception = True

    cope_filename = pe.Node(
        name='cope_filename',
        interface=util.Function(
            function=bids_dict_to_source,
            input_names=inspect.getargspec(bids_dict_to_source)[0],
            output_names=['filename']))
    cope_filename.inputs.source_format = "sub-{subject}_ses-{session}_task-{task}_acq-{acquisition}_{modality}_cope.nii.gz"
    varcb_filename = pe.Node(
        name='varcb_filename',
        interface=util.Function(
            function=bids_dict_to_source,
            input_names=inspect.getargspec(bids_dict_to_source)[0],
            output_names=['filename']))
    varcb_filename.inputs.source_format = "sub-{subject}_ses-{session}_task-{task}_acq-{acquisition}_{modality}_varcb.nii.gz"
    tstat_filename = pe.Node(
        name='tstat_filename',
        interface=util.Function(
            function=bids_dict_to_source,
            input_names=inspect.getargspec(bids_dict_to_source)[0],
            output_names=['filename']))
    tstat_filename.inputs.source_format = "sub-{subject}_ses-{session}_task-{task}_acq-{acquisition}_{modality}_tstat.nii.gz"
    zstat_filename = pe.Node(
        name='zstat_filename',
        interface=util.Function(
            function=bids_dict_to_source,
            input_names=inspect.getargspec(bids_dict_to_source)[0],
            output_names=['filename']))
    zstat_filename.inputs.source_format = "sub-{subject}_ses-{session}_task-{task}_acq-{acquisition}_{modality}_zstat.nii.gz"
    pstat_filename = pe.Node(
        name='pstat_filename',
        interface=util.Function(
            function=bids_dict_to_source,
            input_names=inspect.getargspec(bids_dict_to_source)[0],
            output_names=['filename']))
    pstat_filename.inputs.source_format = "sub-{subject}_ses-{session}_task-{task}_acq-{acquisition}_{modality}_pstat.nii.gz"
    pfstat_filename = pe.Node(
        name='pfstat_filename',
        interface=util.Function(
            function=bids_dict_to_source,
            input_names=inspect.getargspec(bids_dict_to_source)[0],
            output_names=['filename']))
    pfstat_filename.inputs.source_format = "sub-{subject}_ses-{session}_task-{task}_acq-{acquisition}_{modality}_pfstat.nii.gz"

    datasink = pe.Node(nio.DataSink(), name='datasink')
    datasink.inputs.base_directory = path.join(out_dir, workflow_name)
    datasink.inputs.parameterization = False

    workflow_connections = [
        (infosource, datafile_source, [('bids_dictionary', 'bids_dictionary')
                                       ]),
        # the seed timecourse serves as the event/regressor source here
        (infosource, seed_timecourse, [('bids_dictionary', 'bids_dictionary')
                                       ]),
        (seed_timecourse, specify_model, [('out_file', 'event_files')]),
        (specify_model, level1design, [('session_info', 'session_info')]),
        (level1design, modelgen, [('ev_files', 'ev_files')]),
        (level1design, modelgen, [('fsf_files', 'fsf_file')]),
        (modelgen, glm, [('design_file', 'design')]),
        (modelgen, glm, [('con_file', 'contrasts')]),
        (infosource, datasink, [(('bids_dictionary', bids_dict_to_dir),
                                 'container')]),
        (infosource, cope_filename, [('bids_dictionary', 'bids_dictionary')]),
        (infosource, varcb_filename, [('bids_dictionary', 'bids_dictionary')]),
        (infosource, tstat_filename, [('bids_dictionary', 'bids_dictionary')]),
        (infosource, zstat_filename, [('bids_dictionary', 'bids_dictionary')]),
        (infosource, pstat_filename, [('bids_dictionary', 'bids_dictionary')]),
        (infosource, pfstat_filename, [('bids_dictionary', 'bids_dictionary')
                                       ]),
        (cope_filename, glm, [('filename', 'out_cope')]),
        (varcb_filename, glm, [('filename', 'out_varcb_name')]),
        (tstat_filename, glm, [('filename', 'out_t_name')]),
        (zstat_filename, glm, [('filename', 'out_z_name')]),
        (pstat_filename, glm, [('filename', 'out_p_name')]),
        (pfstat_filename, glm, [('filename', 'out_pf_name')]),
        (glm, datasink, [('out_pf', '@pfstat')]),
        (glm, datasink, [('out_p', '@pstat')]),
        (glm, datasink, [('out_z', '@zstat')]),
        (glm, datasink, [('out_t', '@tstat')]),
        (glm, datasink, [('out_cope', '@cope')]),
        (glm, datasink, [('out_varcb', '@varcb')]),
    ]

    if highpass_sigma or lowpass_sigma:
        bandpass = pe.Node(interface=fsl.maths.TemporalFilter(),
                           name="bandpass")
        bandpass.inputs.highpass_sigma = highpass_sigma
        if lowpass_sigma:
            bandpass.inputs.lowpass_sigma = lowpass_sigma
        else:
            bandpass.inputs.lowpass_sigma = tr
        workflow_connections.extend([
            (datafile_source, bandpass, [('out_file', 'in_file')]),
            (bandpass, specify_model, [('out_file', 'functional_runs')]),
            (bandpass, glm, [('out_file', 'in_file')]),
        ])
    else:
        workflow_connections.extend([
            (datafile_source, specify_model, [('out_file', 'functional_runs')
                                              ]),
            (datafile_source, glm, [('out_file', 'in_file')]),
        ])

    workdir_name = workflow_name + "_work"
    workflow = pe.Workflow(name=workdir_name)
    workflow.connect(workflow_connections)
    workflow.base_dir = out_dir
    workflow.config = {
        "execution": {
            "crashdump_dir": path.join(out_dir, "crashdump")
        }
    }
    workflow.write_graph(dotfilename=path.join(workflow.base_dir, workdir_name,
                                               "graph.dot"),
                         graph2use="hierarchical",
                         format="png")

    workflow.run(plugin="MultiProc", plugin_args={'n_procs': nprocs})
    if not keep_work:
        shutil.rmtree(path.join(out_dir, workdir_name))
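
# A hedged usage sketch (all paths are hypothetical; the preprocessing layout
# must match the default `match_regex`):
seed_fc('~/ni_data/preprocessing/composite',
    habituation='confound',
    mask='~/ni_data/templates/mask.nii.gz',
    workflow_name='seed_dr',
    )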
Example #5
# model specification
modelspec = Node(modelgen.SpecifyModel(subject_info=subject_info,
                                       input_units='secs',
                                       time_repetition=TR,
                                       high_pass_filter_cutoff=100),
                 name="modelspec")

# first-level design
level1design = Node(fsl.Level1Design(bases={'dgamma':{'derivs': True}},
                                     interscan_interval=TR,
                                     model_serial_correlations=True,
                                     contrasts=contrast_list),
                    name="level1design")

# creating all the other files necessary to run the model
modelgen = Node(fsl.FEATModel(),
                name='modelgen')

# then running through FEAT
feat = Node(fsl.FEAT(),
            name="feat")

# creating datasink to collect outputs
datasink = Node(DataSink(base_directory=outDir),
                name='datasink')

## Use the following DataSink output substitutions
substitutions = [('_subject_id_', 'sub-'),
                 ('_subsession_id_', '/ses-')
                 ]

datasink.inputs.substitutions = substitutions
Example #6
"""
   c. Use :class:`nipype.algorithms.modelgen.SpecifyModel` to generate
   SPM-specific design information.
"""

modelspec = pe.Node(interface=model.SpecifyModel(), name="modelspec")
"""
   d. Use :class:`nipype.interfaces.fsl.Level1Design` to generate a
   run specific fsf file for analysis
"""

level1design = pe.Node(interface=fsl.Level1Design(), name="fsfdesign")
"""
   e. Use :class:`nipype.interfaces.fsl.FEATModel` to generate a
   run specific mat file for use by FILMGLS
"""

modelgen = pe.Node(interface=fsl.FEATModel(), name='modelgen')
"""
   f. Use :class:`nipype.interfaces.fsl.FILMGLS` to estimate a model
   specified by a mat file and a functional run
"""

modelestimate = pe.Node(interface=fsl.FILMGLS(), name='modelestimate')
#iterfield = ['design_file','in_file'])

modelfit.connect([
    (modelspec, level1design, [('session_info', 'session_info')]),
    (level1design, modelgen, [('fsf_files', 'fsf_file'),
                              ('ev_files', 'ev_files')]),
    (modelgen, modelestimate, [('design_file', 'design_file')]),
])
"""
Example #7
# model specification
modelspec = Node(modelgen.SpecifyModel(input_units='secs',
                                       time_repetition=TR,
                                       high_pass_filter_cutoff=100),
                 name="modelspec")

# first-level design
level1design = Node(fsl.Level1Design(bases={'dgamma': {
    'derivs': True
}},
                                     interscan_interval=TR,
                                     model_serial_correlations=True),
                    name="level1design")

# creating all the other files necessary to run the model
modelgen = Node(fsl.FEATModel(), name='modelgen')

# then running through FEAT
feat = Node(fsl.FEAT(), name="feat")

# creating datasink to collect outputs
datasink = Node(DataSink(base_directory=outDir), name='datasink')

## Use the following DataSink output substitutions
substitutions = [('_subject_id_', '/sub-'), ('_run_id_', '/run-')]

datasink.inputs.substitutions = substitutions
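# e.g. the auto-generated fragment '_subject_id_01' in a sunk path becomes
# '/sub-01', and '_run_id_02' becomes '/run-02'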

Example #8
def create_nuisance_modelfit_workflow(name='modelfit', f_contrasts=False):
    """
    Create an FSL model-fitting workflow that also returns
    residual4d and sigmasquareds.

    Example
    -------

#    >>> modelfit = create_modelfit_workflow()
#    >>> modelfit.base_dir = '.'
#    >>> info = dict()
#    >>> modelfit.inputs.inputspec.session_info = info
#    >>> modelfit.inputs.inputspec.interscan_interval = 3.
#    >>> modelfit.inputs.inputspec.film_threshold = 1000
#    >>> modelfit.run() #doctest: +SKIP

    Inputs::

         inputspec.session_info : info generated by modelgen.SpecifyModel
         inputspec.interscan_interval : interscan interval
         inputspec.contrasts : list of contrasts
         inputspec.film_threshold : image threshold for FILM estimation
         inputspec.model_serial_correlations
         inputspec.bases

    Outputs::

         outputspec.copes
         outputspec.varcopes
         outputspec.dof_file
         outputspec.pfiles
         outputspec.zfiles
         outputspec.parameter_estimates
         outputspec.residual4d
         outputspec.sigmasquareds

    """

    version = 0
    if fsl.Info.version() and \
                    LooseVersion(fsl.Info.version()) > LooseVersion('5.0.6'):
        version = 507

    modelfit = pe.Workflow(name=name)
    """
    Create the nodes
    """

    inputspec = pe.Node(util.IdentityInterface(fields=[
        'session_info', 'interscan_interval', 'contrasts', 'film_threshold',
        'functional_data', 'bases', 'model_serial_correlations'
    ]),
                        name='inputspec')
    level1design = pe.Node(interface=fsl.Level1Design(), name="level1design")
    modelgen = pe.MapNode(interface=fsl.FEATModel(),
                          name='modelgen',
                          iterfield=['fsf_file', 'ev_files'])
    if version < 507:
        modelestimate = pe.MapNode(interface=fsl.FILMGLS(smooth_autocorr=True,
                                                         mask_size=5),
                                   name='modelestimate',
                                   iterfield=['design_file', 'in_file'])
    else:
        if f_contrasts:
            iterfield = ['design_file', 'in_file', 'tcon_file', 'fcon_file']
        else:
            iterfield = ['design_file', 'in_file', 'tcon_file']
        modelestimate = pe.MapNode(interface=fsl.FILMGLS(smooth_autocorr=True,
                                                         mask_size=5),
                                   name='modelestimate',
                                   iterfield=iterfield)

    if version < 507:
        if f_contrasts:
            iterfield = [
                'tcon_file', 'fcon_file', 'param_estimates', 'sigmasquareds',
                'corrections', 'dof_file'
            ]
        else:
            iterfield = [
                'tcon_file', 'param_estimates', 'sigmasquareds', 'corrections',
                'dof_file'
            ]
        conestimate = pe.MapNode(interface=fsl.ContrastMgr(),
                                 name='conestimate',
                                 iterfield=iterfield)

    if f_contrasts:
        iterfield = ['in1', 'in2']
    else:
        iterfield = ['in1']
    merge_contrasts = pe.MapNode(interface=util.Merge(2),
                                 name='merge_contrasts',
                                 iterfield=iterfield)

    ztopval = pe.MapNode(interface=fsl.ImageMaths(op_string='-ztop',
                                                  suffix='_pval'),
                         nested=True,
                         name='ztop',
                         iterfield=['in_file'])

    outputspec = pe.Node(util.IdentityInterface(fields=[
        'copes', 'varcopes', 'dof_file', 'pfiles', 'zfiles',
        'parameter_estimates', 'residual4d', 'sigmasquareds'
    ]),
                         name='outputspec')
    """
    Setup the connections
    """

    modelfit.connect([
        (inputspec, level1design,
         [('interscan_interval', 'interscan_interval'),
          ('session_info', 'session_info'), ('contrasts', 'contrasts'),
          ('bases', 'bases'),
          ('model_serial_correlations', 'model_serial_correlations')]),
        (inputspec, modelestimate, [('film_threshold', 'threshold'),
                                    ('functional_data', 'in_file')]),
        (level1design, modelgen, [('fsf_files', 'fsf_file'),
                                  ('ev_files', 'ev_files')]),
        (modelgen, modelestimate, [('design_file', 'design_file')]),

        # connect also residual4d and sigmasquared
        (modelestimate, outputspec, [('param_estimates',
                                      'parameter_estimates'),
                                     ('dof_file', 'dof_file'),
                                     ('residual4d', 'residual4d'),
                                     ('sigmasquareds', 'sigmasquareds')]),
    ])
    if version < 507:
        modelfit.connect([
            (modelgen, conestimate, [('con_file', 'tcon_file'),
                                     ('fcon_file', 'fcon_file')]),
            (modelestimate, conestimate, [('param_estimates',
                                           'param_estimates'),
                                          ('sigmasquareds', 'sigmasquareds'),
                                          ('corrections', 'corrections'),
                                          ('dof_file', 'dof_file')]),
            (conestimate, outputspec, [('copes', 'copes'),
                                       ('varcopes', 'varcopes')]),
        ])
    else:
        modelfit.connect([
            (modelgen, modelestimate, [('con_file', 'tcon_file'),
                                       ('fcon_file', 'fcon_file')]),
            (modelestimate, outputspec, [('copes', 'copes'),
                                         ('varcopes', 'varcopes')]),
        ])
    return modelfit
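
# A minimal wiring sketch, mirroring the commented doctest in the docstring
# (session info and functional data are placeholders):
modelfit = create_nuisance_modelfit_workflow()
modelfit.base_dir = '.'
modelfit.inputs.inputspec.interscan_interval = 3.
modelfit.inputs.inputspec.film_threshold = 1000
modelfit.inputs.inputspec.bases = {'dgamma': {'derivs': False}}
modelfit.inputs.inputspec.model_serial_correlations = True
# modelfit.run()  # requires real inputspec.session_info and inputspec.functional_data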
Example #9
def L1PIPE(): 

	# ---1) Import modules
	import nipype.interfaces.fsl as fsl 
	import nipype.pipeline.engine as pe
	import nipype.algorithms.modelgen as model
	import glob
	from nipype import Function
	import matplotlib
	import nipype.interfaces.utility as util
	import os


	#--- 2) Specify model node
	specify_model = pe.Node(interface=model.SpecifyModel(), name="SPECIFY_MODEL")
	specify_model.inputs.input_units = 'secs'

	runs = input('Please drag in the pre-processed functional data\n')
	runs2 = runs.strip('\'"')

	NIFTIDIR=os.path.split(runs2)[0]

	specify_model.inputs.functional_runs = [runs2]
	specify_model.inputs.time_repetition = float(input('Enter the TR (s)\n'))
	specify_model.inputs.high_pass_filter_cutoff = float(input('Enter the high-pass filter cutoff (s)\n'))
	EVENTFILES = input('Please drag in the directory of 3-column event files\n')
	EVENTFILES2 = EVENTFILES.strip('\'"')
	EVENTFILESLIST = glob.glob(EVENTFILES2 + '/*')
	specify_model.inputs.event_files = sorted(EVENTFILESLIST)


	#--- 3) Level 1 design node.
	Designer=pe.Node(interface=fsl.Level1Design(),name='DESIGN')
	Designer.inputs.interscan_interval = float(specify_model.inputs.time_repetition)
	Designer.inputs.bases = {'dgamma':{'derivs': False}}
	Designer.inputs.model_serial_correlations = False

	#--- 4) Make some contrasts
	cont1=('Task', 'T', ['B1INVFEAR.RUN001', 'B1INVINVFEAR.RUN001', 'B1INVINVNEUT.RUN001', 'B1INVNEUT.RUN001', 'B1SCFEAR.RUN001', 'B1SCNEUT.RUN001', 'B1UPFEAR.RUN001', 'B1UPINVFEAR.RUN001', 'B1UPINVNEUT.RUN001', 'B1UPNEUT.RUN001'], [1, 1, 1, 1, 1, 1, 1, 1, 1, 1])
	cont2=('Up', 'T', ['B1INVFEAR.RUN001', 'B1INVINVFEAR.RUN001', 'B1INVINVNEUT.RUN001', 'B1INVNEUT.RUN001', 'B1SCFEAR.RUN001', 'B1SCNEUT.RUN001', 'B1UPFEAR.RUN001', 'B1UPINVFEAR.RUN001', 'B1UPINVNEUT.RUN001', 'B1UPNEUT.RUN001'], [0, 0, 0, 0, 0, 0, 1, 0, 0, 1])
	cont3=('SC', 'T', ['B1INVFEAR.RUN001', 'B1INVINVFEAR.RUN001', 'B1INVINVNEUT.RUN001', 'B1INVNEUT.RUN001', 'B1SCFEAR.RUN001', 'B1SCNEUT.RUN001', 'B1UPFEAR.RUN001', 'B1UPINVFEAR.RUN001', 'B1UPINVNEUT.RUN001', 'B1UPNEUT.RUN001'], [0, 0, 0, 0, 1, 1, 0, 0, 0, 0])
	cont4=('UpvSC', 'T', ['B1INVFEAR.RUN001', 'B1INVINVFEAR.RUN001', 'B1INVINVNEUT.RUN001', 'B1INVNEUT.RUN001', 'B1SCFEAR.RUN001', 'B1SCNEUT.RUN001', 'B1UPFEAR.RUN001', 'B1UPINVFEAR.RUN001', 'B1UPINVNEUT.RUN001', 'B1UPNEUT.RUN001'], [0, 0, 0, 0, -1, -1, 1, 0, 0, 1])
	Designer.inputs.contrasts=[cont1, cont2, cont3, cont4]

	#--- 5) FSL model node
	Model=pe.Node(interface=fsl.FEATModel(),name='FEATMODEL')

	#--- 6) FILM GLS node
	fgls=pe.Node(interface=fsl.FILMGLS(),name='FILM_GLS')
	fgls.inputs.in_file=runs2

	#--- 7) outputnode for the design image (gets binned otherwise)
	outputnode = pe.Node(interface=util.IdentityInterface(fields=['im','cope','varcope','dof','resid','params','sigmas']),name='outputnode')


	#--- 8)  Plotting node
	def plot(in_file):
		from nilearn import plotting
		import matplotlib.pyplot
		plotting.plot_stat_map(stat_map_img=in_file, display_mode='z', cut_coords=10, threshold=0.)
		matplotlib.pyplot.show()



	plotter=pe.MapNode(Function(input_names=['in_file'],output_names='display',function=plot),iterfield=['in_file'],name='PLOTTER')

	workflow = pe.Workflow(name='L1PIPE')
	

	workflow.connect(specify_model,'session_info',Designer,'session_info')
	workflow.connect(Designer,'fsf_files',Model,'fsf_file')
	workflow.connect(Designer,'ev_files',Model,'ev_files')
	workflow.connect(Model,'design_file',fgls,'design_file')
	workflow.connect(Model,'con_file',fgls,'tcon_file')
	workflow.connect(Model,'design_image',outputnode,'im')
	
	# Feed the z stats to the plotter.
	workflow.connect(fgls,'zstats',plotter,'in_file')
	workflow.connect(fgls,'copes',outputnode,'cope')
	workflow.connect(fgls,'varcopes',outputnode,'varcope')
	workflow.connect(fgls,'dof_file',outputnode,'dof')
	workflow.connect(fgls,'residual4d',outputnode,'resid')
	workflow.connect(fgls,'param_estimates',outputnode,'params')
	workflow.connect(fgls,'sigmasquareds',outputnode,'sigmas')
	




	workflow.base_dir = NIFTIDIR
	workflow.write_graph(graph2use='exec')
	workflow.run()
Example #10
def init_glm_wf(conditions,
                contrasts,
                repetition_time,
                use_mov_pars,
                name="glm"):
    """
    create workflow to calculate a first level glm for task functional data

    :param conditions: dictionary of conditions with onsets and durations 
        by condition names
    :param contrasts: dictionary of contrasts by names
    :param repetition_time: repetition time
    :param use_mov_pars: if true, regress out movement parameters when 
        calculating the glm
    :param name: workflow name (Default value = "glm")

    """
    workflow = pe.Workflow(name=name)

    # inputs are the bold file, the mask file and the confounds file
    # that contains the movement parameters
    inputnode = pe.Node(niu.IdentityInterface(
        fields=["bold_file", "mask_file", "confounds_file"]),
                        name="inputnode")

    # transform (unordered) conditions dictionary into three (ordered) lists

    names = list(conditions.keys())
    onsets = [conditions[k]["onsets"] for k in names]
    durations = [conditions[k]["durations"] for k in names]

    # first level model specification
    modelspec = pe.Node(interface=model.SpecifyModel(
        input_units="secs",
        high_pass_filter_cutoff=128.,
        time_repetition=repetition_time,
        subject_info=Bunch(conditions=names,
                           onsets=onsets,
                           durations=durations)),
                        name="modelspec")

    # transform contrasts dictionary to nipype list data structure
    contrasts_ = [[k, "T"] +
                  [list(i) for i in zip(*[(n, val) for n, val in v.items()])]
                  for k, v in contrasts.items()]
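    # e.g. contrasts {"task>rest": {"task": 1, "rest": -1}} becomes
    # [["task>rest", "T", ["task", "rest"], [1, -1]]]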

    connames = [k[0] for k in contrasts_]

    # outputs are cope, varcope and zstat for each contrast and a dof_file
    outputnode = pe.Node(niu.IdentityInterface(fields=sum(
        [["%s_img" % conname,
          "%s_varcope" % conname,
          "%s_zstat" % conname] for conname in connames], []) + ["dof_file"]),
                         name="outputnode")

    outputnode._interface.names = connames

    # generate design from first level specification
    level1design = pe.Node(interface=fsl.Level1Design(
        contrasts=contrasts_,
        interscan_interval=repetition_time,
        model_serial_correlations=True,
        bases={"dgamma": {
            "derivs": False
        }}),
                           name="level1design")

    # generate required input files for FILMGLS from design
    modelgen = pe.Node(interface=fsl.FEATModel(), name="modelgen")

    # calculate range of image values to determine cutoff value
    # for FILMGLS
    stats = pe.Node(interface=fsl.ImageStats(op_string="-R"), name="stats")

    # actually estimate the first level model
    modelestimate = pe.Node(interface=fsl.FILMGLS(smooth_autocorr=True,
                                                  mask_size=5),
                            name="modelestimate")

    # mask regression outputs
    maskimgs = pe.MapNode(interface=fsl.ApplyMask(),
                          name="maskimgs",
                          iterfield=["in_file"])
    maskvarcopes = pe.MapNode(interface=fsl.ApplyMask(),
                              name="maskvarcopes",
                              iterfield=["in_file"])
    maskzstats = pe.MapNode(interface=fsl.ApplyMask(),
                            name="maskzstats",
                            iterfield=["in_file"])

    # split regression outputs by name
    splitimgs = pe.Node(interface=niu.Split(splits=[1
                                                    for conname in connames]),
                        name="splitimgs")
    splitvarcopes = pe.Node(
        interface=niu.Split(splits=[1 for conname in connames]),
        name="splitvarcopes")
    splitzstats = pe.Node(
        interface=niu.Split(splits=[1 for conname in connames]),
        name="splitzstats")

    # pass movement parameters to glm model specification if requested
    c = [("bold_file", "functional_runs")]
    if use_mov_pars:
        c.append(("confounds_file", "realignment_parameters"))

    workflow.connect([
        (inputnode, modelspec, c),
        (inputnode, modelestimate, [("bold_file", "in_file")]),
        (modelspec, level1design, [("session_info", "session_info")]),
        (level1design, modelgen, [("fsf_files", "fsf_file"),
                                  ("ev_files", "ev_files")]),
        (inputnode, stats, [("bold_file", "in_file")]),
        (stats, modelestimate, [(("out_stat", get_float), "threshold")]),
        (modelgen, modelestimate, [("design_file", "design_file"),
                                   ("con_file", "tcon_file")]),
        (inputnode, maskimgs, [("mask_file", "mask_file")]),
        (inputnode, maskvarcopes, [("mask_file", "mask_file")]),
        (inputnode, maskzstats, [("mask_file", "mask_file")]),
        (modelestimate, maskimgs, [
            (("copes", flatten), "in_file"),
        ]),
        (modelestimate, maskvarcopes, [
            (("varcopes", flatten), "in_file"),
        ]),
        (modelestimate, maskzstats, [
            (("zstats", flatten), "in_file"),
        ]),
        (modelestimate, outputnode, [("dof_file", "dof_file")]),
        (maskimgs, splitimgs, [
            ("out_file", "inlist"),
        ]),
        (maskvarcopes, splitvarcopes, [
            ("out_file", "inlist"),
        ]),
        (maskzstats, splitzstats, [
            ("out_file", "inlist"),
        ]),
    ])

    # connect outputs named for the contrasts
    for i, conname in enumerate(connames):
        workflow.connect(splitimgs, "out%i" % (i + 1), outputnode,
                         "%s_img" % conname)
        workflow.connect(splitvarcopes, "out%i" % (i + 1), outputnode,
                         "%s_varcope" % conname)
        workflow.connect(splitzstats, "out%i" % (i + 1), outputnode,
                         "%s_zstat" % conname)

    return workflow, connames
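
# A usage sketch illustrating the expected dictionary shapes (condition and
# contrast names are hypothetical):
conditions = {
    'go': {'onsets': [0, 30, 60], 'durations': [15, 15, 15]},
    'stop': {'onsets': [15, 45, 75], 'durations': [15, 15, 15]},
}
contrasts = {'go>stop': {'go': 1, 'stop': -1}}
wf, connames = init_glm_wf(conditions, contrasts,
                           repetition_time=2.0, use_mov_pars=True)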
Example #11
#Makes a model specification compatible with spm/fsl designers.
NodeHash_1e7a3420 = pe.MapNode(interface = modelgen.SpecifyModel(), name = 'NodeName_1e7a3420', iterfield = ['functional_runs', 'subject_info'])
NodeHash_1e7a3420.inputs.high_pass_filter_cutoff = 0
NodeHash_1e7a3420.inputs.input_units = 'secs'
NodeHash_1e7a3420.inputs.time_repetition = 2.0

#Generate FEAT specific files
NodeHash_9bb0d40 = pe.MapNode(interface = fsl.Level1Design(), name = 'NodeName_9bb0d40', iterfield = ['session_info'])
NodeHash_9bb0d40.inputs.bases = {'dgamma':{'derivs': False}}
NodeHash_9bb0d40.inputs.contrasts = [('con-incon', 'T', ['congruent_correct', 'congruent_correct'], [-1, 1])]
NodeHash_9bb0d40.inputs.interscan_interval = 2.0
NodeHash_9bb0d40.inputs.model_serial_correlations = True

#Wraps command **feat_model**
NodeHash_6b33f50 = pe.MapNode(interface = fsl.FEATModel(), name = 'NodeName_6b33f50', iterfield = ['ev_files', 'fsf_file'])

#Wraps command **film_gls**
NodeHash_2762fb60 = pe.MapNode(interface = fsl.FILMGLS(), name = 'NodeName_2762fb60', iterfield = ['design_file', 'in_file', 'tcon_file'])

#Wraps command **fslmaths**
NodeHash_2df82970 = pe.MapNode(interface = fsl.MeanImage(), name = 'NodeName_2df82970', iterfield = ['in_file'])
NodeHash_2df82970.inputs.dimension = 'T'

#Generic datasink module to store structured outputs
NodeHash_33a4bec0 = pe.Node(interface = io.DataSink(), name = 'NodeName_33a4bec0')
NodeHash_33a4bec0.inputs.base_directory = '/tmp/FIRSTLEVEL'

#Basic interface class to select specific elements from a list
NodeHash_7caa820 = pe.MapNode(interface = utility.Select(), name = 'NodeName_7caa820', iterfield = ['inlist'])
NodeHash_7caa820.inputs.index = 0
Example #12
#Makes a model specification compatible with spm/fsl designers.
NodeHash_214dcae0 = pe.MapNode(interface=modelgen.SpecifyModel(),
                               name='NodeName_214dcae0',
                               iterfield=['functional_runs', 'subject_info'])
NodeHash_214dcae0.inputs.input_units = 'secs'
NodeHash_214dcae0.inputs.time_repetition = 2.0

#Generate FEAT specific files
NodeHash_2087a210 = pe.MapNode(interface=fsl.Level1Design(),
                               name='NodeName_2087a210',
                               iterfield=['session_info'])
NodeHash_2087a210.inputs.bases = {'dgamma': {'derivs': False}}
NodeHash_2087a210.inputs.contrasts = [
    ('con-incon', 'T', ['congruent_correct', 'congruent_correct'], [-1, 1])
]
NodeHash_2087a210.inputs.interscan_interval = 2.0
NodeHash_2087a210.inputs.model_serial_correlations = True

#Wraps command **feat_model**
NodeHash_219c0190 = pe.MapNode(interface=fsl.FEATModel(),
                               name='NodeName_219c0190',
                               iterfield=['ev_files', 'fsf_file'])

#Wraps command **film_gls**
NodeHash_215cb480 = pe.MapNode(
    interface=fsl.FILMGLS(),
    name='NodeName_215cb480',
    iterfield=['design_file', 'in_file', 'tcon_file'])

#Wraps command **fslmaths**
NodeHash_23b7ddc0 = pe.MapNode(interface=fsl.MeanImage(),
                               name='NodeName_23b7ddc0',
                               iterfield=['in_file'])
NodeHash_23b7ddc0.inputs.dimension = 'T'
Example #13
#Makes a model specification compatible with spm/fsl designers.
NodeHash_1f14a780 = pe.MapNode(interface = modelgen.SpecifyModel(), name = 'NodeName_1f14a780', iterfield = ['functional_runs', 'subject_info'])
NodeHash_1f14a780.inputs.high_pass_filter_cutoff = 0
NodeHash_1f14a780.inputs.input_units = 'secs'
NodeHash_1f14a780.inputs.time_repetition = 2.0

#Generate FEAT specific files
NodeHash_1ee3fa40 = pe.MapNode(interface=fsl.Level1Design(), name='NodeName_1ee3fa40', iterfield=['session_info'])
NodeHash_1ee3fa40.inputs.bases = {'dgamma': {'derivs': False}}
# 'incongruent_correct' is assumed as the second EV name (see the note in the
# first copy of this snippet above)
NodeHash_1ee3fa40.inputs.contrasts = [('con-incon', 'T', ['congruent_correct', 'incongruent_correct'], [-1, 1])]
NodeHash_1ee3fa40.inputs.interscan_interval = 2.0
NodeHash_1ee3fa40.inputs.model_serial_correlations = True

#Wraps command **feat_model**
NodeHash_1ff83800 = pe.MapNode(interface=fsl.FEATModel(), name='NodeName_1ff83800', iterfield=['ev_files', 'fsf_file'])

#Wraps command **film_gls**
NodeHash_215a5a10 = pe.MapNode(interface=fsl.FILMGLS(), name='NodeName_215a5a10', iterfield=['design_file', 'in_file', 'tcon_file'])

#Wraps command **fslmaths**
NodeHash_218a6ba0 = pe.MapNode(interface=fsl.MeanImage(), name='NodeName_218a6ba0', iterfield=['in_file'])
NodeHash_218a6ba0.inputs.dimension = 'T'

#Generic datasink module to store structured outputs
NodeHash_218b7a50 = pe.Node(interface=io.DataSink(), name='NodeName_218b7a50')
NodeHash_218b7a50.inputs.base_directory = '/tmp/FIRSTLEVEL'

#Basic interface class to select specific elements from a list
NodeHash_23ccf700 = pe.MapNode(interface=utility.Select(), name='NodeName_23ccf700', iterfield=['inlist'])
NodeHash_23ccf700.inputs.index = 0
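
These machine-generated node definitions only become a pipeline once they are wired together. A minimal sketch of the missing connections, assuming the variable names from the snippet directly above and the SpecifyModel -> Level1Design -> FEATModel -> FILMGLS field names used throughout these examples (the functional-run and mean-image connections are omitted because their upstream nodes are not part of this snippet):

import nipype

# Hypothetical wiring; the DataSink destination name ('zstat1') is arbitrary
analysisflow = nipype.Workflow('FirstLevelFlow')
analysisflow.connect(NodeHash_1f14a780, 'session_info', NodeHash_1ee3fa40, 'session_info')
analysisflow.connect(NodeHash_1ee3fa40, 'fsf_files', NodeHash_1ff83800, 'fsf_file')
analysisflow.connect(NodeHash_1ee3fa40, 'ev_files', NodeHash_1ff83800, 'ev_files')
analysisflow.connect(NodeHash_1ff83800, 'design_file', NodeHash_215a5a10, 'design_file')
analysisflow.connect(NodeHash_1ff83800, 'con_file', NodeHash_215a5a10, 'tcon_file')
analysisflow.connect(NodeHash_215a5a10, 'zstats', NodeHash_23ccf700, 'inlist')
analysisflow.connect(NodeHash_23ccf700, 'out', NodeHash_218b7a50, 'zstat1')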
Example #14
def first_level_wf(pipeline, subject_id, task_id, output_dir):
    """
    First level workflow
    """
    workflow = pe.Workflow(name='_'.join((pipeline, subject_id, task_id)))

    inputnode = pe.Node(niu.IdentityInterface(fields=[
        'bold_preproc', 'contrasts', 'confounds', 'brainmask', 'events_file'
    ]),
                        name='inputnode')

    outputnode = pe.Node(
        niu.IdentityInterface(fields=['sigma_pre', 'sigma_post', 'out_stats']),
        name='outputnode')

    conf2movpar = pe.Node(niu.Function(function=_confounds2movpar),
                          name='conf2movpar')
    masker = pe.Node(fsl.ApplyMask(), name='masker')
    bim = pe.Node(afni.BlurInMask(fwhm=5.0, outputtype='NIFTI_GZ'),
                  name='bim',
                  mem_gb=20)

    ev = pe.Node(EventsFilesForTask(task=task_id), name='events')

    l1 = pe.Node(SpecifyModel(
        input_units='secs',
        time_repetition=2,
        high_pass_filter_cutoff=100,
        parameter_source='FSL',
    ),
                 name='l1')

    l1model = pe.Node(fsl.Level1Design(interscan_interval=2,
                                       bases={'dgamma': {
                                           'derivs': True
                                       }},
                                       model_serial_correlations=True),
                      name='l1design')

    l1featmodel = pe.Node(fsl.FEATModel(), name='l1model')
    l1estimate = pe.Node(fsl.FEAT(), name='l1estimate', mem_gb=40)

    pre_smooth_afni = pe.Node(afni.FWHMx(combine=True,
                                         detrend=True,
                                         args='-ShowMeClassicFWHM'),
                              name='smooth_pre_afni',
                              mem_gb=20)
    post_smooth_afni = pe.Node(afni.FWHMx(combine=True,
                                          detrend=True,
                                          args='-ShowMeClassicFWHM'),
                               name='smooth_post_afni',
                               mem_gb=20)

    pre_smooth = pe.Node(fsl.SmoothEstimate(), name='smooth_pre', mem_gb=20)
    post_smooth = pe.Node(fsl.SmoothEstimate(), name='smooth_post', mem_gb=20)

    def _resels(val):
        return val**(1 / 3.)

    def _fwhm(fwhm):
        from numpy import mean
        return float(mean(fwhm, dtype=float))

    workflow.connect([
        (inputnode, masker, [('bold_preproc', 'in_file'),
                             ('brainmask', 'mask_file')]),
        (inputnode, ev, [('events_file', 'in_file')]),
        (inputnode, l1model, [('contrasts', 'contrasts')]),
        (inputnode, conf2movpar, [('confounds', 'in_confounds')]),
        (inputnode, bim, [('brainmask', 'mask')]),
        (masker, bim, [('out_file', 'in_file')]),
        (bim, l1, [('out_file', 'functional_runs')]),
        (ev, l1, [('event_files', 'event_files')]),
        (conf2movpar, l1, [('out', 'realignment_parameters')]),
        (l1, l1model, [('session_info', 'session_info')]),
        (ev, l1model, [('orthogonalization', 'orthogonalization')]),
        (l1model, l1featmodel, [('fsf_files', 'fsf_file'),
                                ('ev_files', 'ev_files')]),
        (l1model, l1estimate, [('fsf_files', 'fsf_file')]),
        # Smooth
        (inputnode, pre_smooth, [('bold_preproc', 'zstat_file'),
                                 ('brainmask', 'mask_file')]),
        (bim, post_smooth, [('out_file', 'zstat_file')]),
        (inputnode, post_smooth, [('brainmask', 'mask_file')]),
        (pre_smooth, outputnode, [(('resels', _resels), 'sigma_pre')]),
        (post_smooth, outputnode, [(('resels', _resels), 'sigma_post')]),

        # Smooth with AFNI
        (inputnode, pre_smooth_afni, [('bold_preproc', 'in_file'),
                                      ('brainmask', 'mask')]),
        (bim, post_smooth_afni, [('out_file', 'in_file')]),
        (inputnode, post_smooth_afni, [('brainmask', 'mask')]),
    ])

    # Writing outputs
    csv = pe.Node(AddCSVRow(in_file=str(output_dir / 'smoothness.csv')),
                  name='addcsv_%s_%s' % (subject_id, pipeline))
    csv.inputs.sub_id = subject_id
    csv.inputs.pipeline = pipeline

    # Datasinks
    ds_stats = pe.Node(niu.Function(function=_feat_stats), name='ds_stats')
    ds_stats.inputs.subject_id = subject_id
    ds_stats.inputs.task_id = task_id
    ds_stats.inputs.variant = pipeline
    ds_stats.inputs.out_path = output_dir
    setattr(ds_stats.interface, '_always_run', True)

    workflow.connect([
        (outputnode, csv, [('sigma_pre', 'smooth_pre'),
                           ('sigma_post', 'smooth_post')]),
        (pre_smooth_afni, csv, [(('fwhm', _fwhm), 'fwhm_pre')]),
        (post_smooth_afni, csv, [(('fwhm', _fwhm), 'fwhm_post')]),
        (l1estimate, ds_stats, [('feat_dir', 'feat_dir')]),
        (ds_stats, outputnode, [('out', 'out_stats')]),
    ])
    return workflow
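
A short, hypothetical driver for this workflow; every path and identifier below is a placeholder, and output_dir must be a pathlib.Path because the function joins it with the / operator:

from pathlib import Path

wf = first_level_wf('fmriprep', 'sub-01', 'stopsignal', Path('/tmp/out'))  # placeholders
wf.base_dir = '/tmp/work'
wf.inputs.inputnode.bold_preproc = '/data/sub-01_task-stopsignal_bold_preproc.nii.gz'  # placeholder
wf.inputs.inputnode.brainmask = '/data/sub-01_brainmask.nii.gz'  # placeholder
wf.inputs.inputnode.events_file = '/data/sub-01_task-stopsignal_events.tsv'  # placeholder
wf.inputs.inputnode.confounds = '/data/sub-01_confounds.tsv'  # placeholder
wf.inputs.inputnode.contrasts = [('go vs baseline', 'T', ['go'], [1])]  # placeholder
# wf.run()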
Example #15
def init_taskbased_wf(analysis=None, memcalc=MemoryCalculator()):
    """
    create workflow to calculate a first level glm for task functional data
    """

    assert isinstance(analysis, Analysis)
    assert isinstance(analysis.tags, Tags)

    # make bold file variant specification
    boldfilefields = ["bold_file"]
    varianttupls = [("space", analysis.tags.space)]
    if analysis.tags.grand_mean_scaled is not None:
        assert isinstance(analysis.tags.grand_mean_scaled, GrandMeanScaledTag)
        varianttupls.append(analysis.tags.grand_mean_scaled.as_tupl())
    if analysis.tags.band_pass_filtered is not None:
        assert isinstance(analysis.tags.band_pass_filtered,
                          BandPassFilteredTag)
        assert analysis.tags.band_pass_filtered.type == "gaussian"
        varianttupls.append(analysis.tags.band_pass_filtered.as_tupl())
    if analysis.tags.confounds_removed is not None:
        assert isinstance(analysis.tags.confounds_removed, ConfoundsRemovedTag)
        confounds_removed_names = tuple(
            name for name in analysis.tags.confounds_removed.names
            if "aroma_motion" in name)
        varianttupls.append(("confounds_removed", confounds_removed_names))
        confounds_extract_names = tuple(
            name for name in analysis.tags.confounds_removed.names
            if "aroma_motion" not in name)
        if len(confounds_extract_names) > 0:
            boldfilefields.append("confounds_file")
            varianttupls.append(("confounds_extract", confounds_extract_names))
    if analysis.tags.smoothed is not None:
        assert isinstance(analysis.tags.smoothed, SmoothedTag)
        varianttupls.append(analysis.tags.smoothed.as_tupl())
    variantdict = dict(varianttupls)

    boldfilevariant = (tuple(boldfilefields), tuple(varianttupls))

    assert analysis.name is not None
    workflow = pe.Workflow(name=analysis.name)

    # inputs are the bold file, the mask file and the confounds file
    inputnode = pe.Node(
        niu.IdentityInterface(fields=[
            *boldfilefields, "mask_file", "condition_files", "metadata"
        ]),
        name="inputnode",
    )

    # parse condition files into three (ordered) lists
    parseconditionfile = pe.Node(
        interface=ParseConditionFile(),
        name="parseconditionfile",
    )
    workflow.connect(inputnode, "condition_files", parseconditionfile,
                     "in_any")

    def get_repetition_time(dic):
        return dic.get("RepetitionTime")

    # first level model specification
    modelspec = pe.Node(
        interface=model.SpecifyModel(input_units="secs", ),
        name="modelspec",
    )
    workflow.connect([
        (
            inputnode,
            modelspec,
            [
                ("bold_file", "functional_runs"),
                (("metadata", get_repetition_time), "time_repetition"),
            ],
        ),
        (parseconditionfile, modelspec, [("subject_info", "subject_info")]),
    ])
    if "band_pass_filtered" in variantdict:
        modelspec.inputs.high_pass_filter_cutoff = float(
            analysis.tags.band_pass_filtered.high)
    if "confounds_extract" in variantdict:
        workflow.connect([(inputnode, modelspec,
                           [("confounds_file", "realignment_parameters")])])

    # transform contrasts dictionary to nipype list data structure
    # (illustrated after this function)
    contrasts = [[
        contrast.name,
        contrast.type.upper(), *map(list, zip(*contrast.values.items()))
    ] for contrast in analysis.contrasts]

    # generate design from first level specification
    level1design = pe.Node(
        interface=fsl.Level1Design(
            contrasts=contrasts,
            model_serial_correlations=True,
            bases={"dgamma": {
                "derivs": False
            }},
        ),
        name="level1design",
    )
    workflow.connect([
        (
            inputnode,
            level1design,
            [(("metadata", get_repetition_time), "interscan_interval")],
        ),
        (modelspec, level1design, [("session_info", "session_info")]),
    ])

    # generate required input files for FILMGLS from design
    modelgen = pe.Node(interface=fsl.FEATModel(),
                       name="modelgen",
                       iterfield=["fsf_file", "ev_files"])
    workflow.connect([(
        level1design,
        modelgen,
        [("fsf_files", "fsf_file"), ("ev_files", "ev_files")],
    )])

    # calculate range of image values to determine cutoff value
    # for FILMGLS
    boldfilecutoff = pe.Node(interface=fsl.ImageStats(op_string="-R"),
                             name="boldfilecutoff")
    workflow.connect([(inputnode, boldfilecutoff, [("bold_file", "in_file")])])

    # actually estimate the first level model
    modelestimate = pe.Node(
        interface=fsl.FILMGLS(smooth_autocorr=True, mask_size=5),
        name="modelestimate",
        iterfield=["design_file", "in_file", "tcon_file"],
    )
    workflow.connect([
        (inputnode, modelestimate, [("bold_file", "in_file")]),
        (boldfilecutoff, modelestimate, [(("out_stat", firstfloat),
                                          "threshold")]),
        (
            modelgen,
            modelestimate,
            [("design_file", "design_file"), ("con_file", "tcon_file")],
        ),
    ])

    # make dof volume
    makedofvolume = pe.MapNode(
        interface=MakeDofVolume(),
        iterfield=["dof_file", "cope_file"],
        name="makedofvolume",
    )
    workflow.connect([
        (
            modelestimate,
            makedofvolume,
            [(("copes", first), "cope_file"), ("dof_file", "dof_file")],
        ),
    ])

    outputnode = pe.Node(
        interface=MakeResultdicts(keys=[
            "firstlevelanalysisname",
            "firstlevelfeaturename",
            "cope",
            "varcope",
            "zstat",
            "dof_file",
            "mask_file",
        ]),
        name="outputnode",
    )
    outputnode.inputs.firstlevelanalysisname = analysis.name
    outputnode.inputs.firstlevelfeaturename = list(map(first, contrasts))
    workflow.connect([
        (inputnode, outputnode, [("metadata", "basedict"),
                                 ("mask_file", "mask_file")]),
        (
            modelestimate,
            outputnode,
            [
                (("copes", ravel), "cope"),
                (("varcopes", ravel), "varcope"),
                (("zstats", ravel), "zstat"),
            ],
        ),
        (makedofvolume, outputnode, [("out_file", "dof_file")]),
    ])

    return workflow, (boldfilevariant, )
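
The contrast conversion marked above turns each contrast object into nipype's contrast-tuple format; a self-contained illustration with a hypothetical stand-in object:

from types import SimpleNamespace

# Hypothetical stand-in for an element of analysis.contrasts
contrast = SimpleNamespace(name='con_gt_incon', type='t',
                           values={'congruent': 1.0, 'incongruent': -1.0})
nipype_contrast = [contrast.name, contrast.type.upper(),
                   *map(list, zip(*contrast.values.items()))]
# -> ['con_gt_incon', 'T', ['congruent', 'incongruent'], [1.0, -1.0]]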
Example #16
def fsl_run_level_wf(
    model,
    step,
    bids_dir,
    output_dir,
    work_dir,
    subject_id,
    database_path,
    smoothing_fwhm=None,
    smoothing_level=None,
    smoothing_type=None,
    use_rapidart=False,
    detrend_poly=None,
    align_volumes=None,
    smooth_autocorrelations=False,
    despike=False,
    name="fsl_run_level_wf",
):
    """Generate run level workflow for a given model."""
    bids_dir = Path(bids_dir)
    work_dir = Path(work_dir)
    workflow = pe.Workflow(name=name)

    level = step["Level"]

    dimensionality = 3  # Nipype FSL.SUSAN Default
    if smoothing_type == "inp":
        dimensionality = 2

    workflow.__desc__ = ""
    (work_dir / model["Name"]).mkdir(exist_ok=True)

    include_entities = {}
    if "Input" in model:
        if "Include" in model["Input"]:
            include_entities = model["Input"]["Include"]
    include_entities.update({"subject": subject_id})

    getter = pe.Node(
        BIDSGet(
            database_path=database_path,
            fixed_entities=include_entities,
            align_volumes=align_volumes,
        ),
        name="func_select",
    )

    get_info = pe.MapNode(
        GetRunModelInfo(model=step, detrend_poly=detrend_poly),
        iterfield=[
            "metadata_file", "regressor_file", "events_file", "entities"
        ],
        name=f"get_{level}_info",
    )

    despiker = pe.MapNode(
        afni.Despike(outputtype="NIFTI_GZ"),
        iterfield=["in_file"],
        name="despiker",
    )

    realign_runs = pe.MapNode(
        fsl.MCFLIRT(output_type="NIFTI_GZ", interpolation="sinc"),
        iterfield=["in_file", "ref_file"],
        name="func_realign",
    )

    wrangle_volumes = pe.MapNode(
        IdentityInterface(fields=["functional_file"]),
        iterfield=["functional_file"],
        name="wrangle_volumes",
    )

    specify_model = pe.MapNode(
        modelgen.SpecifyModel(high_pass_filter_cutoff=-1.0,
                              input_units="secs"),
        iterfield=["functional_runs", "subject_info", "time_repetition"],
        name=f"model_{level}_specify",
    )

    fit_model = pe.MapNode(
        IdentityInterface(
            fields=[
                "session_info", "interscan_interval", "contrasts",
                "functional_data"
            ],
            mandatory_inputs=True,
        ),
        iterfield=[
            "functional_data", "session_info", "interscan_interval",
            "contrasts"
        ],
        name=f"model_{level}_fit",
    )

    first_level_design = pe.MapNode(
        fsl.Level1Design(
            bases={"dgamma": {
                "derivs": False
            }},
            model_serial_correlations=False,
        ),
        iterfield=["session_info", "interscan_interval", "contrasts"],
        name=f"model_{level}_design",
    )

    generate_model = pe.MapNode(
        fsl.FEATModel(output_type="NIFTI_GZ"),
        iterfield=["fsf_file", "ev_files"],
        name=f"model_{level}_generate",
    )

    estimate_model = pe.MapNode(
        fsl.FILMGLS(
            threshold=0.0,  # smooth_autocorr=True
            output_type="NIFTI_GZ",
            results_dir="results",
            smooth_autocorr=False,
            autocorr_noestimate=True,
        ),
        iterfield=["design_file", "in_file", "tcon_file"],
        name=f"model_{level}_estimate",
    )

    if smooth_autocorrelations:
        first_level_design.inputs.model_serial_correlations = True
        estimate_model.inputs.smooth_autocorr = True
        estimate_model.inputs.autocorr_noestimate = False

    calculate_p = pe.MapNode(
        fsl.ImageMaths(output_type="NIFTI_GZ",
                       op_string="-ztop",
                       suffix="_pval"),
        iterfield=["in_file"],
        name=f"model_{level}_caculate_p",
    )

    image_pattern = ("[sub-{subject}/][ses-{session}/]"
                     "[sub-{subject}_][ses-{session}_]"
                     "task-{task}_[acq-{acquisition}_]"
                     "[rec-{reconstruction}_][run-{run}_]"
                     "[echo-{echo}_][space-{space}_]contrast-{contrast}_"
                     "stat-{stat<effect|variance|z|p|t|F>}_statmap.nii.gz")

    run_rapidart = pe.MapNode(
        ra.ArtifactDetect(
            use_differences=[True, False],
            use_norm=True,
            zintensity_threshold=3,
            norm_threshold=1,
            bound_by_brainmask=True,
            mask_type="file",
            parameter_source="FSL",
        ),
        iterfield=["realignment_parameters", "realigned_files", "mask_file"],
        name="rapidart_run",
    )

    reshape_rapidart = pe.MapNode(
        Function(
            input_names=[
                "run_info", "functional_file", "outlier_file",
                "contrast_entities"
            ],
            output_names=["run_info", "contrast_entities"],
            function=utils.reshape_ra,
        ),
        iterfield=[
            "run_info", "functional_file", "outlier_file", "contrast_entities"
        ],
        name="reshape_rapidart",
    )

    mean_img = pe.MapNode(
        fsl.ImageMaths(output_type="NIFTI_GZ",
                       op_string="-Tmean",
                       suffix="_mean"),
        iterfield=["in_file", "mask_file"],
        name="smooth_susan_avgimg",
    )

    median_img = pe.MapNode(
        fsl.ImageStats(output_type="NIFTI_GZ", op_string="-k %s -p 50"),
        iterfield=["in_file", "mask_file"],
        name="smooth_susan_medimg",
    )

    merge = pe.Node(Merge(2, axis="hstack"), name="smooth_merge")

    run_susan = pe.MapNode(
        fsl.SUSAN(output_type="NIFTI_GZ"),
        iterfield=["in_file", "brightness_threshold", "usans"],
        name="smooth_susan",
    )

    mask_functional = pe.MapNode(ApplyMask(),
                                 iterfield=["in_file", "mask_file"],
                                 name="mask_functional")

    # Exists solely to correct undesirable behavior of FSL
    # that results in loss of constant columns
    correct_matrices = pe.MapNode(
        Function(
            input_names=["design_matrix"],
            output_names=["design_matrix"],
            function=utils.correct_matrix,
        ),
        iterfield=["design_matrix"],
        run_without_submitting=True,
        name=f"correct_{level}_matrices",
    )

    collate = pe.Node(
        MergeAll(
            fields=[
                "effect_maps",
                "variance_maps",
                "zscore_maps",
                "pvalue_maps",
                "tstat_maps",
                "contrast_metadata",
            ],
            check_lengths=True,
        ),
        name=f"collate_{level}",
    )

    collate_outputs = pe.Node(
        CollateWithMetadata(
            fields=[
                "effect_maps", "variance_maps", "zscore_maps", "pvalue_maps",
                "tstat_maps"
            ],
            field_to_metadata_map={
                "effect_maps": {
                    "stat": "effect"
                },
                "variance_maps": {
                    "stat": "variance"
                },
                "zscore_maps": {
                    "stat": "z"
                },
                "pvalue_maps": {
                    "stat": "p"
                },
                "tstat_maps": {
                    "stat": "t"
                },
            },
        ),
        name=f"collate_{level}_outputs",
    )

    plot_matrices = pe.MapNode(
        PlotMatrices(output_dir=output_dir, database_path=database_path),
        iterfield=["mat_file", "con_file", "entities", "run_info"],
        run_without_submitting=True,
        name=f"plot_{level}_matrices",
    )

    ds_contrast_maps = pe.MapNode(
        BIDSDataSink(base_directory=output_dir, path_patterns=image_pattern),
        iterfield=["entities", "in_file"],
        run_without_submitting=True,
        name=f"ds_{level}_contrast_maps",
    )

    wrangle_outputs = pe.Node(
        IdentityInterface(fields=["contrast_metadata", "contrast_maps"]),
        name=f"wrangle_{level}_outputs",
    )

    # Setup connections among nodes
    workflow.connect([(
        getter,
        get_info,
        [
            ("metadata_files", "metadata_file"),
            ("events_files", "events_file"),
            ("regressor_files", "regressor_file"),
            ("entities", "entities"),
        ],
    )])

    if align_volumes and despike:
        workflow.connect([
            (getter, despiker, [("functional_files", "in_file")]),
            (despiker, realign_runs, [("out_file", "in_file")]),
            (getter, realign_runs, [("reference_files", "ref_file")]),
            (
                realign_runs,
                wrangle_volumes,
                [("out_file", "functional_file")],
            ),
        ])
    elif align_volumes and not despike:
        workflow.connect([
            (
                getter,
                realign_runs,
                [("functional_files", "in_file"),
                 ("reference_files", "ref_file")],
            ),
            (
                realign_runs,
                wrangle_volumes,
                [("out_file", "functional_file")],
            ),
        ])
    elif despike:
        workflow.connect([
            (getter, despiker, [("functional_files", "in_file")]),
            (despiker, wrangle_volumes, [("out_file", "functional_file")]),
        ])
    else:
        workflow.connect([(getter, wrangle_volumes, [("functional_files",
                                                      "functional_file")])])

    if use_rapidart:
        workflow.connect([
            (get_info, run_rapidart, [("motion_parameters",
                                       "realignment_parameters")]),
            (getter, run_rapidart, [("mask_files", "mask_file")]),
            (
                wrangle_volumes,
                run_rapidart,
                [("functional_file", "realigned_files")],
            ),
            (
                run_rapidart,
                reshape_rapidart,
                [("outlier_files", "outlier_file")],
            ),
            (
                get_info,
                reshape_rapidart,
                [("run_info", "run_info"),
                 ("contrast_entities", "contrast_entities")],
            ),
            (wrangle_volumes, reshape_rapidart, [("functional_file",
                                                  "functional_file")]),
            (
                reshape_rapidart,
                specify_model,
                [("run_info", "subject_info")],
            ),
            (reshape_rapidart, plot_matrices, [("run_info", "run_info")]),
            (reshape_rapidart, collate, [("contrast_entities",
                                          "contrast_metadata")]),
        ])
    else:
        workflow.connect([
            (get_info, specify_model, [("run_info", "subject_info")]),
            (get_info, plot_matrices, [("run_info", "run_info")]),
            (
                get_info,
                collate,
                [("contrast_entities", "contrast_metadata")],
            ),
        ])

    if smoothing_level == "l1" or smoothing_level == "run":
        run_susan.inputs.fwhm = smoothing_fwhm
        run_susan.inputs.dimension = dimensionality
        estimate_model.inputs.mask_size = smoothing_fwhm
        workflow.connect([
            (wrangle_volumes, mean_img, [("functional_file", "in_file")]),
            (
                wrangle_volumes,
                median_img,
                [("functional_file", "in_file")],
            ),
            (getter, mean_img, [("mask_files", "mask_file")]),
            (getter, median_img, [("mask_files", "mask_file")]),
            (mean_img, merge, [("out_file", "in1")]),
            (median_img, merge, [("out_stat", "in2")]),
            (wrangle_volumes, run_susan, [("functional_file", "in_file")]),
            (
                median_img,
                run_susan,
                [(
                    ("out_stat", utils.get_btthresh),
                    "brightness_threshold",
                )],
            ),
            (merge, run_susan, [(("out", utils.get_usans), "usans")]),
            (getter, mask_functional, [("mask_files", "mask_file")]),
            (run_susan, mask_functional, [("smoothed_file", "in_file")]),
            (
                mask_functional,
                specify_model,
                [("out_file", "functional_runs")],
            ),
            (
                mask_functional,
                fit_model,
                [("out_file", "functional_data")],
            ),
        ])

    else:
        workflow.connect([
            (getter, mask_functional, [("mask_files", "mask_file")]),
            (
                wrangle_volumes,
                mask_functional,
                [("functional_file", "in_file")],
            ),
            (
                mask_functional,
                specify_model,
                [("out_file", "functional_runs")],
            ),
            (
                mask_functional,
                fit_model,
                [("out_file", "functional_data")],
            ),
        ])

    workflow.connect([
        (
            get_info,
            specify_model,
            [("repetition_time", "time_repetition")],
        ),
        (specify_model, fit_model, [("session_info", "session_info")]),
        (
            get_info,
            fit_model,
            [("repetition_time", "interscan_interval"),
             ("run_contrasts", "contrasts")],
        ),
        (
            fit_model,
            first_level_design,
            [
                ("interscan_interval", "interscan_interval"),
                ("session_info", "session_info"),
                ("contrasts", "contrasts"),
            ],
        ),
        (first_level_design, generate_model, [("fsf_files", "fsf_file")]),
        (first_level_design, generate_model, [("ev_files", "ev_files")]),
    ])

    if detrend_poly:
        workflow.connect([
            (
                generate_model,
                correct_matrices,
                [("design_file", "design_matrix")],
            ),
            (
                correct_matrices,
                plot_matrices,
                [("design_matrix", "mat_file")],
            ),
            (
                correct_matrices,
                estimate_model,
                [("design_matrix", "design_file")],
            ),
        ])

    else:
        workflow.connect([
            (generate_model, plot_matrices, [("design_file", "mat_file")]),
            (
                generate_model,
                estimate_model,
                [("design_file", "design_file")],
            ),
        ])

    workflow.connect([
        (getter, plot_matrices, [("entities", "entities")]),
        (generate_model, plot_matrices, [("con_file", "con_file")]),
        (fit_model, estimate_model, [("functional_data", "in_file")]),
        (generate_model, estimate_model, [("con_file", "tcon_file")]),
        (
            estimate_model,
            calculate_p,
            [(("zstats", utils.flatten), "in_file")],
        ),
        (
            estimate_model,
            collate,
            [
                ("copes", "effect_maps"),
                ("varcopes", "variance_maps"),
                ("zstats", "zscore_maps"),
                ("tstats", "tstat_maps"),
            ],
        ),
        (calculate_p, collate, [("out_file", "pvalue_maps")]),
        (
            collate,
            collate_outputs,
            [
                ("effect_maps", "effect_maps"),
                ("variance_maps", "variance_maps"),
                ("zscore_maps", "zscore_maps"),
                ("pvalue_maps", "pvalue_maps"),
                ("tstat_maps", "tstat_maps"),
                ("contrast_metadata", "metadata"),
            ],
        ),
        (
            collate_outputs,
            ds_contrast_maps,
            [("out", "in_file"), ("metadata", "entities")],
        ),
        (
            collate_outputs,
            wrangle_outputs,
            [("metadata", "contrast_metadata"), ("out", "contrast_maps")],
        ),
    ])

    return workflow
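
A hypothetical invocation; model and step follow the BIDS Stats Models layout this function indexes into (contents abbreviated), and all paths are placeholders:

model = {'Name': 'mymodel', 'Input': {'Include': {'task': 'stroop'}}}  # abbreviated
step = {'Level': 'run'}  # abbreviated; normally one entry of the model's steps
wf = fsl_run_level_wf(
    model=model,
    step=step,
    bids_dir='/data/bids',
    output_dir='/data/derivatives/out',
    work_dir='/tmp/work',
    subject_id='01',
    database_path='/data/bids_db',
)
# wf.run()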
Example #17
# l1_spec creates the model specification (the opening line of this truncated
# snippet has been reconstructed from the node name below)
l1_spec = pe.Node(SpecifyModel(
    input_units='secs',
    high_pass_filter_cutoff=120,
    time_repetition=tr,
), name='l1_spec')

# l1_model creates a first-level model design
l1_model = pe.Node(fsl.Level1Design(
    bases={'dgamma': {'derivs': True}},
    model_serial_correlations=True,
    interscan_interval=tr,
    contrasts=contrasts
    # orthogonalization=orthogonality,
), name='l1_model')

# feat_spec generates an fsf model specification file
feat_spec = pe.Node(fsl.FEATModel(), name='feat_spec')

# feat_fit actually runs FEAT
feat_fit = pe.Node(fsl.FEAT(), name='feat_fit', mem_gb=5)

## instead of FEAT
#modelestimate = pe.MapNode(interface=fsl.FILMGLS(smooth_autocorr=True,
#                                                 mask_size=5,
#                                                 threshold=1000),
#                                                 name='modelestimate',
#                                                 iterfield = ['design_file',
#                                                              'in_file',
#                                                              'tcon_file'])
feat_select = pe.Node(nio.SelectFiles({
    'cope': 'stats/cope*.nii.gz',
    'pe': 'stats/pe[0-9][0-9].nii.gz',
Example #18
def model_fitting(source_img, prepped_img, subject_info, aroma, task, args,
                  mask_file, run_number):
    # Get the necessary parameters
    outputdir = args.outputdir
    fwhm = args.fwhm
    cthresh = args.cthresh
    alpha = args.alpha

    # Make a task directory in the output folder
    if run_number > 0:
        taskdir = os.path.join(outputdir,
                               task + "_run-0" + str(run_number + 1))
    else:
        taskdir = os.path.join(outputdir, task)

    if not os.path.exists(taskdir):
        os.mkdir(taskdir)
    os.mkdir(os.path.join(taskdir, 'stats'))
    os.mkdir(os.path.join(taskdir, 'figs'))

    processed_image = preprocess(aroma, fwhm, prepped_img, mask_file, taskdir,
                                 task)

    task_vs_baseline = [
        task + " vs baseline", 'T', [task, 'baseline'], [1, -1]
    ]  # set up contrasts
    contrasts = [task_vs_baseline]
    """
    Model fitting workflow

    Inputs::
         inputspec.session_info : info generated by modelgen.SpecifyModel
         inputspec.interscan_interval : interscan interval
         inputspec.contrasts : list of contrasts
         inputspec.film_threshold : image threshold for FILM estimation
         inputspec.model_serial_correlations
         inputspec.bases
    Outputs::
         outputspec.copes
         outputspec.varcopes
         outputspec.dof_file
         outputspec.zfiles
         outputspec.parameter_estimates
    """

    modelfit = pe.Workflow(name='modelfit', base_dir=taskdir)
    modelspec = pe.Node(interface=model.SpecifyModel(),
                        name="modelspec")  # generate design info
    inputspec = pe.Node(util.IdentityInterface(fields=[
        'session_info', 'interscan_interval', 'contrasts', 'film_threshold',
        'functional_data', 'bases', 'model_serial_correlations'
    ]),
                        name='inputspec')
    level1design = pe.Node(interface=fsl.Level1Design(), name="level1design")
    modelgen = pe.MapNode(interface=fsl.FEATModel(),
                          name='modelgen',
                          iterfield=['fsf_file', 'ev_files'])
    modelestimate = pe.MapNode(
        interface=fsl.FILMGLS(smooth_autocorr=True, mask_size=5),
        name='modelestimate',
        iterfield=['design_file', 'in_file', 'tcon_file'])
    merge_contrasts = pe.MapNode(interface=util.Merge(2),
                                 name='merge_contrasts',
                                 iterfield=['in1'])
    outputspec = pe.Node(util.IdentityInterface(fields=[
        'copes', 'varcopes', 'dof_file', 'zfiles', 'parameter_estimates'
    ]),
                         name='outputspec')

    modelfit.connect([
        (modelspec, inputspec, [('session_info', 'session_info')]),
        (inputspec, level1design,
         [('interscan_interval', 'interscan_interval'),
          ('session_info', 'session_info'), ('contrasts', 'contrasts'),
          ('bases', 'bases'),
          ('model_serial_correlations', 'model_serial_correlations')]),
        (inputspec, modelestimate, [('film_threshold', 'threshold'),
                                    ('functional_data', 'in_file')]),
        (level1design, modelgen, [('fsf_files', 'fsf_file'),
                                  ('ev_files', 'ev_files')]),
        (modelgen, modelestimate, [('design_file', 'design_file')]),
        (merge_contrasts, outputspec, [('out', 'zfiles')]),
        (modelestimate, outputspec, [('param_estimates',
                                      'parameter_estimates'),
                                     ('dof_file', 'dof_file')]),
    ])

    modelfit.connect([
        (modelgen, modelestimate, [('con_file', 'tcon_file'),
                                   ('fcon_file', 'fcon_file')]),
        (modelestimate, merge_contrasts, [('zstats', 'in1'),
                                          ('zfstats', 'in2')]),
        (modelestimate, outputspec, [('copes', 'copes'),
                                     ('varcopes', 'varcopes')]),
    ])

    # Define inputs to workflow
    modelspec.inputs.functional_runs = processed_image
    inputspec.inputs.functional_data = processed_image
    modelspec.inputs.subject_info = subject_info
    modelspec.inputs.input_units = 'secs'
    modelspec.inputs.time_repetition = source_img.entities['RepetitionTime']
    modelspec.inputs.high_pass_filter_cutoff = 90
    inputspec.inputs.model_serial_correlations = True
    inputspec.inputs.film_threshold = 10.0
    inputspec.inputs.interscan_interval = source_img.entities['RepetitionTime']
    inputspec.inputs.bases = {
        'gamma': {
            'gammasigma': 3,
            'gammadelay': 6,
            'derivs': True
        }
    }
    inputspec.inputs.contrasts = contrasts

    # Run the model-fitting pipeline. Main outputs are a feat directory (w/ functional img) and a design.mat file
    res = modelfit.run()

    # outputs
    output_txt = open(os.path.join(taskdir, task + '_outputs.txt'), 'w')
    print_outputs(output_txt, res)

    # Take the first (and only) element of the 'zstats' output of the third
    # node in the executed graph (FILMGLS)
    z_img = list(res.nodes)[2].result.outputs.zstats[0]

    # Use False Discovery Rate theory to correct for multiple comparisons
    fdr_thresh_img, fdr_threshold = thresholding.map_threshold(
        stat_img=z_img,
        mask_img=mask_file,
        alpha=alpha,
        height_control='fdr',
        cluster_threshold=cthresh)
    print("Thresholding at FDR corrected threshold of " + str(fdr_threshold))
    fdr_thresh_img_path = os.path.join(taskdir,
                                       task + '_fdr_thresholded_z.nii.gz')
    nibabel.save(fdr_thresh_img, fdr_thresh_img_path)

    # Do a cluster analysis using the FDR corrected threshold on the original z_img
    print("Performing cluster analysis.")
    cl = fsl.Cluster(in_file=z_img, threshold=fdr_threshold)
    cluster_file = os.path.join(taskdir, 'stats', task + "_cluster_stats.txt")
    cluster_analysis(cluster_file, cl)

    # Resample the result image with AFNI
    resample_fdr_thresh_img_path = os.path.join(
        taskdir, task + '_fdr_thresholded_z_resample.nii.gz')
    print("Resampling thresholded image to MNI space")
    resample = afni.Resample(master=template,
                             out_file=resample_fdr_thresh_img_path,
                             in_file=fdr_thresh_img_path)
    resample.run()
    os.remove(fdr_thresh_img_path)

    print("Image to be returned: " + resample_fdr_thresh_img_path)

    return resample_fdr_thresh_img_path
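
For reference, the imports this function relies on would look roughly as follows; this is an assumption, and template, preprocess, print_outputs and cluster_analysis must be defined elsewhere in the same script:

import os
import nibabel
import nipype.pipeline.engine as pe
import nipype.interfaces.utility as util
from nipype.interfaces import fsl, afni
from nipype.algorithms import modelgen as model
from nilearn.glm import thresholding  # assumed; older releases: from nistats import thresholding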
Example #19
def create_first(name='modelfit'):
    """First level task-fMRI modelling workflow
    
    Parameters
    ----------
    name : name of workflow. Default = 'modelfit'
    
    Inputs
    ------
    inputspec.session_info :
    inputspec.interscan_interval :
    inputspec.contrasts :
    inputspec.film_threshold :
    inputspec.functional_data :
    inputspec.bases :
    inputspec.model_serial_correlations :
    
    Outputs
    -------
    outputspec.copes :
    outputspec.varcopes :
    outputspec.dof_file :
    outputspec.pfiles :
    outputspec.parameter_estimates :
    outputspec.zstats :
    outputspec.tstats :
    outputspec.design_image :
    outputspec.design_file :
    outputspec.design_cov :
    
    Returns
    -------
    workflow : first-level workflow
    """
    import nipype.interfaces.fsl as fsl  # fsl
    import nipype.pipeline.engine as pe
    import nipype.interfaces.utility as util
    modelfit = pe.Workflow(name=name)

    inputspec = pe.Node(util.IdentityInterface(fields=[
        'session_info', 'interscan_interval', 'contrasts', 'film_threshold',
        'functional_data', 'bases', 'model_serial_correlations'
    ]),
                        name='inputspec')

    level1design = pe.Node(interface=fsl.Level1Design(),
                           name="create_level1_design")

    modelgen = pe.MapNode(interface=fsl.FEATModel(),
                          name='generate_model',
                          iterfield=['fsf_file', 'ev_files'])

    modelestimate = pe.MapNode(interface=fsl.FILMGLS(smooth_autocorr=True,
                                                     mask_size=5),
                               name='estimate_model',
                               iterfield=['design_file', 'in_file'])

    conestimate = pe.MapNode(interface=fsl.ContrastMgr(),
                             name='estimate_contrast',
                             iterfield=[
                                 'tcon_file', 'param_estimates',
                                 'sigmasquareds', 'corrections', 'dof_file'
                             ])

    ztopval = pe.MapNode(interface=fsl.ImageMaths(op_string='-ztop',
                                                  suffix='_pval'),
                         name='z2pval',
                         iterfield=['in_file'])
    outputspec = pe.Node(util.IdentityInterface(fields=[
        'copes', 'varcopes', 'dof_file', 'pfiles', 'parameter_estimates',
        'zstats', 'tstats', 'design_image', 'design_file', 'design_cov',
        'sigmasquareds'
    ]),
                         name='outputspec')

    # Utility function

    pop_lambda = lambda x: x[0]

    # Setup the connections

    modelfit.connect([
        (inputspec, level1design,
         [('interscan_interval', 'interscan_interval'),
          ('session_info', 'session_info'), ('contrasts', 'contrasts'),
          ('bases', 'bases'),
          ('model_serial_correlations', 'model_serial_correlations')]),
        (inputspec, modelestimate, [('film_threshold', 'threshold'),
                                    ('functional_data', 'in_file')]),
        (level1design, modelgen, [('fsf_files', 'fsf_file'),
                                  ('ev_files', 'ev_files')]),
        (modelgen, modelestimate, [('design_file', 'design_file')]),
        (modelgen, conestimate, [('con_file', 'tcon_file')]),
        (modelestimate, conestimate, [('param_estimates', 'param_estimates'),
                                      ('sigmasquareds', 'sigmasquareds'),
                                      ('corrections', 'corrections'),
                                      ('dof_file', 'dof_file')]),
        (conestimate, ztopval, [(('zstats', pop_lambda), 'in_file')]),
        (ztopval, outputspec, [('out_file', 'pfiles')]),
        (modelestimate, outputspec, [('param_estimates',
                                      'parameter_estimates'),
                                     ('dof_file', 'dof_file'),
                                     ('sigmasquareds', 'sigmasquareds')]),
        (conestimate, outputspec, [('copes', 'copes'),
                                   ('varcopes', 'varcopes'),
                                   ('tstats', 'tstats'), ('zstats', 'zstats')])
    ])
    modelfit.connect(modelgen, 'design_image', outputspec, 'design_image')
    modelfit.connect(modelgen, 'design_file', outputspec, 'design_file')
    modelfit.connect(modelgen, 'design_cov', outputspec, 'design_cov')
    return modelfit
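
Usage mirrors the doctest of create_modelfit_workflow in Example #22; a minimal sketch with placeholder inputs:

modelfit = create_first(name='modelfit')
modelfit.base_dir = '.'
modelfit.inputs.inputspec.session_info = []  # normally from modelgen.SpecifyModel
modelfit.inputs.inputspec.interscan_interval = 2.0
modelfit.inputs.inputspec.film_threshold = 1000
modelfit.inputs.inputspec.functional_data = 'bold_preproc.nii.gz'  # placeholder
modelfit.inputs.inputspec.contrasts = [('task vs baseline', 'T', ['task'], [1])]  # placeholder
modelfit.inputs.inputspec.bases = {'dgamma': {'derivs': False}}
modelfit.inputs.inputspec.model_serial_correlations = True
# modelfit.run()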
Example #20
NodeHash_6bef320.inputs.high_pass_filter_cutoff = 0
NodeHash_6bef320.inputs.input_units = 'secs'
NodeHash_6bef320.inputs.time_repetition = 2.0

#Generate FEAT specific files
NodeHash_8241250 = pe.Node(interface=fsl.Level1Design(),
                           name='NodeName_8241250')
NodeHash_8241250.inputs.bases = {'dgamma': {'derivs': False}}
# 'incongruent_correct' is assumed as the second EV name (see the note in the
# earlier copies of this snippet)
NodeHash_8241250.inputs.contrasts = [
    ('con-incon', 'T', ['congruent_correct', 'incongruent_correct'], [-1, 1])
]
NodeHash_8241250.inputs.interscan_interval = 2.0
NodeHash_8241250.inputs.model_serial_correlations = True

#Wraps command **feat_model**
NodeHash_8b12580 = pe.Node(interface=fsl.FEATModel(), name='NodeName_8b12580')

#Wraps command **film_gls**
NodeHash_5015c80 = pe.Node(interface=fsl.FILMGLS(), name='NodeName_5015c80')

#Generic datasink module to store structured outputs
NodeHash_8a104d0 = pe.Node(interface=io.DataSink(), name='NodeName_8a104d0')
NodeHash_8a104d0.inputs.base_directory = '/tmp/FIRSTLEVEL'

#Create a workflow to connect all those nodes
analysisflow = nipype.Workflow('MyWorkflow')
analysisflow.connect(NodeHash_3042f20, 'subject_info', NodeHash_6bef320,
                     'subject_info')
analysisflow.connect(NodeHash_32c4e30, 'events', NodeHash_3042f20, 'in_file')
analysisflow.connect(NodeHash_6bef320, 'session_info', NodeHash_8241250,
                     'session_info')
Example #21
def first_level_wf(in_files, output_dir, fwhm=6.0, name='wf_1st_level'):
    workflow = pe.Workflow(name=name)
    datasource = pe.Node(niu.Function(function=_dict_ds,
                                      output_names=DATA_ITEMS),
                         name='datasource')
    datasource.inputs.in_dict = in_files
    datasource.iterables = ('sub', sorted(in_files.keys()))

    # Extract motion parameters from regressors file
    runinfo = pe.Node(niu.Function(input_names=[
        'in_file', 'events_file', 'regressors_file', 'regressors_names'
    ],
                                   function=_bids2nipypeinfo,
                                   output_names=['info', 'realign_file']),
                      name='runinfo')

    # Set the column names to be used from the confounds file
    runinfo.inputs.regressors_names = ['dvars', 'framewise_displacement'] + \
        ['a_comp_cor_%02d' % i for i in range(6)] + ['cosine%02d' % i for i in range(4)]

    # SUSAN smoothing
    susan = create_susan_smooth()
    susan.inputs.inputnode.fwhm = fwhm

    l1_spec = pe.Node(SpecifyModel(parameter_source='FSL',
                                   input_units='secs',
                                   high_pass_filter_cutoff=100),
                      name='l1_spec')

    # l1_model creates a first-level model design
    l1_model = pe.Node(
        fsl.Level1Design(
            bases={'dgamma': {
                'derivs': True
            }},
            model_serial_correlations=True,
            #ENTER YOUR OWN CONTRAST HERE (see the sketch after this function)
            contrasts=[],
            # orthogonalization=orthogonality,
        ),
        name='l1_model')

    # feat_spec generates an fsf model specification file
    feat_spec = pe.Node(fsl.FEATModel(), name='feat_spec')
    # feat_fit actually runs FEAT
    feat_fit = pe.Node(fsl.FEAT(), name='feat_fit', mem_gb=12)

    feat_select = pe.Node(nio.SelectFiles({
        'cope': 'stats/cope1.nii.gz',
        'pe': 'stats/pe[0-9][0-9].nii.gz',
        'tstat': 'stats/tstat1.nii.gz',
        'varcope': 'stats/varcope1.nii.gz',
        'zstat': 'stats/zstat1.nii.gz',
    }),
                          name='feat_select')

    ds_cope = pe.Node(DerivativesDataSink(base_directory=str(output_dir),
                                          keep_dtype=False,
                                          suffix='cope',
                                          desc='intask'),
                      name='ds_cope',
                      run_without_submitting=True)

    ds_varcope = pe.Node(DerivativesDataSink(base_directory=str(output_dir),
                                             keep_dtype=False,
                                             suffix='varcope',
                                             desc='intask'),
                         name='ds_varcope',
                         run_without_submitting=True)

    ds_zstat = pe.Node(DerivativesDataSink(base_directory=str(output_dir),
                                           keep_dtype=False,
                                           suffix='zstat',
                                           desc='intask'),
                       name='ds_zstat',
                       run_without_submitting=True)

    ds_tstat = pe.Node(DerivativesDataSink(base_directory=str(output_dir),
                                           keep_dtype=False,
                                           suffix='tstat',
                                           desc='intask'),
                       name='ds_tstat',
                       run_without_submitting=True)

    workflow.connect([
        (datasource, susan, [('bold', 'inputnode.in_files'),
                             ('mask', 'inputnode.mask_file')]),
        (datasource, runinfo, [('events', 'events_file'),
                               ('regressors', 'regressors_file')]),
        (susan, l1_spec, [('outputnode.smoothed_files', 'functional_runs')]),
        (datasource, l1_spec, [('tr', 'time_repetition')]),
        (datasource, l1_model, [('tr', 'interscan_interval')]),
        (datasource, ds_cope, [('bold', 'source_file')]),
        (datasource, ds_varcope, [('bold', 'source_file')]),
        (datasource, ds_zstat, [('bold', 'source_file')]),
        (datasource, ds_tstat, [('bold', 'source_file')]),
        (susan, runinfo, [('outputnode.smoothed_files', 'in_file')]),
        (runinfo, l1_spec, [('info', 'subject_info'),
                            ('realign_file', 'realignment_parameters')]),
        (l1_spec, l1_model, [('session_info', 'session_info')]),
        (l1_model, feat_spec, [('fsf_files', 'fsf_file'),
                               ('ev_files', 'ev_files')]),
        (l1_model, feat_fit, [('fsf_files', 'fsf_file')]),
        (feat_fit, feat_select, [('feat_dir', 'base_directory')]),
        (feat_select, ds_cope, [('cope', 'in_file')]),
        (feat_select, ds_varcope, [('varcope', 'in_file')]),
        (feat_select, ds_zstat, [('zstat', 'in_file')]),
        (feat_select, ds_tstat, [('tstat', 'in_file')]),
    ])
    return workflow
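
The empty contrasts list above expects nipype contrast tuples whose condition names match the trial types parsed from the events file; hypothetical examples:

contrasts = [  # condition names are placeholders
    ('word_gt_pseudoword', 'T', ['word', 'pseudoword'], [1, -1]),
    ('task_mean', 'T', ['word', 'pseudoword'], [0.5, 0.5]),
]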
Example #22
def create_modelfit_workflow(name='modelfit'):
    """Create an FSL individual modelfitting workflow

    Example
    -------

    >>> modelfit = create_modelfit_workflow()
    >>> modelfit.base_dir = '.'
    >>> info = dict()
    >>> modelfit.inputs.inputspec.session_info = info
    >>> modelfit.inputs.inputspec.interscan_interval = 3.
    >>> modelfit.inputs.inputspec.film_threshold = 1000
    >>> modelfit.run() #doctest: +SKIP

    Inputs::

         inputspec.session_info : info generated by modelgen.SpecifyModel
         inputspec.interscan_interval : interscan interval
         inputspec.contrasts : list of contrasts
         inputspec.film_threshold : image threshold for FILM estimation

    Outputs::

         outputspec.copes : contrast estimates (COPE images)
         outputspec.varcopes : variance estimates of the contrasts
         outputspec.dof_file : degrees-of-freedom file
         outputspec.pfiles : p-value images converted from the z-stats
         outputspec.parameter_estimates : GLM parameter estimates
    """

    modelfit = pe.Workflow(name=name)

    """
    Create the nodes
    """

    inputspec = pe.Node(util.IdentityInterface(fields=['session_info',
                                                       'interscan_interval',
                                                       'contrasts',
                                                       'film_threshold',
                                                       'functional_data',
                                                       'bases',
                                                       'model_serial_correlations']),
                        name='inputspec')
    level1design = pe.Node(interface=fsl.Level1Design(), name="level1design")
    modelgen = pe.MapNode(interface=fsl.FEATModel(), name='modelgen',
                          iterfield=['fsf_file', 'ev_files'])
    modelestimate = pe.MapNode(interface=fsl.FILMGLS(smooth_autocorr=True,
                                                     mask_size=5),
                               name='modelestimate',
                               iterfield=['design_file', 'in_file'])
    conestimate = pe.MapNode(interface=fsl.ContrastMgr(), name='conestimate',
                             iterfield=['tcon_file', 'param_estimates',
                                        'sigmasquareds', 'corrections',
                                        'dof_file'])
    ztopval = pe.MapNode(interface=fsl.ImageMaths(op_string='-ztop',
                                                  suffix='_pval'),
                         name='ztop',
                         iterfield=['in_file'])
    outputspec = pe.Node(util.IdentityInterface(fields=['copes', 'varcopes',
                                                        'dof_file', 'pfiles',
                                                        'parameter_estimates']),
                         name='outputspec')

    """
    Utility function
    """

    pop_lambda = lambda x: x[0]

    """
    Setup the connections
    """

    modelfit.connect([
        (inputspec, level1design, [('interscan_interval', 'interscan_interval'),
                                   ('session_info', 'session_info'),
                                   ('contrasts', 'contrasts'),
                                   ('bases', 'bases'),
                                   ('model_serial_correlations',
                                    'model_serial_correlations')]),
        (inputspec, modelestimate, [('film_threshold', 'threshold'),
                                    ('functional_data', 'in_file')]),
        (level1design, modelgen, [('fsf_files', 'fsf_file'),
                                  ('ev_files', 'ev_files')]),
        (modelgen, modelestimate, [('design_file', 'design_file')]),
        (modelgen, conestimate, [('con_file', 'tcon_file')]),
        (modelestimate, conestimate, [('param_estimates', 'param_estimates'),
                                    ('sigmasquareds', 'sigmasquareds'),
                                    ('corrections', 'corrections'),
                                    ('dof_file', 'dof_file')]),
        (conestimate, ztopval, [(('zstats', pop_lambda), 'in_file')]),
        (ztopval, outputspec, [('out_file', 'pfiles')]),
        (modelestimate, outputspec, [('param_estimates', 'parameter_estimates'),
                                     ('dof_file', 'dof_file')]),
        (conestimate, outputspec, [('copes', 'copes'),
                                   ('varcopes', 'varcopes')]),
        ])
    return modelfit
Example #23
Use :class:`nipype.algorithms.modelgen.SpecifyModel` to generate design information.
"""

modelspec = pe.Node(interface=model.SpecifyModel(), name="modelspec")
"""
Use :class:`nipype.interfaces.fsl.Level1Design` to generate a run specific fsf
file for analysis
"""

level1design = pe.Node(interface=fsl.Level1Design(), name="level1design")
"""
Use :class:`nipype.interfaces.fsl.FEATModel` to generate a run specific mat
file for use by FILMGLS
"""

modelgen = pe.MapNode(interface=fsl.FEATModel(),
                      name='modelgen',
                      iterfield=['fsf_file', 'ev_files'])
"""
Use :class:`nipype.interfaces.fsl.FILMGLS` to estimate a model specified by a
mat file and a functional run
"""

modelestimate = pe.MapNode(interface=fsl.FILMGLS(smooth_autocorr=True,
                                                 mask_size=5,
                                                 threshold=1000),
                           name='modelestimate',
                           iterfield=['design_file', 'in_file'])
"""
Use :class:`nipype.interfaces.fsl.ContrastMgr` to generate contrast estimates
"""
Example #24
def modelfit_fsl(wf_name='modelfit'):
    """

    Fit 1st level GLM using FSL routines

    Usage (TODO)

    modelfit.inputs.inputspec.fwhm = 12
    modelfit.inputs.inputspec.brain_mask = ['/opt/shared2/nipype-test/testblock/example_func_brain_mask.nii.gz', '/opt/shared2/nipype-test/testblock/example_func_brain_mask.nii.gz']

    modelfit.inputs.inputspec.input_units = 'secs'
    modelfit.inputs.inputspec.in_file = ['/opt/shared2/nipype-test/testblock/mc_data_brain.nii.gz', '/opt/shared2/nipype-test/testblock/mc_data_brain.nii.gz']
    modelfit.inputs.inputspec.TR = 2
    modelfit.inputs.inputspec.high_pass_filter_cutoff = 100 #sigma in TR
    modelfit.inputs.inputspec.ev_file = ['/opt/shared2/nipype-test/testblock/a']

    cont1 = ['whisker', 'T', ['a', 'a'], [1.0, 0.0]]
    cont2 = ['-whisker', 'T', ['a', 'a'], [-1.0, 0.0]]
    cont3 = ['Task','F', [cont1, cont2]]
    contrasts = [cont1]

    modelfit.inputs.inputspec.contrasts = contrasts #TODO: change condition names

    modelfit.inputs.inputspec.bases_function = {'dgamma': {'derivs':  True}}
    modelfit.inputs.inputspec.model_serial_correlations = True


    #modelfit.write_graph('graph.dot');
    modelfit.write_graph('graph.dot', graph2use='colored');
    x=modelfit.run()
    #x=modelfit.run(plugin='MultiProc', plugin_args={'n_procs': 8})

    server.serve_content(modelfit)
    """

    modelfit = pe.Workflow(name=wf_name)
    """
        Set up a node to define all inputs required for the preprocessing workflow

    """

    inputnode = pe.Node(interface=util.IdentityInterface(
        fields=[
            'in_file', 'ev_file', 'confounders', 'contrasts',
            'high_pass_filter_cutoff', 'fwhm', 'interscan_interval', 'TR',
            'input_units', 'bases_function', 'model_serial_correlations',
            'brain_mask'
        ],
        mandatory_inputs=True),
                        name='inputspec')

    #TODO: eliminate brain mask

    #inputnode.iterables=[('high_pass_filter_cutoff', [30, 60, 90, 120, 500])]
    """
        Set up a node to define outputs for the preprocessing workflow

    """

    outputnode = pe.Node(interface=util.IdentityInterface(
        fields=['zstats', 'zfstats', 'copes', 'varcopes'],
        mandatory_inputs=True),
                         name='outputspec')

    # collect subject info

    getsubjectinfo = pe.MapNode(util.Function(
        input_names=['ev_file', 'confounders'],
        output_names=['subject_info'],
        function=get_subject_info),
                                name='getsubjectinfo',
                                iterfield=['confounders'])

    # nipype.algorithms.modelgen.SpecifyModel to generate design information.

    modelspec = pe.MapNode(interface=model.SpecifyModel(),
                           name="modelspec",
                           iterfield=['subject_info'])

    # smooth #TODO: move into preproc pipeline

    smooth = preproc.create_susan_smooth("smooth")
    #smooth.get_node( "smooth").iterables=[('fwhm', [6., 8., 10., 12., 14., 16.])]

    toSigma = pe.Node(interface=util.Function(
        input_names=['high_pass_filter_cutoff', 'TR'],
        output_names=['high_pass_filter_opstring'],
        function=highpass_operand),
                      name='toSigma')

    highpass = pe.MapNode(interface=fsl.ImageMaths(suffix='_tempfilt',
                                                   op_string=''),
                          iterfield=['in_file'],
                          name='highpass')

    # Use nipype.interfaces.fsl.Level1Design to generate a run specific fsf file for analysis

    level1design = pe.MapNode(interface=fsl.Level1Design(),
                              name="level1design",
                              iterfield=['session_info'])

    # Use nipype.interfaces.fsl.FEATModel to generate a run specific mat file for use by FILMGLS

    modelgen = pe.MapNode(interface=fsl.FEATModel(),
                          name='modelgen',
                          iterfield=['fsf_file', 'ev_files'])

    # Use nipype.interfaces.fsl.FILMGLS to estimate a model specified by a mat file and a functional run

    modelestimate = pe.MapNode(
        interface=fsl.FILMGLS(smooth_autocorr=True, mask_size=5,
                              threshold=200),
        name='modelestimate',
        iterfield=['in_file', 'design_file'])

    # Use nipype.interfaces.fsl.ContrastMgr to generate contrast estimates

    conestimate = pe.MapNode(interface=fsl.ContrastMgr(),
                             name='conestimate',
                             iterfield=[
                                 'param_estimates', 'sigmasquareds',
                                 'corrections', 'dof_file', 'tcon_file'
                             ])
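
    # Hedged note: nipype's ContrastMgr interface is documented as compatible
    # only with FSL versions up to 5.0.6; with newer FSL releases, contrast
    # estimation is typically obtained by passing `tcon_file` directly to FILMGLS.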

    modelfit.connect([
        (
            inputnode,
            smooth,
            [
                ('in_file', 'inputnode.in_files'),
                ('fwhm', 'inputnode.fwhm'),  # in iterable
                ('brain_mask', 'inputnode.mask_file')
            ]),
        (smooth, highpass, [('outputnode.smoothed_files', 'in_file')]),
        (inputnode, toSigma, [('high_pass_filter_cutoff',
                               'high_pass_filter_cutoff')]),
        (inputnode, toSigma, [('TR', 'TR')]),
        (toSigma, highpass, [('high_pass_filter_opstring', 'op_string')]),
        (inputnode, getsubjectinfo, [('ev_file', 'ev_file'),
                                     ('confounders', 'confounders')]),
        (getsubjectinfo, modelspec, [('subject_info', 'subject_info')]),
        (highpass, modelspec, [('out_file', 'functional_runs')]),
        (highpass, modelestimate, [('out_file', 'in_file')]),
        (inputnode, modelspec, [
            ('input_units', 'input_units'),
            ('TR', 'time_repetition'),
            ('high_pass_filter_cutoff', 'high_pass_filter_cutoff'),
        ]),
        (inputnode, level1design, [('TR', 'interscan_interval'),
                                   ('model_serial_correlations',
                                    'model_serial_correlations'),
                                   ('bases_function', 'bases'),
                                   ('contrasts', 'contrasts')]),
        (modelspec, level1design, [('session_info', 'session_info')]),
        (level1design, modelgen, [('fsf_files', 'fsf_file'),
                                  ('ev_files', 'ev_files')]),
        (modelgen, modelestimate, [('design_file', 'design_file')]),
        (modelgen, conestimate, [('con_file', 'tcon_file')]),
        (modelestimate, conestimate, [('param_estimates', 'param_estimates'),
                                      ('sigmasquareds', 'sigmasquareds'),
                                      ('corrections', 'corrections'),
                                      ('dof_file', 'dof_file')]),
        (conestimate, outputnode, [('zstats', 'zstats'),
                                   ('zfstats', 'zfstats'), ('copes', 'copes'),
                                   ('varcopes', 'varcopes')])
    ])

    return modelfit
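
# A minimal execution sketch for the factory above (hedged: `create_modelfit`
# stands in for the enclosing factory's actual name; see the Usage docstring
# above for the full set of `inputspec` fields):
#
#     modelfit = create_modelfit(wf_name='modelfit')  # hypothetical factory name
#     # ... set modelfit.inputs.inputspec.* as in the Usage docstring ...
#     modelfit.run(plugin='MultiProc', plugin_args={'n_procs': 8})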
Example #25
cont6 = ['Trauma1 > Trauma2', 'T', ['trauma1_0', 'trauma1_1', 'trauma1_2', 'trauma1_3', 'trauma2_0', 'trauma2_1', 'trauma2_2', 'trauma2_3'], [0.25, 0.25, 0.25, 0.25, -0.25, -0.25, -0.25, -0.25]]
cont7 = ['Trauma1_01>relax1_01', 'T', ['trauma1_0', 'trauma1_1', 'relax1_0', 'relax1_1'], [0.5, 0.5, -0.5, -0.5]]
contrasts = [cont1, cont2, cont3, cont4, cont5, cont6, cont7]


level1design.inputs.interscan_interval = tr
level1design.inputs.bases = {'dgamma': {'derivs': False}}
level1design.inputs.contrasts = contrasts
level1design.inputs.model_serial_correlations = True
"""
Use :class:`nipype.interfaces.fsl.FEATModel` to generate a run specific mat
file for use by FILMGLS
"""

modelgen = pe.Node(
    interface=fsl.FEATModel(),
    name='modelgen',
    )
"""
Use :class:`nipype.interfaces.fsl.FILMGLS` to estimate a model specified by a
mat file and a functional run
"""
mask = pe.Node(interface=fsl.maths.ApplyMask(), name='mask')


modelestimate = pe.Node(
    interface=fsl.FILMGLS(smooth_autocorr=True, mask_size=5, threshold=1000),
    name='modelestimate',
    )
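
# This fragment omits the surrounding workflow; a plausible wiring, assuming an
# upstream `modelspec` node and a workflow `wf` (both hypothetical here), would be:
#
#     wf.connect([
#         (modelspec, level1design, [('session_info', 'session_info')]),
#         (level1design, modelgen, [('fsf_files', 'fsf_file'),
#                                   ('ev_files', 'ev_files')]),
#         (mask, modelestimate, [('out_file', 'in_file')]),
#         (modelgen, modelestimate, [('design_file', 'design_file')]),
#     ])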

Example #26
def l1(preprocessing_dir,
	bf_path = '~/ni_data/irfs/chr_beta1.txt',
	debug=False,
	exclude={},
	habituation='confound',
	highpass_sigma=225,
	lowpass_sigma=False,
	include={},
	keep_work=False,
	out_base="",
	mask="",
	match={},
	tr=1,
	workflow_name="generic",
	modality="cbv",
	n_jobs_percentage=1,
	invert=False,
	):
	"""Calculate subject level GLM statistic scores.

	Parameters
	----------

	bf_path : str, optional
		Basis set path. It should point to a text file in the so-called FEAT/FSL "#2" format (1 entry per volume).
	exclude : dict
		A dictionary with any combination of "sessions", "subjects", "tasks" as keys and corresponding identifiers as values.
		If this is specified, matching entries will be excluded from the analysis.
	debug : bool, optional
		Whether to enable nipype debug mode.
		This increases logging.
	habituation : {"", "confound", "separate_contrast", "in_main_contrast"}, optional
		How the habituation regressor should be handled.
		Any value which evaluates to False (we recommend "") means that no habituation regressor will be introduced.
	highpass_sigma : int, optional
		Highpass threshold (in seconds).
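	lowpass_sigma : int, optional
		Lowpass threshold (in seconds); if left unset, the repetition time is used whenever temporal filtering is applied.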
	include : dict
		A dictionary with any combination of "sessions", "subjects", "tasks" as keys and corresponding identifiers as values.
		If this is specified, only matching entries will be included in the analysis.
	invert : bool, optional
		If true, the values will be inverted with respect to zero.
		This is commonly used for iron nanoparticle Cerebral Blood Volume (CBV) measurements.
	keep_work : bool, optional
		Whether to keep the work directory (containing all the intermediary workflow steps, as managed by nipype).
		This is useful for debugging and quality control.
	out_base : str, optional
		Path to the directory inside which both the working directory and the output directory will be created.
	mask : str, optional
		Path to the brain mask which shall be used to define the brain volume in the analysis.
		This has to point to an existing NIfTI file containing zero and one values only.
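	match : dict, optional
		A dictionary with any combination of "sessions", "subjects", "tasks" as keys and corresponding identifiers as values; this is passed to the BIDS data selection, so that only matching functional scans are analyzed.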
	n_jobs_percentage : float, optional
		Fraction (between 0 and 1) of the cores present on the machine to use, at most, for deploying jobs in parallel.
	tr : int, optional
		Repetition time, in seconds.
	workflow_name : str, optional
		Name of the workflow; this will also be the name of the final output directory produced under `out_base`.
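
	Examples
	--------
	A minimal invocation sketch (the paths are placeholders)::

		l1('~/ni_data/preprocessed',
			out_base='~/ni_data/l1',
			mask='mouse',
			tr=1,
			workflow_name='generic',
			)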
	"""

	from samri.pipelines.utils import bids_data_selection

	preprocessing_dir = path.abspath(path.expanduser(preprocessing_dir))
	out_base = path.abspath(path.expanduser(out_base))

	data_selection = bids_data_selection(preprocessing_dir, structural_match=False, functional_match=match, subjects=False, sessions=False)
	ind = data_selection.index.tolist()

	out_dir = path.join(out_base,workflow_name)
	workdir_name = workflow_name+'_work'
	workdir = path.join(out_base,workdir_name)
	if not os.path.exists(workdir):
		os.makedirs(workdir)
	data_selection.to_csv(path.join(workdir,'data_selection.csv'))

	get_scan = pe.Node(name='get_scan', interface=util.Function(function=get_bids_scan,input_names=inspect.getargspec(get_bids_scan)[0], output_names=['scan_path','scan_type','task', 'nii_path', 'nii_name', 'events_name', 'subject_session', 'metadata_filename', 'dict_slice']))
	get_scan.inputs.ignore_exception = True
	get_scan.inputs.data_selection = data_selection
	get_scan.inputs.bids_base = preprocessing_dir
	get_scan.iterables = ("ind_type", ind)

	eventfile = pe.Node(name='eventfile', interface=util.Function(function=corresponding_eventfile,input_names=inspect.getargspec(corresponding_eventfile)[0], output_names=['eventfile']))

	if invert:
		invert = pe.Node(interface=fsl.ImageMaths(), name="invert")
		invert.inputs.op_string = '-mul -1'

	specify_model = pe.Node(interface=SpecifyModel(), name="specify_model")
	specify_model.inputs.input_units = 'secs'
	specify_model.inputs.time_repetition = tr
	specify_model.inputs.high_pass_filter_cutoff = highpass_sigma

	level1design = pe.Node(interface=Level1Design(), name="level1design")
	level1design.inputs.interscan_interval = tr
	if bf_path:
		bf_path = path.abspath(path.expanduser(bf_path))
		level1design.inputs.bases = {"custom": {"bfcustompath":bf_path}}
	else:
		# We are not adding derivatives here, as these conflict with the habituation option.
		# !!! This is not difficult to solve, and would only require the addition of an elif condition to the habituator definition, which would add multiple column copies for each of the derivs.
		level1design.inputs.bases = {'gamma': {'derivs':True, 'gammasigma':30, 'gammadelay':10}}
	level1design.inputs.model_serial_correlations = True

	modelgen = pe.Node(interface=fsl.FEATModel(), name='modelgen')
	#modelgen.inputs.ignore_exception = True

	glm = pe.Node(interface=fsl.GLM(), name='glm')
#	glm.inputs.out_cope = "cope.nii.gz"
#	glm.inputs.out_varcb_name = "varcb.nii.gz"
#	#not setting a betas output file might lead to beta export in lieu of COPEs
#	glm.inputs.out_file = "betas.nii.gz"
#	glm.inputs.out_t_name = "t_stat.nii.gz"
#	glm.inputs.out_p_name = "p_stat.nii.gz"
	if mask == 'mouse':
		mask = '/usr/share/mouse-brain-atlases/dsurqec_200micron_mask.nii'
	if mask:
		glm.inputs.mask = path.abspath(path.expanduser(mask))
	glm.interface.mem_gb = 6
	#glm.inputs.ignore_exception = True

	# The doubled braces survive the `.format()` call as literal single braces;
	# e.g. `out_file_name_base.format('betas', 'nii.gz')` yields a per-scan
	# template 'sub-{subject}_..._betas.nii.gz', presumably filled in by
	# `bids_dict_to_source` via the filename nodes below.
	out_file_name_base = 'sub-{{subject}}_ses-{{session}}_task-{{task}}_acq-{{acquisition}}_run-{{run}}_{{modality}}_{}.{}'

	betas_filename = pe.Node(name='betas_filename', interface=util.Function(function=bids_dict_to_source,input_names=inspect.getargspec(bids_dict_to_source)[0], output_names=['filename']))
	betas_filename.inputs.source_format = out_file_name_base.format('betas','nii.gz')
	cope_filename = pe.Node(name='cope_filename', interface=util.Function(function=bids_dict_to_source,input_names=inspect.getargspec(bids_dict_to_source)[0], output_names=['filename']))
	cope_filename.inputs.source_format = out_file_name_base.format('cope','nii.gz')
	varcb_filename = pe.Node(name='varcb_filename', interface=util.Function(function=bids_dict_to_source,input_names=inspect.getargspec(bids_dict_to_source)[0], output_names=['filename']))
	varcb_filename.inputs.source_format = out_file_name_base.format('varcb','nii.gz')
	tstat_filename = pe.Node(name='tstat_filename', interface=util.Function(function=bids_dict_to_source,input_names=inspect.getargspec(bids_dict_to_source)[0], output_names=['filename']))
	tstat_filename.inputs.source_format = out_file_name_base.format('tstat','nii.gz')
	zstat_filename = pe.Node(name='zstat_filename', interface=util.Function(function=bids_dict_to_source,input_names=inspect.getargspec(bids_dict_to_source)[0], output_names=['filename']))
	zstat_filename.inputs.source_format = out_file_name_base.format('zstat','nii.gz')
	pstat_filename = pe.Node(name='pstat_filename', interface=util.Function(function=bids_dict_to_source,input_names=inspect.getargspec(bids_dict_to_source)[0], output_names=['filename']))
	pstat_filename.inputs.source_format = out_file_name_base.format('pstat','nii.gz')
	pfstat_filename = pe.Node(name='pfstat_filename', interface=util.Function(function=bids_dict_to_source,input_names=inspect.getargspec(bids_dict_to_source)[0], output_names=['filename']))
	pfstat_filename.inputs.source_format = out_file_name_base.format('pfstat','nii.gz')
	design_filename = pe.Node(name='design', interface=util.Function(function=bids_dict_to_source,input_names=inspect.getargspec(bids_dict_to_source)[0], output_names=['filename']))
	design_filename.inputs.source_format = out_file_name_base.format('design','mat')

	design_rename = pe.Node(interface=util.Rename(), name='design_rename')

	datasink = pe.Node(nio.DataSink(), name='datasink')
	datasink.inputs.base_directory = path.join(out_base,workflow_name)
	datasink.inputs.parameterization = False

	workflow_connections = [
		(get_scan, eventfile, [('nii_path', 'timecourse_file')]),
		(specify_model, level1design, [('session_info', 'session_info')]),
		(level1design, modelgen, [('ev_files', 'ev_files')]),
		(level1design, modelgen, [('fsf_files', 'fsf_file')]),
		(modelgen, glm, [('design_file', 'design')]),
		(modelgen, glm, [('con_file', 'contrasts')]),
		(get_scan, datasink, [(('dict_slice',bids_dict_to_dir), 'container')]),
		(get_scan, betas_filename, [('dict_slice', 'bids_dictionary')]),
		(get_scan, cope_filename, [('dict_slice', 'bids_dictionary')]),
		(get_scan, varcb_filename, [('dict_slice', 'bids_dictionary')]),
		(get_scan, tstat_filename, [('dict_slice', 'bids_dictionary')]),
		(get_scan, zstat_filename, [('dict_slice', 'bids_dictionary')]),
		(get_scan, pstat_filename, [('dict_slice', 'bids_dictionary')]),
		(get_scan, pfstat_filename, [('dict_slice', 'bids_dictionary')]),
		(get_scan, design_filename, [('dict_slice', 'bids_dictionary')]),
		(betas_filename, glm, [('filename', 'out_file')]),
		(cope_filename, glm, [('filename', 'out_cope')]),
		(varcb_filename, glm, [('filename', 'out_varcb_name')]),
		(tstat_filename, glm, [('filename', 'out_t_name')]),
		(zstat_filename, glm, [('filename', 'out_z_name')]),
		(pstat_filename, glm, [('filename', 'out_p_name')]),
		(pfstat_filename, glm, [('filename', 'out_pf_name')]),
		(modelgen, design_rename, [('design_file', 'in_file')]),
		(design_filename, design_rename, [('filename', 'format_string')]),
		(glm, datasink, [('out_pf', '@pfstat')]),
		(glm, datasink, [('out_p', '@pstat')]),
		(glm, datasink, [('out_z', '@zstat')]),
		(glm, datasink, [('out_t', '@tstat')]),
		(glm, datasink, [('out_cope', '@cope')]),
		(glm, datasink, [('out_varcb', '@varcb')]),
		(glm, datasink, [('out_file', '@betas')]),
		(design_rename, datasink, [('out_file', '@design')]),
		]

	if habituation:
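		# Hedged note: Level1Design's `orthogonalization` is a dict of dicts keyed
		# by EV index; the intent here appears to be to orthogonalize the
		# habituation regressor (added below) with respect to the stimulation regressor.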
		level1design.inputs.orthogonalization = {1: {0:0,1:0,2:0}, 2: {0:1,1:1,2:0}}
		specify_model.inputs.bids_condition_column = 'samri_l1_regressors'
		specify_model.inputs.bids_amplitude_column = 'samri_l1_amplitude'
		add_habituation = pe.Node(name='add_habituation', interface=util.Function(function=eventfile_add_habituation,input_names=inspect.getargspec(eventfile_add_habituation)[0], output_names=['out_file']))
		# Regressor names need to be prefixed with "e" plus a numerator so that Level1Design will be certain to conserve the order.
		add_habituation.inputs.original_stimulation_value='1stim'
		add_habituation.inputs.habituation_value='2habituation'
		workflow_connections.extend([
			(eventfile, add_habituation, [('eventfile', 'in_file')]),
			(add_habituation, specify_model, [('out_file', 'bids_event_file')]),
			])
	if not habituation:
		specify_model.inputs.bids_condition_column = ''
		level1design.inputs.contrasts = [('allStim','T', ['ev0'],[1])]
		workflow_connections.extend([
			(eventfile, specify_model, [('eventfile', 'bids_event_file')]),
			])
	#condition names as defined in eventfile_add_habituation:
	elif habituation=="separate_contrast":
		level1design.inputs.contrasts = [('stim','T', ['1stim','2habituation'],[1,0]),('hab','T', ['1stim','2habituation'],[0,1])]
	elif habituation=="in_main_contrast":
		level1design.inputs.contrasts = [('all','T', ['1stim','2habituation'],[1,1])]
	elif habituation=="confound":
		level1design.inputs.contrasts = [('stim','T', ["1stim", "2habituation"],[1,0])]
	else:
		raise ValueError('The value you have provided for the `habituation` parameter, namely "{}", is invalid. Please choose one of: {{None, False, "", "confound", "in_main_contrast", "separate_contrast"}}'.format(habituation))

	if highpass_sigma or lowpass_sigma:
		bandpass = pe.Node(interface=fsl.maths.TemporalFilter(), name="bandpass")
		bandpass.inputs.highpass_sigma = highpass_sigma
		bandpass.interface.mem_gb = 16
		if lowpass_sigma:
			bandpass.inputs.lowpass_sigma = lowpass_sigma
		else:
			bandpass.inputs.lowpass_sigma = tr
		if invert:
			workflow_connections.extend([
				(get_scan, invert, [('nii_path', 'in_file')]),
				(invert, bandpass, [('out_file', 'in_file')]),
				(bandpass, specify_model, [('out_file', 'functional_runs')]),
				(bandpass, glm, [('out_file', 'in_file')]),
				(bandpass, datasink, [('out_file', '@ts_file')]),
				(get_scan, bandpass, [('nii_name', 'out_file')]),
				])
		else:
			workflow_connections.extend([
				(get_scan, bandpass, [('nii_path', 'in_file')]),
				(bandpass, specify_model, [('out_file', 'functional_runs')]),
				(bandpass, glm, [('out_file', 'in_file')]),
				(bandpass, datasink, [('out_file', '@ts_file')]),
				(get_scan, bandpass, [('nii_name', 'out_file')]),
				])
	else:
		if invert:
			workflow_connections.extend([
				(get_scan, invert, [('nii_path', 'in_file')]),
				(invert, specify_model, [('out_file', 'functional_runs')]),
				(invert, glm, [('out_file', 'in_file')]),
				(invert, datasink, [('out_file', '@ts_file')]),
				(get_scan, invert, [('nii_name', 'out_file')]),
				])
		else:
			workflow_connections.extend([
				(get_scan, specify_model, [('nii_path', 'functional_runs')]),
				(get_scan, glm, [('nii_path', 'in_file')]),
				(get_scan, datasink, [('nii_path', '@ts_file')]),
				])


	workflow_config = {'execution': {'crashdump_dir': path.join(out_base,'crashdump'),}}
	if debug:
		workflow_config['logging'] = {
			'workflow_level':'DEBUG',
			'utils_level':'DEBUG',
			'interface_level':'DEBUG',
			'filemanip_level':'DEBUG',
			'log_to_file':'true',
			}

	workflow = pe.Workflow(name=workdir_name)
	workflow.connect(workflow_connections)
	workflow.base_dir = out_base
	workflow.config = workflow_config
	workflow.write_graph(dotfilename=path.join(workflow.base_dir,workdir_name,"graph.dot"), graph2use="hierarchical", format="png")

	# Use at most `n_jobs_percentage` of the machine's cores, but always deploy at least two parallel processes.
	n_jobs = max(int(round(mp.cpu_count()*n_jobs_percentage)),2)
	workflow.run(plugin="MultiProc", plugin_args={'n_procs' : n_jobs})
	if not keep_work:
		shutil.rmtree(path.join(out_base,workdir_name))