Example #1
def create_2lvl(name="group"):
    import nipype.interfaces.fsl as fsl
    import nipype.pipeline.engine as pe
    import nipype.interfaces.utility as niu

    wk = pe.Workflow(name=name)

    inputspec = pe.Node(niu.IdentityInterface(fields=['copes','varcopes',
                                                      'template', "contrasts",
                                                      "regressors"]),name='inputspec')

    model = pe.Node(fsl.MultipleRegressDesign(),name='l2model')

    #wk.connect(inputspec,('copes',get_len),model,'num_copes')
    wk.connect(inputspec, 'contrasts', model, "contrasts")
    wk.connect(inputspec, 'regressors', model, "regressors")

    mergecopes = pe.Node(fsl.Merge(dimension='t'),name='merge_copes')
    mergevarcopes = pe.Node(fsl.Merge(dimension='t'),name='merge_varcopes')

    flame = pe.Node(fsl.FLAMEO(run_mode='ols'),name='flameo')
    wk.connect(inputspec,'copes',mergecopes,'in_files')
    wk.connect(inputspec,'varcopes',mergevarcopes,'in_files')
    wk.connect(model,'design_mat',flame,'design_file')
    wk.connect(model,'design_con',flame, 't_con_file')
    wk.connect(mergecopes, 'merged_file', flame, 'cope_file')
    wk.connect(mergevarcopes,'merged_file',flame,'var_cope_file')
    wk.connect(model,'design_grp',flame,'cov_split_file')

    bet = pe.Node(fsl.BET(mask=True,frac=0.3),name="template_brainmask")
    wk.connect(inputspec,'template',bet,'in_file')
    wk.connect(bet,'mask_file',flame,'mask_file')

    outputspec = pe.Node(niu.IdentityInterface(fields=['zstat','tstat','cope',
                                                       'varcope','mrefvars',
                                                       'pes','res4d','mask',
                                                       'tdof','weights','pstat']),
        name='outputspec')

    wk.connect(flame,'copes',outputspec,'cope')
    wk.connect(flame,'var_copes',outputspec,'varcope')
    wk.connect(flame,'mrefvars',outputspec,'mrefvars')
    wk.connect(flame,'pes',outputspec,'pes')
    wk.connect(flame,'res4d',outputspec,'res4d')
    wk.connect(flame,'weights',outputspec,'weights')
    wk.connect(flame,'zstats',outputspec,'zstat')
    wk.connect(flame,'tstats',outputspec,'tstat')
    wk.connect(flame,'tdof',outputspec,'tdof')
    wk.connect(bet,'mask_file',outputspec,'mask')

    ztopval = pe.MapNode(interface=fsl.ImageMaths(op_string='-ztop',
        suffix='_pval'),
        name='z2pval',
        iterfield=['in_file'])

    wk.connect(flame,'zstats',ztopval,'in_file')
    wk.connect(ztopval,'out_file',outputspec,'pstat')

    return wk
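
# A minimal usage sketch for the factory above (hypothetical paths; assumes
# FSL is installed and the listed files exist):
#
#   wf = create_2lvl(name="group_ols")
#   wf.base_dir = "/tmp/group_analysis"
#   wf.inputs.inputspec.copes = ["sub-01_cope1.nii.gz", "sub-02_cope1.nii.gz"]
#   wf.inputs.inputspec.varcopes = ["sub-01_varcope1.nii.gz",
#                                   "sub-02_varcope1.nii.gz"]
#   wf.inputs.inputspec.template = "MNI152_T1_2mm.nii.gz"
#   wf.inputs.inputspec.regressors = {"reg1": [1, 1]}
#   wf.inputs.inputspec.contrasts = [["group_mean", "T", ["reg1"], [1]]]
#   wf.run()
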
def create_2lvl_rand(name="group_randomize", mask=None, iters=5000):
    import nipype.interfaces.fsl as fsl
    import nipype.pipeline.engine as pe
    import nipype.interfaces.utility as niu

    wk = pe.Workflow(name=name)

    inputspec = pe.Node(niu.IdentityInterface(fields=[
        'copes', 'varcopes', 'template', "contrasts", "group", "regressors"
    ]),
                        name='inputspec')

    model = pe.Node(fsl.MultipleRegressDesign(), name='l2model')

    wk.connect(inputspec, 'contrasts', model, "contrasts")
    wk.connect(inputspec, 'regressors', model, "regressors")
    wk.connect(inputspec, 'group', model, 'groups')

    mergecopes = pe.Node(fsl.Merge(dimension='t'), name='merge_copes')

    rand = pe.Node(fsl.Randomise(base_name='TwoSampleT',
                                 raw_stats_imgs=True,
                                 tfce=True,
                                 num_perm=iters),
                   name='randomize')

    wk.connect(inputspec, 'copes', mergecopes, 'in_files')
    wk.connect(model, 'design_mat', rand, 'design_mat')
    wk.connect(model, 'design_con', rand, 'tcon')
    wk.connect(mergecopes, 'merged_file', rand, 'in_file')
    wk.connect(model, 'design_grp', rand, 'x_block_labels')

    if mask is None:
        bet = pe.Node(fsl.BET(mask=True, frac=0.3), name="template_brainmask")
        wk.connect(inputspec, 'template', bet, 'in_file')
        wk.connect(bet, 'mask_file', rand, 'mask')

    else:
        wk.connect(inputspec, 'template', rand, 'mask')

    outputspec = pe.Node(niu.IdentityInterface(fields=[
        'f_corrected_p_files', 'f_p_files', 'fstat_files',
        't_corrected_p_files', 't_p_files', 'tstat_file', 'mask'
    ]),
                         name='outputspec')

    wk.connect(rand, 'f_corrected_p_files', outputspec, 'f_corrected_p_files')
    wk.connect(rand, 'f_p_files', outputspec, 'f_p_files')
    wk.connect(rand, 'fstat_files', outputspec, 'fstat_files')
    wk.connect(rand, 't_corrected_p_files', outputspec, 't_corrected_p_files')
    wk.connect(rand, 't_p_files', outputspec, 't_p_files')
    wk.connect(rand, 'tstat_files', outputspec, 'tstat_file')
    if mask is None:
        wk.connect(bet, 'mask_file', outputspec, 'mask')
    else:
        wk.connect(inputspec, 'template', outputspec, 'mask')
    return wk
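
# Usage mirrors create_2lvl above. Note the mask argument: with mask=None a
# brain mask is derived from inputspec.template via BET, while with a mask
# path given, inputspec.template is connected directly as the Randomise mask:
#
#   wf = create_2lvl_rand(name="group_rand", mask="group_mask.nii.gz",
#                         iters=10000)
#   wf.inputs.inputspec.template = "group_mask.nii.gz"  # used as the mask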
Example #3
# Dictionary with regressors

dictReg = {
    'reg1': leftHanded,  # dummy variables for left handed people
    'reg2': rightHanded  # dummy variables for right handed people
}
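
# For concreteness (a hypothetical 4-subject design with subjects ordered
# [s1, s2, s3, s4], where s1 and s2 are left handed):
#   leftHanded  = [1, 1, 0, 0]
#   rightHanded = [0, 0, 1, 1]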

# Contrasts
cont01 = ['left>right', 'T', ['reg1', 'reg2'], [1, -1]]
cont02 = ['right>left', 'T', ['reg1', 'reg2'], [-1, 1]]
cont03 = ['activation', 'T', ['reg1', 'reg2'], [0.5, 0.5]]

contrastList = [cont01, cont02, cont03]

# Setting up the second level analysis model node
level2design = Node(fsl.MultipleRegressDesign(contrasts=contrastList,
                                              regressors=dictReg),
                    name='level2design')

# Model calculation by FLAMEO
flameo = Node(fsl.FLAMEO(run_mode='fe'), name="flameo")

###########
#
# NODES FOR THE MERGING IMAGES
#
###########
# merging cope files
copemerge = Node(fsl.Merge(dimension='t', in_files=listCopeFiles),
                 name="copemerge")

# merging varcope files (the source snippet breaks off here; completed by
# analogy with copemerge, with listVarcopeFiles as the assumed counterpart
# of listCopeFiles)
varcopemerge = Node(fsl.Merge(dimension='t', in_files=listVarcopeFiles),
                    name="varcopemerge")
Example #4
		Parkflow_rpe = Workflow(name='workflow')
		Parkflow_rpe.base_dir = sub_rpe_workflow_dir

		# Create nodes

		copemerge = Node(interface=fsl.Merge(
			dimension='t',
			in_files=copes),
			name='copemerge')
		varcopemerge = Node(interface=fsl.Merge(
			dimension='t',
			in_files=varcopes),
			name='varcopemerge')

		multregmodel = Node(interface=fsl.MultipleRegressDesign(
			contrasts=[],
			regressors={}),
			name='multregmodel')

		feedback_tcont = ['group_mean', 'T', ['reg1', 'reg2'], [1, 0]]
		rpe_pos_tcont = ['rpe+', 'T', ['reg1', 'reg2'], [0, 1]]
		rpe_neg_tcont = ['rpe-', 'T', ['reg1', 'reg2'], [0, -1]]
		
		multregmodel.inputs.contrasts = [feedback_tcont, rpe_pos_tcont, rpe_neg_tcont]
		multregmodel.inputs.regressors = dict(
			reg1=list(EV_rpe_design_df['feedback']),
			reg2=list(EV_rpe_design_df['rpe_demeaned']))

		# Node() has no stats_dir keyword; FLAMEO's output directory is set
		# through the interface's log_dir input instead.
		FE = Node(interface=fsl.FLAMEO(
			run_mode='fe',
			mask_file=maskfile,
			log_dir=os.path.join(Parkflow_rpe.base_dir, 'stats')),
			name='FE')
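
		# The snippet ends before the nodes are wired together; a typical set
		# of connections for this design (a sketch, using the FLAMEO input
		# names seen in the other examples here) would be:
		#
		#   Parkflow_rpe.connect([
		#       (copemerge, FE, [('merged_file', 'cope_file')]),
		#       (varcopemerge, FE, [('merged_file', 'var_cope_file')]),
		#       (multregmodel, FE, [('design_mat', 'design_file'),
		#                           ('design_con', 't_con_file'),
		#                           ('design_grp', 'cov_split_file')]),
		#   ])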
Example #5
def test_FLAME1(tmp_path, wakemandg_hensonrn_downsampled, use_var_cope):
    os.chdir(str(tmp_path))

    # prepare
    data = wakemandg_hensonrn_downsampled

    cope_files = data["stat-effect_statmap"]
    var_cope_files = data["stat-variance_statmap"]
    mask_files = data["mask"]

    subjects = data["subjects"]
    spreadsheet_file = data["spreadsheet"]

    regressors, contrasts, _, _ = group_design(
        subjects=subjects,
        spreadsheet=spreadsheet_file,
        variables=[
            {
                "name": "Sub",
                "type": "id"
            },
            {
                "name": "Age",
                "type": "continuous"
            },
            {
                "name": "ReactionTime",
                "type": "categorical"
            },
        ],
        contrasts=[
            {
                "variable": ["Age"],
                "type": "infer"
            },
            {
                "variable": ["ReactionTime"],
                "type": "infer"
            },
        ],
    )

    # run FSL
    merge_cope_file = _merge(cope_files, "t")
    merge_var_cope_file = _merge(var_cope_files, "t")
    merge_mask_file = _merge_mask(mask_files)

    workflow = pe.Workflow("comparison", base_dir=str(tmp_path))

    demeaned_regressors = OrderedDict()  # need to manually demean here
    for variable_name, values in regressors.items():
        if variable_name.lower() != "intercept":
            values = (np.array(values) - np.nanmean(values)).tolist()
        demeaned_regressors[variable_name] = values

    multipleregressdesign = pe.Node(
        fsl.MultipleRegressDesign(
            regressors=demeaned_regressors,
            contrasts=contrasts,
        ),
        name="multipleregressdesign",
    )

    flameo = pe.Node(
        FSLFLAMEO(
            run_mode="flame1",
            cope_file=merge_cope_file,
            mask_file=merge_mask_file,
        ),
        name="flameo",
    )

    if use_var_cope:
        flameo.inputs.var_cope_file = merge_var_cope_file

    workflow.connect(multipleregressdesign, "design_mat", flameo,
                     "design_file")
    workflow.connect(multipleregressdesign, "design_con", flameo, "t_con_file")
    workflow.connect(multipleregressdesign, "design_fts", flameo, "f_con_file")
    workflow.connect(multipleregressdesign, "design_grp", flameo,
                     "cov_split_file")

    execgraph = workflow.run()

    # retrieve flameo again
    for node in execgraph.nodes():
        if node.name == "flameo":
            flameo = node

    result = flameo.result

    r0 = dict(
        cope=result.outputs.copes[0],
        var_cope=result.outputs.var_copes[0],
        tstat=result.outputs.tstats[0],
        fstat=result.outputs.fstats,
        tdof=result.outputs.tdof[0],
    )

    # run halfpipe
    if use_var_cope:
        var_cope_files_or_none = var_cope_files
    else:
        var_cope_files_or_none = None

    result = fit(
        cope_files=cope_files,
        var_cope_files=var_cope_files_or_none,
        mask_files=mask_files,
        regressors=regressors,
        contrasts=contrasts,
        algorithms_to_run=["flame1"],
        num_threads=1,
    )

    r1 = dict(
        cope=result["copes"][0],
        var_cope=result["var_copes"][0],
        tstat=result["tstats"][0],
        fstat=result["fstats"][2],
        tdof=result["dof"][0],
    )

    # compare
    mask = nib.load(merge_mask_file).get_fdata() > 0

    for k in set(r0.keys()) & set(r1.keys()):
        a0 = nib.load(r0[k]).get_fdata()[mask]
        a1 = nib.load(r1[k]).get_fdata()[mask]

        # weak criteria, determined post-hoc
        # we don't expect exactly identical results, because FSL and numpy
        # use different numerics code and we use double precision while FSL
        # uses single precision floating point
        # so these assertions are here to verify that the small differences
        # will not get any larger with future changes or optimizations

        # max difference of one percent
        assert (float(np.isclose(a0, a1, rtol=1e-2).mean()) >
                0.995), f"Too many diverging voxels for {k}"

        if k not in frozenset(["var_cope"]):
            assert np.all(
                np.abs(a0 - a1)[np.logical_not(np.isclose(a0, a1, rtol=1e-2))]
                < 25), f"Difference in diverging voxels is too big for {k}"

            # mean error average needs to be below 0.05
            assert (float(np.abs(a0 - a1).mean()) <
                    5e-2), f"Too high mean error average for {k}"
Example #6
def modelfit_2ndlevel(
        wf_name='2nd_level_modelfit',
        method='flameo',
        standardize=True):  # TODO: standardization in separate workflow!

    # method is one of 'flameo', 'palm', 'randomise' or 'randomise_parallel'

    model = pe.Workflow(name=wf_name)
    """
        Set up a node to define all inputs required for the 2nd-level model workflow

    """

    inputnode = pe.Node(
        interface=util.IdentityInterface(
            fields=[
                'copes', 'varcopes', 'func2anat_mat', 'std_brain',
                'anat_to_std_warp', 'std_brain_mask', 'regressors',
                'contrasts', 'groups'
            ],  # TODO: groups!!
            mandatory_inputs=True),
        name='inputspec')
    """
        Set up a node to define outputs for the 2nd-level model workflow

    """

    outputnode = pe.Node(interface=util.IdentityInterface(
        fields=['zstats'], mandatory_inputs=True),
                         name='outputspec')

    ###################################################################################################
    # merge copes
    copemerge = pe.Node(interface=fsl.Merge(dimension='t'), name="copemerge")

    # standardize copes and varcopes
    if standardize:

        applyWarpCope = pe.MapNode(
            interface=fsl.ApplyWarp(interp='sinc'),
            name="warp_cope",
            iterfield=['in_file', 'field_file', 'premat'])

        model.connect(inputnode, 'func2anat_mat', applyWarpCope, 'premat')
        model.connect(inputnode, 'copes', applyWarpCope, 'in_file')
        model.connect(inputnode, 'std_brain', applyWarpCope, 'ref_file')
        model.connect(inputnode, 'anat_to_std_warp', applyWarpCope,
                      'field_file')
        model.connect(applyWarpCope, 'out_file', copemerge, 'in_files')
    else:
        model.connect(inputnode, 'copes', copemerge, 'in_files')

    if method == 'flameo':  # same for varcopes if flameo

        varcopemerge = pe.Node(interface=fsl.Merge(dimension='t'),
                               name="varcopemerge")

        if standardize:
            applyWarpVarcope = pe.MapNode(
                interface=fsl.ApplyWarp(interp='sinc'),
                name="warp_varcope",
                iterfield=['in_file', 'field_file', 'premat'])  # 'premat' was an unquoted name (NameError)

            model.connect(inputnode, 'func2anat_mat', applyWarpVarcope,
                          'premat')
            model.connect(inputnode, 'varcopes', applyWarpVarcope, 'in_file')
            model.connect(inputnode, 'std_brain', applyWarpVarcope, 'ref_file')
            model.connect(inputnode, 'anat_to_std_warp', applyWarpVarcope,
                          'field_file')

            model.connect(applyWarpVarcope, 'out_file', varcopemerge,
                          'in_files')
        else:
            model.connect(inputnode, 'varcopes', varcopemerge, 'in_files')

    #level2model = pe.Node(interface=fsl.L2Model(num_copes=35),
    #                     name='l2model')

    level2model = pe.Node(interface=fsl.MultipleRegressDesign(), name='design')

    model.connect(inputnode, 'regressors', level2model, 'regressors')
    model.connect(inputnode, 'contrasts', level2model, 'contrasts')
    model.connect(inputnode, 'groups', level2model, 'groups')

    if method == 'flameo':
        flameo = pe.Node(interface=fsl.FLAMEO(run_mode='fe'), name="flameo")

        model.connect([
            (inputnode, flameo, [('std_brain_mask', 'mask_file')]),
            (copemerge, flameo, [('merged_file', 'cope_file')]),
            (varcopemerge, flameo, [('merged_file', 'var_cope_file')]),
            (level2model, flameo, [('design_mat', 'design_file'),
                                   ('design_con', 't_con_file'),
                                   ('design_grp', 'cov_split_file')]),
            (flameo, outputnode, [('zstats', 'zstats')])
        ])
    elif method == 'palm':
        palm = pe.Node(util.Function(input_names=[
            'cope_file', 'design_file', 'contrast_file', 'group_file',
            'mask_file', 'cluster_threshold'
        ],
                                     output_names=['palm_outputs'],
                                     function=run_palm),
                       name='palm')

        model.connect([(inputnode, palm, [('std_brain_mask', 'mask_file')]),
                       (copemerge, palm, [('merged_file', 'cope_file')]),
                       (level2model, palm, [('design_mat', 'design_file'),
                                            ('design_con', 'contrast_file'),
                                            ('design_grp', 'group_file')]),
                       (palm, outputnode, [('palm_outputs', 'zstats')])])
        palm.inputs.cluster_threshold = 2.3  #TODO: make parametrizable
        palm.plugin_args = {
            'sbatch_args': '-p om_all_nodes -N1 -c2 --mem=10G',
            'overwrite': True
        }
    elif method == 'randomise':
        rand = pe.Node(util.Function(input_names=[
            'cope_file', 'design_file', 'contrast_file', 'group_file',
            'mask_file', 'cluster_threshold', 'n'
        ],
                                     output_names=['palm_outputs'],
                                     function=run_rand),
                       name='randomise')

        model.connect([(inputnode, rand, [('std_brain_mask', 'mask_file')]),
                       (copemerge, rand, [('merged_file', 'cope_file')]),
                       (level2model, rand, [('design_mat', 'design_file'),
                                            ('design_con', 'contrast_file'),
                                            ('design_grp', 'group_file')]),
                       (rand, outputnode, [('palm_outputs', 'zstats')])])
        rand.inputs.cluster_threshold = 2.3  #TODO: make parametrizable
        rand.inputs.n = 1000
        #rand.plugin_args = {'sbatch_args': '-p om_all_nodes -N1 -c2 --mem=10G', 'overwrite': True}
    elif method == 'randomise_parallel':
        rand = pe.Node(util.Function(input_names=[
            'cope_file', 'design_file', 'contrast_file', 'group_file',
            'mask_file', 'cluster_threshold', 'n'
        ],
                                     output_names=['palm_outputs'],
                                     function=run_rand_par),
                       name='randomise')

        model.connect([(inputnode, rand, [('std_brain_mask', 'mask_file')]),
                       (copemerge, rand, [('merged_file', 'cope_file')]),
                       (level2model, rand, [('design_mat', 'design_file'),
                                            ('design_con', 'contrast_file'),
                                            ('design_grp', 'group_file')]),
                       (rand, outputnode, [('palm_outputs', 'zstats')])])
        rand.inputs.cluster_threshold = 2.3  # TODO: make parametrizable
        rand.inputs.n = 1000
        #rand.plugin_args = {'sbatch_args': '-p om_all_nodes -N1 -c2 --mem=10G', 'overwrite': True}

    else:
        raise ValueError(
            'No such 2nd-level statistical model method: ' + method)

    return model
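
# A minimal usage sketch (hypothetical inputs; for the non-flameo methods the
# helper functions run_palm / run_rand / run_rand_par must be importable):
#
#   wf = modelfit_2ndlevel(wf_name='grp', method='flameo', standardize=False)
#   wf.inputs.inputspec.copes = cope_files          # per-subject cope images
#   wf.inputs.inputspec.varcopes = varcope_files    # per-subject varcopes
#   wf.inputs.inputspec.std_brain_mask = 'MNI152_T1_2mm_brain_mask.nii.gz'
#   wf.inputs.inputspec.regressors = {'group_mean': [1] * len(cope_files)}
#   wf.inputs.inputspec.contrasts = [['group_mean', 'T', ['group_mean'], [1]]]
#   wf.inputs.inputspec.groups = [1] * len(cope_files)
#   wf.run()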
Example #7
def create_volume_mixedfx_workflow(name="volume_group",
                                   subject_list=None,
                                   regressors=None,
                                   contrasts=None,
                                   exp_info=None):

    # Handle default arguments
    if subject_list is None:
        subject_list = []
    if regressors is None:
        regressors = dict(group_mean=[])
    if contrasts is None:
        contrasts = [["group_mean", "T", ["group_mean"], [1]]]
    if exp_info is None:
        exp_info = lyman.default_experiment_parameters()

    # Define workflow inputs
    inputnode = Node(
        IdentityInterface(["l1_contrast", "copes", "varcopes", "dofs"]),
        "inputnode")

    # Merge the fixed effect summary images into one 4D image
    merge = Node(MergeAcrossSubjects(regressors=regressors), "merge")

    # Make a simple design
    design = Node(fsl.MultipleRegressDesign(contrasts=contrasts), "design")

    # Fit the mixed effects model
    flameo = Node(fsl.FLAMEO(run_mode=exp_info["flame_mode"]), "flameo")

    # Estimate the smoothness of the data
    smoothest = Node(fsl.SmoothEstimate(), "smoothest")

    # Correct for multiple comparisons
    cluster = Node(
        fsl.Cluster(threshold=exp_info["cluster_zthresh"],
                    pthreshold=exp_info["grf_pthresh"],
                    out_threshold_file=True,
                    out_index_file=True,
                    out_localmax_txt_file=True,
                    peak_distance=exp_info["peak_distance"],
                    use_mm=True), "cluster")

    # Project the mask and thresholded zstat onto the surface
    surfproj = create_surface_projection_workflow(exp_info=exp_info)

    # Segment the z stat image with a watershed algorithm
    watershed = Node(Watershed(), "watershed")

    # Make static report images in the volume
    report = Node(MFXReport(), "report")
    report.inputs.subjects = subject_list

    # Save the experiment info
    saveparams = Node(SaveParameters(exp_info=exp_info), "saveparams")

    # Define the workflow outputs
    outputnode = Node(
        IdentityInterface([
            "copes", "varcopes", "mask_file", "flameo_stats", "thresh_zstat",
            "surf_zstat", "surf_mask", "cluster_image", "seg_file",
            "peak_file", "lut_file", "report", "json_file"
        ]), "outputnode")

    # Define and connect up the workflow
    group = Workflow(name)
    group.connect([
        (inputnode, merge, [("copes", "cope_files"),
                            ("varcopes", "varcope_files"),
                            ("dofs", "dof_files")]),
        (inputnode, saveparams, [("copes", "in_file")]),
        (merge, flameo, [("cope_file", "cope_file"),
                         ("varcope_file", "var_cope_file"),
                         ("dof_file", "dof_var_cope_file"),
                         ("mask_file", "mask_file")]),
        (merge, design, [("regressors", "regressors")]),
        (design, flameo, [("design_con", "t_con_file"),
                          ("design_grp", "cov_split_file"),
                          ("design_mat", "design_file")]),
        (flameo, smoothest, [("zstats", "zstat_file")]),
        (merge, smoothest, [("mask_file", "mask_file")]),
        (smoothest, cluster, [("dlh", "dlh"), ("volume", "volume")]),
        (flameo, cluster, [("zstats", "in_file")]),
        (cluster, watershed, [("threshold_file", "zstat_file"),
                              ("localmax_txt_file", "localmax_file")]),
        (merge, report, [("mask_file", "mask_file"),
                         ("cope_file", "cope_file")]),
        (flameo, report, [("zstats", "zstat_file")]),
        (cluster, report, [("threshold_file", "zstat_thresh_file"),
                           ("localmax_txt_file", "localmax_file")]),
        (watershed, report, [("seg_file", "seg_file")]),
        (merge, surfproj, [("mask_file", "inputs.mask_file")]),
        (cluster, surfproj, [("threshold_file", "inputs.zstat_file")]),
        (merge, outputnode, [("cope_file", "copes"),
                             ("varcope_file", "varcopes"),
                             ("mask_file", "mask_file")]),
        (flameo, outputnode, [("stats_dir", "flameo_stats")]),
        (cluster, outputnode, [("threshold_file", "thresh_zstat"),
                               ("index_file", "cluster_image")]),
        (watershed, outputnode, [("seg_file", "seg_file"),
                                 ("peak_file", "peak_file"),
                                 ("lut_file", "lut_file")]),
        (surfproj, outputnode, [("outputs.surf_zstat", "surf_zstat"),
                                ("outputs.surf_mask", "surf_mask")]),
        (report, outputnode, [("out_files", "report")]),
        (saveparams, outputnode, [("json_file", "json_file")]),
    ])

    return group, inputnode, outputnode
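
# The factory returns the workflow together with its endpoints so it can be
# embedded in a larger pipeline; a sketch (hypothetical exp_info and file
# lists):
#
#   group, inputnode, outputnode = create_volume_mixedfx_workflow(
#       name="volume_group", subject_list=subjects, exp_info=exp_info)
#   inputnode.inputs.copes = cope_files
#   inputnode.inputs.varcopes = varcope_files
#   inputnode.inputs.dofs = dof_files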
Example #8
def init_higherlevel_wf(run_mode="flame1", name="higherlevel",
                        subjects=None, covariates=None,
                        subject_groups=None, group_contrasts=None,
                        outname=None, workdir=None, task=None):
    """

    :param run_mode: mode argument passed to FSL FLAMEO (Default value = "flame1")
    :param name: workflow name (Default value = "higherlevel")
    :param subjects: list of subject names (Default value = None)
    :param covariates: two-level dictionary of covariates by name and subject (Default value = None)
    :param subject_groups: dictionary of subjects by group (Default value = None)
    :param group_contrasts: two-level dictionary of contrasts by contrast name and values by group (Default = None)
    :param outname: names of inputs for higherlevel workflow, names of outputs from firstlevel workflow

    """
    workflow = pe.Workflow(name=name)

    inputnode = pe.Node(
        interface=niu.IdentityInterface(
            fields=["imgs", "varcopes", "dof_files", "mask_files"]
        ),
        name="inputnode"
    )

    outputnode = pe.Node(
        interface=niu.IdentityInterface(
            fields=["imgs", "varcopes", "zstats", "dof_files", "mask_file"]
        ),
        name="outputnode"
    )

    # merge all input nii image files to one big nii file
    maskmerge = pe.Node(
        interface=fsl.Merge(dimension="t"),
        name="maskmerge"
    )
    # calculate the intersection of all masks
    maskagg = pe.Node(
        interface=fsl.ImageMaths(
            op_string="-Tmin -thr 1 -bin"
        ),
        name="maskagg"
    )

    # merge all input nii image files to one big nii file
    imgmerge = pe.Node(
        interface=fsl.Merge(dimension="t"),
        name="imgmerge"
    )

    # we get a text dof_file, but need to transform it to an nii image
    gendofimage = pe.MapNode(
        interface=fsl.ImageMaths(),
        iterfield=["in_file", "op_string"],
        name="gendofimage"
    )

    # merge all input nii image files to one big nii file
    varcopemerge = pe.Node(
        interface=fsl.Merge(dimension="t"),
        name="varcopemerge"
    )

    # merge all generated nii image files to one big nii file
    dofmerge = pe.Node(
        interface=fsl.Merge(dimension="t"),
        name="dofmerge"
    )

    # specify statistical analysis

    # Read qcresults.json and exclude bad subjects from statistics
    excluded_overview = get_qualitycheck_exclude(workdir)
    excluded_subjects = []
    if excluded_overview:
        df_exclude = pd.DataFrame(excluded_overview).transpose()
        excluded_subjects = df_exclude.loc[df_exclude[task] == True].index
        trimmed_subjects = list(subjects)
        for excluded_subject in excluded_subjects:
            trimmed_subjects.remove(excluded_subject)

        # save a json file in workdir listing the included subjects if any were excluded due to qualitycheck
        # sets are used here for easy subtraction of subjects
        included_subjects = list(set(subjects) - set(excluded_subjects))
        df_included_subjects = pd.DataFrame(included_subjects, columns=['Subjects'])
        df_included_subjects = df_included_subjects.sort_values(by=['Subjects'])  # sort by name
        df_included_subjects = df_included_subjects.reset_index(drop=True)  # reindex for ascending numbers
        json_path = workdir + '/included_subjects.json'
        df_included_subjects.to_json(json_path)
        with open(json_path, 'w') as json_file:
            # json is loaded from pandas to json and then dumped to get indent in file
            json.dump(json.loads(df_included_subjects.to_json()), json_file, indent=4)
    else:
        trimmed_subjects = subjects # in case there are no excluded subjects

    # option 1: one-sample t-test
    contrasts = [["mean", "T", ["intercept"], [1]]]
    level2model = pe.Node(
        interface=fsl.MultipleRegressDesign(
            regressors={"intercept": [1.0 for s in trimmed_subjects]},
            contrasts=contrasts
        ),
        name="l2model"
    )

    if covariates is not None:

        # Transform covariates dict to pandas dataframe
        df_covariates = pd.DataFrame(covariates)
        if list(excluded_subjects):
            # Read qcresults.json and exclude bad subjects from covariates and subject_groups
            df_covariates = df_covariates.drop(excluded_subjects)

            for excluded_subject in excluded_subjects:
                subject_groups.pop(excluded_subject, None)

        for covariate in df_covariates:
            # Demean covariates for flameo
            df_covariates[covariate] = df_covariates[covariate] - df_covariates[covariate].mean()
        # transform reduced covariates back to dict for later purposes
        covariates = df_covariates.to_dict()

        # add SubjectGroups and ID to header
        df_subject_group = pd.DataFrame.from_dict(subject_groups, orient='index', columns=['SubjectGroup'])
        df_covariates = pd.concat([df_subject_group, df_covariates], axis=1, sort=True)
        df_covariates = df_covariates.reset_index()  # add id column
        df_covariates = df_covariates.rename(columns={'index': 'Subject_ID'})  # rename subject column

        # save demeaned covariates to csv
        df_covariates.to_csv(workdir + '/demeaned_covariates.csv')

        # transform to dictionary of lists
        regressors = {k: [float(v[s]) for s in trimmed_subjects] for k, v in covariates.items()}
        if not subject_groups:
            # one-sample t-test with covariates
            regressors["intercept"] = [1.0 for s in trimmed_subjects]
            level2model = pe.Node(
                interface=fsl.MultipleRegressDesign(
                    regressors=regressors,
                    contrasts=contrasts
                ),
                name="l2model"
            )
        else:
            # two-sample t-tests with covariates

            # dummy coding of variables: group names --> numbers in the matrix
            # see fsl feat documentation
            # https://fsl.fmrib.ox.ac.uk/fsl/fslwiki/FEAT/UserGuide#Tripled_Two-Group_Difference_.28.22Tripled.22_T-Test.29
            dummies = pd.Series(subject_groups).str.get_dummies().to_dict()
            # transform to dictionary of lists
            dummies = {k: [float(v[s]) for s in trimmed_subjects] for k, v in dummies.items()}
            regressors.update(dummies)
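            # e.g. (hypothetical) subject_groups = {"s1": "patient", "s2": "control"}
            # with trimmed_subjects == ["s1", "s2"] yields
            # dummies = {"control": [0.0, 1.0], "patient": [1.0, 0.0]}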

            # transform to dictionary of lists
            contrasts = [[k, "T"] + list(map(list, zip(*v.items()))) for k, v in group_contrasts.items()]

            level2model = pe.Node(
                interface=fsl.MultipleRegressDesign(
                    regressors=regressors,
                    contrasts=contrasts
                ),
                name="l2model"
            )

    contrast_names = [c[0] for c in contrasts]

    # actually run FSL FLAME

    if outname not in ["reho", "alff"]:
        flameo = pe.MapNode(
            interface=fsl.FLAMEO(
                run_mode=run_mode
            ),
            name="flameo",
            iterfield=["cope_file", "var_cope_file"]
        )
    else:
        flameo = pe.MapNode(
            interface=fsl.FLAMEO(
                run_mode=run_mode
            ),
            name="flameo",
            iterfield=["cope_file"]
        )

    workflow.connect([
        (inputnode, imgmerge, [
            ("imgs", "in_files")
        ]),

        (inputnode, maskmerge, [
            ("mask_files", "in_files")
        ]),
        (maskmerge, maskagg, [
            ("merged_file", "in_file")
        ]),
    ])
    if outname not in ["reho", "alff"]:
        workflow.connect([
            (inputnode, gendofimage, [
                ("imgs", "in_file"),
                (("dof_files", gen_merge_op_str), "op_string")
            ]),

            (inputnode, varcopemerge, [
                ("varcopes", "in_files")
            ]),

            (gendofimage, dofmerge, [
                ("out_file", "in_files")
            ])])

    workflow.connect([
        (imgmerge, flameo, [
            ("merged_file", "cope_file")
        ])])

    if outname not in ["reho", "alff"]:
        workflow.connect([
            (varcopemerge, flameo, [
                ("merged_file", "var_cope_file")
            ]),
            (dofmerge, flameo, [
                ("merged_file", "dof_var_cope_file")
            ])])

    workflow.connect([
        (level2model, flameo, [
            ("design_mat", "design_file"),
            ("design_con", "t_con_file"),
            ("design_grp", "cov_split_file")
        ]),

        (flameo, outputnode, [
            (("copes", flatten), "imgs"),
            (("var_copes", flatten), "varcopes"),
            (("zstats", flatten), "zstats"),
            (("tdof", flatten), "dof_files")
        ]),
        (maskagg, flameo, [
            ("out_file", "mask_file")
        ]),
        (maskagg, outputnode, [
            ("out_file", "mask_file")
        ]),
    ])

    return workflow, contrast_names
Example #9
        #               "age_sex": {"variables": ["age", "male", "female"],
        #                            "contrasts": [("pos_age", 'T', ["age"], [1]),
        #                                          ("neg_age", 'T', ["age"], [-1]),
        #                                          ("male_higher_than_female", 'T', ["male", "female"], [1, -1]),
        #                                          ("female_higher_than_male", 'T', ["male", "female"], [-1, 1]),
        #                                         ]},
        "first_sum": {
            "variables": ["firstSum", "age", "male", "female"],
            "contrasts": [
                ("pos_firstSum", 'T', ["firstSum"], [1]),
                ("neg_firstSum", 'T', ["firstSum"], [-1]),
            ]
        }
    }
    for name, model in models.items():  # .items(): iteritems() is Python 2 only
        model_node = pe.Node(fsl.MultipleRegressDesign(),
                             name="%s_model" % name)
        regressors = {}
        for reg in model["variables"]:
            regressors[reg] = list(regressors_df[reg])
        model_node.inputs.regressors = regressors
        model_node.inputs.contrasts = model["contrasts"]
        model_nodes[name] = model_node

#     first_part_model_node = pe.Node(fsl.MultipleRegressDesign(), name="first_part_model")
#     regressors = {}
#     for reg in confounds + ["past", "future", "positive", "negative", "friends"]:
#         regressors[reg] = list(regressors_df[reg])
#     past = ("past", 'T', ["past"], [1])
#     future = ("future", 'T', ["future"], [1])
#     past_vs_future = ("past_vs_future", 'T', ["past", "future"], [1, -1])
def create_run_flow(name='run_flow'):
    """custom made fixed_effects_workflow for investigating run effects
    
    Inputs:

         inputspec.copes : list of list of cope files (one list per contrast)
         inputspec.varcopes : list of list of varcope files (one list per
                              contrast)
         inputspec.dof_files : degrees of freedom files for each run

    Outputs:

         outputspec.res4d : 4d residual time series
         outputspec.copes : contrast parameter estimates
         outputspec.varcopes : variance of contrast parameter estimates
         outputspec.zstats : z statistics of contrasts
         outputspec.tstats : t statistics of contrasts
    """
    from nipype.interfaces.utility import Function
    from nipype.interfaces import fsl
    from nipype import Node, MapNode, Workflow
    from nipype.interfaces.utility import IdentityInterface

    """
    Instantiate Workflow
    """
    runmodel_dir = '/home/data_oli/run-groups/'
    run_flow = Workflow(name=name)
    inputspec = Node(IdentityInterface(fields=['copes',
                                               'varcopes',
                                               'dof_files'
                                               ]),
                     name='inputspec')

    """
    Merge the copes and varcopes for each condition
    """

    copemerge = MapNode(interface=fsl.Merge(dimension='t'),
                           iterfield=['in_files'],
                           name="copemerge")

    varcopemerge = MapNode(interface=fsl.Merge(dimension='t'),
                              iterfield=['in_files'],
                              name="varcopemerge")

    """
    Oli wrote this function to read the EVs / contrasts for the run model from
    a text file and bring them into shape for level2model
    """

    def get_run_contrast(con_file, ev_file):
        """
        Read the files containing regressor values and contrasts for
        2nd level analysis. Returns them in a shape that is accepted by
        'fsl.MultipleRegressDesign()'.
        
        Parameters
        ----------
        con_file:   file
            text file containing the 2nd lvl contrasts. Each row in the file
            is a contrast.
        ev_file:    file
            text file containing regressor values. Header will be ignored.
            First column represents input name (here: run number). Further
            columns represent regressor values. Columns separated by tabs.

        Returns
        -------
        evdict:     dict
            containing 2nd lvl regressors
        runtrast:   list
            containing 2nd lvl contrasts
        nl2:        int
            number of 2nd lvl contrasts
        """

        # create regressor dict
        with open(ev_file, 'rt') as f:
            evlines = [line.split() for line in f.readlines()]
        evnames = evlines[0][1:]
        evweights = [list(map(float, i[1:])) for i in evlines[1:]]
        evdict = dict()
        for name in evnames:
            evdict[name] = ([i[evnames.index(name)] for i in evweights])

        # create contrast list
        # TODO: this works with simple main effects. Should be made more flexible later on.
        with open(con_file, 'rt') as f:
            conlines = [i.split() for i in f.readlines()]

        runtrast = []

        for conline in conlines:
            if conline[0] == '#':
                continue
            # if contrast is a T-Test
            elif conline[1] == 'T':
                runtrast.append(tuple(conline[0:2] + [[conline[2]]] + [[float(conline[3])]]))
        nl2 = len(runtrast)
        return evdict, runtrast, nl2
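
    # A hypothetical behav.txt / runcontrast.txt pair and the shapes produced
    # (tab-separated; the header row of behav.txt is skipped for values):
    #
    #   behav.txt:          run    run1   run2
    #                       1      1      0
    #                       2      0      1
    #   runcontrast.txt:    run1   T   run1   1
    #                       run2   T   run2   1
    #
    #   -> evdict   = {'run1': [1.0, 0.0], 'run2': [0.0, 1.0]}
    #   -> runtrast = [('run1', 'T', ['run1'], [1.0]),
    #                  ('run2', 'T', ['run2'], [1.0])]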

    run_contrast = Node(Function(input_names=['con_file', 'ev_file'],
                                 output_names=['evdict', 'runtrast', 'nl2'],
                                 function=get_run_contrast),
                        name='run_contrast')

    run_contrast.inputs.con_file = runmodel_dir + 'runcontrast.txt'
    run_contrast.inputs.ev_file = runmodel_dir + 'behav.txt'

    """
    Generate subject and condition specific level 2 model design files
    """
    level2model = Node(interface=fsl.MultipleRegressDesign(),
                       name='runmodel')

    """
    Estimate a second level model
    """

    flameo = MapNode(interface=fsl.FLAMEO(run_mode='fe'), name="flameo",
                     iterfield=['cope_file', 'var_cope_file'])

    def get_dofvolumes(dof_files, cope_files):
        import os
        import nibabel as nb
        import numpy as np
        img = nb.load(cope_files[0])
        if len(img.shape) > 3:
            out_data = np.zeros(img.shape)
        else:
            out_data = np.zeros(list(img.shape) + [1])
        for i in range(out_data.shape[-1]):
            dof = np.loadtxt(dof_files[i])
            out_data[:, :, :, i] = dof
        filename = os.path.join(os.getcwd(), 'dof_file.nii.gz')
        newimg = nb.Nifti1Image(out_data, None, img.header)
        newimg.to_filename(filename)
        return filename

    gendof = Node(Function(input_names=['dof_files', 'cope_files'],
                                output_names=['dof_volume'],
                                function=get_dofvolumes),
                  name='gendofvolume')

    """
    Connect all the Nodes in the workflow
    """

    outputspec = Node(IdentityInterface(fields=['res4d',
                                                'copes', 'varcopes',
                                                'zstats', 'tstats',
                                                'nl2']),
                      name='outputspec')

    run_flow.connect([(inputspec, copemerge, [('copes', 'in_files')]),
                      (inputspec, varcopemerge, [('varcopes', 'in_files')]),
                      (inputspec, gendof, [('dof_files', 'dof_files')]),
                      (copemerge, gendof, [('merged_file', 'cope_files')]),
                      (copemerge, flameo, [('merged_file', 'cope_file')]),
                      (varcopemerge, flameo, [('merged_file',
                                               'var_cope_file')]),
                      (run_contrast, level2model, [('evdict', 'regressors'),
                                                   ('runtrast', 'contrasts')]),
                      (level2model, flameo, [('design_mat', 'design_file'),
                                             ('design_con', 't_con_file'),
                                             ('design_fts', 'f_con_file'),
                                             ('design_grp', 'cov_split_file')]),
                      (gendof, flameo, [('dof_volume', 'dof_var_cope_file')]),
                      (run_contrast, outputspec, [('nl2', 'nl2')]),
                      (flameo, outputspec, [('res4d', 'res4d'),
                                            ('copes', 'copes'),
                                            ('var_copes', 'varcopes'),
                                            ('zstats', 'zstats'),
                                            ('tstats', 'tstats')
                                            ])
                      ])
    return run_flow
Example #11
def init_model_wf(
        workdir: Path,
        model,
        numinputs=1,
        variables=None,
        memcalc=MemoryCalculator.default(),
):
    name = f"{format_workflow(model.name)}_wf"
    workflow = pe.Workflow(name=name)

    if model is None:
        return workflow

    #
    inputnode = Node(
        niu.IdentityInterface(
            fields=[f"in{i:d}" for i in range(1, numinputs + 1)]),
        allow_missing_input_source=True,
        name="inputnode",
    )
    outputnode = pe.Node(niu.IdentityInterface(fields=["resultdicts"]),
                         name="outputnode")

    # setup outputs
    make_resultdicts_a = pe.Node(
        MakeResultdicts(
            tagkeys=["model", "contrast"],
            imagekeys=[
                "design_matrix", "contrast_matrix", *modelfit_model_outputs
            ],
            deletekeys=["contrast"],
        ),
        name="make_resultdicts_a",
    )

    statmaps = [modelfit_aliases.get(m, m) for m in modelfit_contrast_outputs]
    make_resultdicts_b = pe.Node(
        MakeResultdicts(
            tagkeys=["model", "contrast"],
            imagekeys=statmaps,
            metadatakeys=["critical_z"],
            missingvalues=[
                None,
                False,
            ],  # need to use False because traits doesn't support NoneType
        ),
        name="make_resultdicts_b",
    )

    # model is guaranteed non-None after the guard at the top of the function
    make_resultdicts_a.inputs.model = model.name
    make_resultdicts_b.inputs.model = model.name

    # copy out results
    merge_resultdicts_b = pe.Node(niu.Merge(3), name="merge_resultdicts_b")
    workflow.connect(make_resultdicts_a, "resultdicts", merge_resultdicts_b,
                     "in1")
    workflow.connect(make_resultdicts_b, "resultdicts", merge_resultdicts_b,
                     "in2")

    workflow.connect(merge_resultdicts_b, "out", outputnode, "resultdicts")

    resultdict_datasink = pe.Node(
        ResultdictDatasink(base_directory=str(workdir)),
        name="resultdict_datasink")
    workflow.connect(merge_resultdicts_b, "out", resultdict_datasink,
                     "indicts")

    # merge inputs
    merge_resultdicts_a = Node(
        niu.Merge(numinputs),
        allow_missing_input_source=True,
        name="merge_resultdicts_a",
    )
    for i in range(1, numinputs + 1):
        workflow.connect(inputnode, f"in{i:d}", merge_resultdicts_a,
                         f"in{i:d}")

    # filter inputs
    filter_kwargs = dict(
        require_one_of_images=["effect", "reho", "falff", "alff"],
        exclude_files=[
            str(workdir / "exclude*.json"),
            str(workdir / "reports" / "exclude*.json"),
        ],
    )
    if (hasattr(model, "filters") and model.filters is not None
            and len(model.filters) > 0):
        filter_kwargs.update(dict(filter_dicts=model.filters))
    if hasattr(model, "spreadsheet"):
        if model.spreadsheet is not None and variables is not None:
            filter_kwargs.update(
                dict(spreadsheet=model.spreadsheet, variable_dicts=variables))
    filter_resultdicts = pe.Node(
        interface=FilterResultdicts(**filter_kwargs),
        name="filter_resultdicts",
    )
    workflow.connect(merge_resultdicts_a, "out", filter_resultdicts,
                     "in_dicts")

    # aggregate data structures
    # output is a list where each element represents a separate model run
    aggregate_resultdicts = pe.Node(
        AggregateResultdicts(numinputs=1, across=model.across),
        name="aggregate_resultdicts",
    )
    workflow.connect(filter_resultdicts, "resultdicts", aggregate_resultdicts,
                     "in1")

    # extract fields from the aggregated data structure
    aliases = dict(effect=["reho", "falff", "alff"])
    extract_from_resultdict = MapNode(
        ExtractFromResultdict(keys=[model.across, *statmaps], aliases=aliases),
        iterfield="indict",
        allow_undefined_iterfield=True,
        name="extract_from_resultdict",
    )
    workflow.connect(aggregate_resultdicts, "resultdicts",
                     extract_from_resultdict, "indict")

    # copy over aggregated metadata and tags to outputs
    for make_resultdicts_node in [make_resultdicts_a, make_resultdicts_b]:
        workflow.connect(extract_from_resultdict, "tags",
                         make_resultdicts_node, "tags")
        workflow.connect(extract_from_resultdict, "metadata",
                         make_resultdicts_node, "metadata")
        workflow.connect(extract_from_resultdict, "vals",
                         make_resultdicts_node, "vals")

    # create models
    if model.type in ["fe", "me"]:  # intercept only model
        countimages = pe.Node(
            niu.Function(
                input_names=["arrarr"],
                output_names=["image_count"],
                function=len_for_each,
            ),
            name="countimages",
        )
        workflow.connect(extract_from_resultdict, "effect", countimages,
                         "arrarr")

        modelspec = MapNode(
            InterceptOnlyDesign(),
            name="modelspec",
            iterfield="n_copes",
            mem_gb=memcalc.min_gb,
        )
        workflow.connect(countimages, "image_count", modelspec, "n_copes")

    elif model.type in ["lme"]:  # glm
        modelspec = MapNode(
            GroupDesign(
                spreadsheet=model.spreadsheet,
                contrastdicts=model.contrasts,
                variabledicts=variables,
            ),
            name="modelspec",
            iterfield="subjects",
            mem_gb=memcalc.min_gb,
        )
        workflow.connect(extract_from_resultdict, "sub", modelspec, "subjects")

    else:
        raise ValueError()

    workflow.connect(modelspec, "contrast_names", make_resultdicts_b,
                     "contrast")

    # run models
    if model.type in [
            "fe"
    ]:  # fixed effects aggregate for multiple runs, sessions, etc.
        # pass length one inputs because we may want to use them on a higher level
        workflow.connect(
            aggregate_resultdicts,
            "non_aggregated_resultdicts",
            merge_resultdicts_b,
            "in3",
        )

        # need to merge
        mergenodeargs = dict(iterfield="in_files",
                             mem_gb=memcalc.volume_std_gb * 3)
        mergemask = MapNode(MergeMask(), name="mergemask", **mergenodeargs)
        workflow.connect(extract_from_resultdict, "mask", mergemask,
                         "in_files")

        mergeeffect = MapNode(Merge(dimension="t"),
                              name="mergeeffect",
                              **mergenodeargs)
        workflow.connect(extract_from_resultdict, "effect", mergeeffect,
                         "in_files")

        mergevariance = MapNode(Merge(dimension="t"),
                                name="mergevariance",
                                **mergenodeargs)
        workflow.connect(extract_from_resultdict, "variance", mergevariance,
                         "in_files")

        fe_run_mode = MapNode(
            niu.Function(
                input_names=["var_cope_file"],
                output_names=["run_mode"],
                function=_fe_run_mode,
            ),
            iterfield=["var_cope_file"],
            name="fe_run_mode",
        )
        workflow.connect(mergevariance, "merged_file", fe_run_mode,
                         "var_cope_file")

        # prepare design matrix
        multipleregressdesign = MapNode(
            fsl.MultipleRegressDesign(),
            name="multipleregressdesign",
            iterfield=["regressors", "contrasts"],
            mem_gb=memcalc.min_gb,
        )
        workflow.connect(modelspec, "regressors", multipleregressdesign,
                         "regressors")
        workflow.connect(modelspec, "contrasts", multipleregressdesign,
                         "contrasts")

        # use FSL implementation
        modelfit = MapNode(
            FLAMEO(),
            name="modelfit",
            mem_gb=memcalc.volume_std_gb * 10,
            iterfield=[
                "run_mode",
                "mask_file",
                "cope_file",
                "var_cope_file",
                "design_file",
                "t_con_file",
                "cov_split_file",
            ],
        )
        workflow.connect(fe_run_mode, "run_mode", modelfit, "run_mode")
        workflow.connect(mergemask, "merged_file", modelfit, "mask_file")
        workflow.connect(mergeeffect, "merged_file", modelfit, "cope_file")
        workflow.connect(mergevariance, "merged_file", modelfit,
                         "var_cope_file")
        workflow.connect(multipleregressdesign, "design_mat", modelfit,
                         "design_file")
        workflow.connect(multipleregressdesign, "design_con", modelfit,
                         "t_con_file")
        workflow.connect(multipleregressdesign, "design_grp", modelfit,
                         "cov_split_file")

        # mask output
        workflow.connect(mergemask, "merged_file", make_resultdicts_b, "mask")

    elif model.type in ["me", "lme"]:  # mixed effects across subjects
        # use custom implementation
        modelfit = MapNode(
            ModelFit(algorithms_to_run=model.algorithms),
            name="modelfit",
            n_procs=config.nipype.omp_nthreads,
            mem_gb=memcalc.volume_std_gb * 10,
            iterfield=[
                "mask_files",
                "cope_files",
                "var_cope_files",
                "regressors",
                "contrasts",
            ],
        )
        workflow.connect(extract_from_resultdict, "mask", modelfit,
                         "mask_files")
        workflow.connect(extract_from_resultdict, "effect", modelfit,
                         "cope_files")
        workflow.connect(extract_from_resultdict, "variance", modelfit,
                         "var_cope_files")

        workflow.connect(modelspec, "regressors", modelfit, "regressors")
        workflow.connect(modelspec, "contrasts", modelfit, "contrasts")

        # random field theory
        smoothest = MapNode(
            fsl.SmoothEstimate(),
            iterfield=["zstat_file", "mask_file"],
            name="smoothest",
            allow_undefined_iterfield=True,
        )
        workflow.connect([(modelfit, smoothest, [(("zstats", ravel),
                                                  "zstat_file")])])
        workflow.connect([(modelfit, smoothest, [(("masks", ravel),
                                                  "mask_file")])])

        criticalz = pe.Node(
            niu.Function(
                input_names=["voxels", "resels"],
                output_names=["critical_z"],
                function=_critical_z,
            ),
            name="criticalz",
        )
        workflow.connect(smoothest, "volume", criticalz, "voxels")
        workflow.connect(smoothest, "resels", criticalz, "resels")
        workflow.connect(criticalz, "critical_z", make_resultdicts_b,
                         "critical_z")

    else:
        raise ValueError()

    # connect modelfit outputs
    assert modelfit.outputs is not None
    for k, _ in modelfit.outputs.items():
        if k in modelfit_exclude:
            continue

        attr = k
        if k in modelfit_aliases:
            attr = modelfit_aliases[k]
        if attr in statmaps:
            workflow.connect(modelfit, k, make_resultdicts_b, attr)
        else:
            workflow.connect(modelfit, k, make_resultdicts_a, attr)

    # make tsv files for design and contrast matrices
    maketsv = MapNode(
        MakeDesignTsv(),
        iterfield=["regressors", "contrasts", "row_index"],
        name="maketsv",
    )
    workflow.connect(extract_from_resultdict, model.across, maketsv,
                     "row_index")
    workflow.connect(modelspec, "regressors", maketsv, "regressors")
    workflow.connect(modelspec, "contrasts", maketsv, "contrasts")

    workflow.connect(maketsv, "design_tsv", make_resultdicts_a,
                     "design_matrix")
    workflow.connect(maketsv, "contrasts_tsv", make_resultdicts_a,
                     "contrast_matrix")

    return workflow
Example #12
def l2_anova(
    l1_dir,
    keep_work=False,
    l2_dir="",
    loud=False,
    tr=1,
    nprocs=6,
    workflow_name="generic",
    mask="/usr/share/mouse-brain-atlases/dsurqec_200micron_mask.nii",
    exclude={},
    include={},
        match_regex=r'.+/sub-(?P<sub>[a-zA-Z0-9]+)/ses-(?P<ses>[a-zA-Z0-9]+)/.*?_acq-(?P<acq>[a-zA-Z0-9]+)_task-(?P<task>[a-zA-Z0-9]+)_(?P<mod>[a-zA-Z0-9]+)_(?P<stat>(cope|varcb)+)\.(?:nii|nii\.gz)'
):

    l1_dir = path.expanduser(l1_dir)
    if not l2_dir:
        l2_dir = path.abspath(path.join(l1_dir, "..", "..", "l2"))

    mask = path.abspath(path.expanduser(mask))

    datafind = nio.DataFinder()
    datafind.inputs.root_paths = l1_dir
    datafind.inputs.match_regex = match_regex
    datafind_res = datafind.run()

    data_selection = zip(*[
        datafind_res.outputs.sub, datafind_res.outputs.ses,
        datafind_res.outputs.acq, datafind_res.outputs.task,
        datafind_res.outputs.mod, datafind_res.outputs.stat,
        datafind_res.outputs.out_paths
    ])
    data_selection = [list(i) for i in data_selection]
    data_selection = pd.DataFrame(data_selection,
                                  columns=('subject', 'session', 'acquisition',
                                           'task', 'modality', 'statistic',
                                           'path'))

    data_selection = data_selection.sort_values(['session', 'subject'],
                                                ascending=[1, 1])
    if exclude:
        for key in exclude:
            data_selection = data_selection[
                ~data_selection[key].isin(exclude[key])]
    if include:
        for key in include:
            data_selection = data_selection[data_selection[key].isin(
                include[key])]

    copes = data_selection[data_selection['statistic'] ==
                           'cope']['path'].tolist()
    varcopes = data_selection[data_selection['statistic'] ==
                              'varcb']['path'].tolist()

    copemerge = pe.Node(interface=fsl.Merge(dimension='t'), name="copemerge")
    copemerge.inputs.in_files = copes
    copemerge.inputs.merged_file = 'copes.nii.gz'

    varcopemerge = pe.Node(interface=fsl.Merge(dimension='t'),
                           name="varcopemerge")
    varcopemerge.inputs.in_files = varcopes
    varcopemerge.inputs.merged_file = 'varcopes.nii.gz'

    copeonly = data_selection[data_selection['statistic'] == 'cope']
    regressors = {}
    for sub in copeonly['subject'].unique():
        regressor = [int(i) for i in copeonly['subject'] == sub]
        key = "sub-" + str(sub)
        regressors[key] = regressor
    reference = str(copeonly['session'].unique()[0])
    for ses in copeonly['session'].unique()[1:]:
        regressor = [int(i) for i in copeonly['session'] == ses]
        key = "ses-(" + str(ses) + '-' + reference + ')'
        regressors[key] = regressor

    sessions = [[i, 'T', [i], [1]] for i in regressors.keys() if "ses-" in i]
    contrasts = deepcopy(sessions)
    contrasts.append(['anova', 'F', sessions])
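    # e.g. with (hypothetical) sessions 'base', 'pre' and 'post', 'base' is
    # the reference and contrasts becomes:
    #   [['ses-(pre-base)', 'T', ['ses-(pre-base)'], [1]],
    #    ['ses-(post-base)', 'T', ['ses-(post-base)'], [1]],
    #    ['anova', 'F', [['ses-(pre-base)', 'T', ['ses-(pre-base)'], [1]],
    #                    ['ses-(post-base)', 'T', ['ses-(post-base)'], [1]]]]]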

    level2model = pe.Node(interface=fsl.MultipleRegressDesign(),
                          name='level2model')
    level2model.inputs.regressors = regressors
    level2model.inputs.contrasts = contrasts

    flameo = pe.Node(interface=fsl.FLAMEO(), name="flameo")
    flameo.inputs.mask_file = mask
    # Mixed-effects estimation ('flame12') is preferable to 'ols' here
    # (cf. https://dpaniukov.github.io/2016/07/14/three-level-analysis-with-fsl-and-ants-2.html);
    # it has also been tested in SAMRI and shown to give better estimates.
    flameo.inputs.run_mode = "flame12"

    substitutions = []
    t_counter = 1
    f_counter = 1
    for contrast in contrasts:
        if contrast[1] == 'T':
            for i in ['cope', 'tstat', 'zstat']:
                substitutions.append(
                    (i + str(t_counter), contrast[0] + "_" + i))
            t_counter += 1
        if contrast[1] == 'F':
            for i in ['zfstat', 'fstat']:
                substitutions.append(
                    (i + str(f_counter), contrast[0] + "_" + i))
            f_counter += 1

    datasink = pe.Node(nio.DataSink(), name='datasink')
    datasink.inputs.base_directory = path.join(l2_dir, workflow_name)
    datasink.inputs.substitutions = substitutions

    workflow_connections = [
        (copemerge, flameo, [('merged_file', 'cope_file')]),
        (varcopemerge, flameo, [('merged_file', 'var_cope_file')]),
        (level2model, flameo, [('design_mat', 'design_file')]),
        (level2model, flameo, [('design_grp', 'cov_split_file')]),
        (level2model, flameo, [('design_fts', 'f_con_file')]),
        (level2model, flameo, [('design_con', 't_con_file')]),
        (flameo, datasink, [('copes', '@copes')]),
        (flameo, datasink, [('tstats', '@tstats')]),
        (flameo, datasink, [('zstats', '@zstats')]),
        (flameo, datasink, [('fstats', '@fstats')]),
        (flameo, datasink, [('zfstats', '@zfstats')]),
    ]

    workdir_name = workflow_name + "_work"
    workflow = pe.Workflow(name=workdir_name)
    workflow.connect(workflow_connections)
    workflow.config = {
        "execution": {
            "crashdump_dir": path.join(l2_dir, "crashdump")
        }
    }
    workflow.base_dir = l2_dir
    workflow.write_graph(dotfilename=path.join(workflow.base_dir, workdir_name,
                                               "graph.dot"),
                         graph2use="hierarchical",
                         format="png")

    if not loud:
        try:
            workflow.run(plugin="MultiProc", plugin_args={'n_procs': nprocs})
        except RuntimeError:
            print(
                "WARNING: Some expected tasks have not been found (or another RuntimeError has occurred)."
            )
        for f in listdir(getcwd()):
            if re.search("crash.*?(-varcopemerge|-copemerge)", f):
                remove(path.join(getcwd(), f))
    else:
        workflow.run(plugin="MultiProc", plugin_args={'n_procs': nprocs})

    if not keep_work:
        shutil.rmtree(path.join(l2_dir, workdir_name))
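
A hedged usage sketch (the level-1 directory and the excluded subject ID below are hypothetical, not taken from the code above):

l2_anova(
    "~/ni_data/l1",                    # hypothetical level-1 results directory
    workflow_name="sessions_anova",
    exclude={"subject": ["0000"]},     # e.g. drop a high-motion subject
    nprocs=4,
)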
Example No. 13
0
def create_lvl2tfce_wf(mask=False):
    '''
    Input [Mandatory]:
        ~~~~~~~~~~~ Set through inputs.inputspec
        proj_name: String, naming subdirectory to use to identify this instance of lvl2 modeling.
            e.g. 'nosmooth'
            The string will be used as a subdirectory in output_dir.
        copes_template: String giving the full path to the cope files. Use wildcards to grab all cope files wanted.
            contrast (below) will be used iteratively to grab only the appropriate cope files from this glob list on each iteration.
            e.g. inputs.inputspec.copes_template = '/home/neuro/workdir/stress_lvl2/data/nosmooth/sub-*/model/sub-*/_modelestimate0/cope*nii.gz'
        contrast: String naming the contrast.
            Name should match a dictionary entry in full_cons and con_regressors.
            ** Often you will want to input this with an iterable node.
        full_cons: dictionary of each contrast.
            Names should match con_regressors.
            Entries in format [('name', 'stat', [condition_list], [weight])]
            e.g. full_cons = {
                '1_instructions_Instructions': [('1_instructions_Instructions', 'T', ['1_instructions_Instructions'], [1])]
                }
        output_dir: string, representing directory of output.
            e.g. inputs.inputspec.output_dir ='/home/neuro/output'
            In the output directory, the data will be stored in a root dir, giving the time and date of processing.
            If a mask is used, the mask will also be included in the output folder name. wholebrain is used otherwise.
        subject_list: list of strings, with BIDS-format IDs to identify subjects.
            Use this to drop high-movement subjects, even if they are among other files that will be grabbed.
            e.g. inputs.inputspec.subject_list = ['sub-001', 'sub-002']
        con_regressors: dictionary of by-subject regressors for each contrast.
                Names should match full_cons.
                e.g. inputs.inputspec.con_regressors = {
                        '1_instructions_Instructions': {'1_instructions_Instructions': [1] * len(subject_list),
                        'reg2': [0, 0, 0, 0, 0, 0, 1, 1, 1, 1, 1, 1],
                        'reg3': [1, 0, 1, 0, 1, 0, 1, 0, 1, 0, 1, 0],
                        }
                    }
    Input [Optional]:
            mask: [default: False] path to mask file. Can have different dimensions from functional data, but should obviously be in the same reference space if anatomical (see jt_util.align_mask).
                e.g. inputs.inputspec.mask_file = '/home/neuro/atlases/FSMAP/stress/realigned_masks/amygdala_bl_flirt.nii.gz'
            sinker_subs: list of tuples, each containing a pair of strings.
                These will be sinker substitutions. They will change filenames in the output folder.
                Usually best to run the pipeline once, before deciding on these.
                e.g. inputs.inputspec.sinker_subs = [('tstat', 'raw_tstat'),
                       ('tfce_corrp_raw_tstat', 'tfce_corrected_p')]
    Output:
        lvl2tfce_wf: workflow to perform second-level modeling, using threshold-free cluster enhancement (TFCE; see https://fsl.fmrib.ox.ac.uk/fsl/fslwiki/Randomise/UserGuide)
    '''
    import nipype.pipeline.engine as pe # pypeline engine
    import nipype.interfaces.fsl as fsl
    import os
    from nipype import IdentityInterface
    from nipype.interfaces.utility.wrappers import Function
    ##################  Setup workflow.
    lvl2tfce_wf = pe.Workflow(name='lvl2tfce_wf')

    inputspec = pe.Node(IdentityInterface(
        fields=['proj_name',
                'copes_template',
                'output_dir',
                'mask_file',
                'subject_list',
                'con_regressors',
                'full_cons',
                'sinker_subs',
                'contrast'
                ],
        mandatory_inputs=False),
                 name='inputspec')
    if mask:
        inputspec.inputs.mask_file = mask

    ################## Make output directory.
    def mk_outdir(output_dir, proj_name, mask=False):
        import os
        from time import gmtime, strftime
        time_prefix = strftime("%Y-%m-%d_%Hh-%Mm", gmtime())+'_'
        if mask:
            new_out_dir = os.path.join(output_dir, time_prefix + mask.split('/')[-1].split('.')[0], proj_name)
        else:
            new_out_dir = os.path.join(output_dir, time_prefix + 'wholebrain', proj_name)
        if not os.path.isdir(new_out_dir):
            os.makedirs(new_out_dir)
        return new_out_dir

    make_outdir = pe.Node(Function(input_names=['output_dir', 'proj_name', 'mask'],
                                   output_names=['new_out_dir'],
                                   function=mk_outdir),
                          name='make_outdir')

    ################## Get contrast
    def get_con(contrast, full_cons, con_regressors):
        con_info = full_cons[contrast]
        reg_info = con_regressors[contrast]
        return con_info, reg_info

    get_model_info = pe.Node(Function(input_names=['contrast', 'full_cons', 'con_regressors'],
                                      output_names=['con_info', 'reg_info'],
                                      function=get_con),
                             name='get_model_info')
    # get_model_info.inputs.full_cons = From inputspec
    # get_model_info.inputs.full_regs = From inputspec
    # get_model_info.inputs.contrast = From inputspec

    ################## Get files
    def get_files(subject_list, copes_template, contrast):
        import glob
        temp_list = []
        for x in glob.glob(copes_template):
            if any(subj in x for subj in subject_list):
                temp_list.append(x)
        out_list = [x for x in temp_list if contrast in x]
        return out_list

    get_copes = pe.Node(Function(
        input_names=['subject_list', 'copes_template', 'contrast'],
        output_names=['out_list'],
        function=get_files),
                        name='get_copes')
    # get_copes.inputs.subject_list = # From inputspec
    # get_copes.inputs.copes_template = # From inputspec.
    # get_copes.inputs.contrast = # From inputspec.

    ################## Merge into 4d files.
    merge_copes = pe.Node(interface=fsl.Merge(dimension='t'),
                    name='merge_copes')
    # merge_copes.inputs.in_files = copes

    ################## Level 2 design.
    level2model = pe.Node(interface=fsl.MultipleRegressDesign(),
                        name='level2model')
    # level2model.inputs.contrasts # from get_con_info
    # level2model.inputs.regressors # from get_con_info

    ################## Fit mask to the data, if given.
    if mask:
        def fit_mask(mask_file, ref_file):
            from nilearn.image import resample_img
            import nibabel as nib
            import os
            out_file = resample_img(nib.load(mask_file),
                                   target_affine=nib.load(ref_file).affine,
                                   target_shape=nib.load(ref_file).shape[0:3],
                                   interpolation='nearest')
            nib.save(out_file, os.path.join(os.getcwd(), mask_file.split('.nii')[0]+'_fit.nii.gz'))
            out_mask = os.path.join(os.getcwd(), mask_file.split('.nii')[0]+'_fit.nii.gz')
            return out_mask
        fit_mask = pe.Node(Function(
            input_names=['mask_file', 'ref_file'],
            output_names=['out_mask'],
            function=fit_mask),
                            name='fit_mask')

    ################## FSL Randomize.
    randomise = pe.Node(interface=fsl.Randomise(), name = 'randomise')
    # randomise.inputs.in_file = #From merge_copes
    # randomise.inputs.design_mat = # From level2model design_mat
    # randomise.inputs.tcon = # From level2model design_con
    # randomise.inputs.cm_thresh = 2.49 # mass based cluster thresholding. Not used.
    # randomise.mask = # Provided from mask_reslice, if mask provided.
    randomise.inputs.tfce = True
    randomise.inputs.raw_stats_imgs = True
    randomise.inputs.vox_p_values = True
    # randomise.inputs.num_perm = 5000

    def adj_minmax(in_file):
        # Set the header display range (cal_min/cal_max) to the data extrema,
        # so that viewers scale the image correctly.
        import nibabel as nib
        import numpy as np
        img = nib.load(in_file[0])
        data = img.get_fdata()  # get_data() is deprecated in nibabel
        img.header['cal_max'] = np.max(data)
        img.header['cal_min'] = np.min(data)
        nib.save(img, in_file[0])
        return in_file

    ################## Setup datasink.
    from nipype.interfaces.io import DataSink
    import os
    # sinker = pe.Node(DataSink(parameterization=False), name='sinker')
    sinker = pe.Node(DataSink(parameterization=True), name='sinker')

    ################## Setup Pipeline.
    lvl2tfce_wf.connect([
        (inputspec, make_outdir, [('output_dir', 'output_dir'),
                                 ('proj_name', 'proj_name')]),
        (inputspec, get_model_info, [('full_cons', 'full_cons'),
                                    ('con_regressors', 'con_regressors')]),
        (inputspec, get_model_info, [('contrast', 'contrast')]),
        (inputspec, get_copes, [('subject_list', 'subject_list'),
                               ('contrast', 'contrast'),
                               ('copes_template', 'copes_template')]),
        (get_copes, merge_copes, [('out_list', 'in_files')]),
        (get_model_info, level2model, [('con_info', 'contrasts')]),
        (get_model_info, level2model, [('reg_info', 'regressors')]),
        (merge_copes, randomise, [('merged_file', 'in_file')]),
        (level2model, randomise, [('design_mat', 'design_mat')]),
        (level2model, randomise, [('design_con', 'tcon')]),
        ])
    if mask:
        lvl2tfce_wf.connect([
            (inputspec, fit_mask, [('mask_file', 'mask_file')]),
            (merge_copes, fit_mask, [('merged_file', 'ref_file')]),
            (fit_mask, randomise, [('out_mask', 'mask')]),
            (inputspec, make_outdir, [('mask_file', 'mask')]),
            (fit_mask, sinker, [('out_mask', 'out.@mask')]),
            ])

    lvl2tfce_wf.connect([
        (inputspec, sinker, [('sinker_subs', 'substitutions')]),
        (make_outdir, sinker, [('new_out_dir', 'base_directory')]),
        (level2model, sinker, [('design_con', 'out.@con')]),
        (level2model, sinker, [('design_grp', 'out.@grp')]),
        (level2model, sinker, [('design_mat', 'out.@mat')]),
        (randomise, sinker, [(('t_corrected_p_files', adj_minmax), 'out.@t_cor_p')]),
        (randomise, sinker, [(('tstat_files', adj_minmax), 'out.@t_stat')]),
        ])
    return lvl2tfce_wf
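
A usage sketch wiring inputspec with the example values from the docstring above (paths and subject IDs are illustrative):

wf = create_lvl2tfce_wf()
wf.inputs.inputspec.proj_name = 'nosmooth'
wf.inputs.inputspec.output_dir = '/home/neuro/output'
wf.inputs.inputspec.copes_template = '/home/neuro/workdir/stress_lvl2/data/nosmooth/sub-*/model/sub-*/_modelestimate0/cope*nii.gz'
wf.inputs.inputspec.subject_list = ['sub-001', 'sub-002']
wf.inputs.inputspec.full_cons = {
    '1_instructions_Instructions': [('1_instructions_Instructions', 'T', ['1_instructions_Instructions'], [1])]
}
wf.inputs.inputspec.con_regressors = {
    '1_instructions_Instructions': {'1_instructions_Instructions': [1] * 2}
}
# 'contrast' is usually fed through an iterable on the inputspec node:
wf.get_node('inputspec').iterables = ('contrast', ['1_instructions_Instructions'])
wf.run()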
Example No. 14
0
def init_model_wf(workdir=None, numinputs=1, model=None, variables=None, memcalc=MemoryCalculator()):
    name = f"{formatlikebids(model.name)}_wf"
    workflow = pe.Workflow(name=name)

    if model is None:
        return workflow

    #
    inputnode = pe.Node(
        niu.IdentityInterface(fields=[f"in{i:d}" for i in range(1, numinputs + 1)]),
        name="inputnode",
    )
    outputnode = pe.Node(niu.IdentityInterface(fields=["resultdicts"]), name="outputnode")

    # setup outputs
    make_resultdicts_a = pe.Node(
        MakeResultdicts(
            tagkeys=["model", "contrast"],
            imagekeys=["design_matrix", "contrast_matrix"],
            deletekeys=["contrast"],
        ),
        name="make_resultdicts_a",
    )

    statmaps = ["effect", "variance", "z", "dof", "mask"]
    make_resultdicts_b = pe.Node(
        MakeResultdicts(
            tagkeys=["model", "contrast"],
            imagekeys=statmaps,
            metadatakeys=["critical_z"],
            missingvalues=[None, False],  # need to use False because traits doesn't support NoneType
        ),
        name="make_resultdicts_b",
    )

    make_resultdicts_a.inputs.model = model.name
    make_resultdicts_b.inputs.model = model.name

    # only output statistical map (_b) result dicts because the design matrix (_a) is
    # not relevant for higher level analyses
    workflow.connect(make_resultdicts_b, "resultdicts", outputnode, "resultdicts")

    # copy out results
    merge_resultdicts_b = pe.Node(niu.Merge(2), name="merge_resultdicts_b")
    workflow.connect(make_resultdicts_a, "resultdicts", merge_resultdicts_b, "in1")
    workflow.connect(make_resultdicts_b, "resultdicts", merge_resultdicts_b, "in2")

    resultdict_datasink = pe.Node(
        ResultdictDatasink(base_directory=workdir), name="resultdict_datasink"
    )
    workflow.connect(merge_resultdicts_b, "out", resultdict_datasink, "indicts")

    # merge inputs
    merge_resultdicts_a = pe.Node(niu.Merge(numinputs), name="merge_resultdicts_a")
    for i in range(1, numinputs + 1):
        workflow.connect(inputnode, f"in{i:d}", merge_resultdicts_a, f"in{i:d}")

    # filter inputs
    filterkwargs = dict(
        requireoneofimages=["effect", "reho", "falff", "alff"],
        excludefiles=str(Path(workdir) / "exclude*.json"),
    )
    if hasattr(model, "filters") and model.filters is not None and len(model.filters) > 0:
        filterkwargs.update(dict(filterdicts=model.filters))
    if hasattr(model, "spreadsheet"):
        if model.spreadsheet is not None and variables is not None:
            filterkwargs.update(dict(spreadsheet=model.spreadsheet, variabledicts=variables))
    filterresultdicts = pe.Node(
        interface=FilterResultdicts(**filterkwargs),
        name="filterresultdicts",
    )
    workflow.connect(merge_resultdicts_a, "out", filterresultdicts, "indicts")

    # aggregate data structures
    # output is a list where each element represents a separate model run
    aggregateresultdicts = pe.Node(
        AggregateResultdicts(numinputs=1, across=model.across), name="aggregateresultdicts"
    )
    workflow.connect(filterresultdicts, "resultdicts", aggregateresultdicts, "in1")

    # extract fields from the aggregated data structure
    aliases = dict(effect=["reho", "falff", "alff"])
    extractfromresultdict = pe.MapNode(
        ExtractFromResultdict(keys=[model.across, *statmaps], aliases=aliases),
        iterfield="indict",
        name="extractfromresultdict",
    )
    workflow.connect(aggregateresultdicts, "resultdicts", extractfromresultdict, "indict")

    # copy over aggregated metadata and tags to outputs
    for make_resultdicts_node in [make_resultdicts_a, make_resultdicts_b]:
        workflow.connect(extractfromresultdict, "tags", make_resultdicts_node, "tags")
        workflow.connect(extractfromresultdict, "metadata", make_resultdicts_node, "metadata")
        workflow.connect(extractfromresultdict, "vals", make_resultdicts_node, "vals")

    # create models
    if model.type in ["fe", "me"]:  # intercept only model
        countimages = pe.Node(
            niu.Function(input_names=["arrarr"], output_names=["image_count"], function=lenforeach),
            name="countimages",
        )
        workflow.connect(extractfromresultdict, "effect", countimages, "arrarr")

        modelspec = pe.MapNode(
            InterceptOnlyModel(), name="modelspec", iterfield="n_copes", mem_gb=memcalc.min_gb
        )
        workflow.connect(countimages, "image_count", modelspec, "n_copes")

    elif model.type in ["lme"]:  # glm
        modelspec = pe.MapNode(
            LinearModel(
                spreadsheet=model.spreadsheet,
                contrastdicts=model.contrasts,
                variabledicts=variables,
            ),
            name="modelspec",
            iterfield="subjects",
            mem_gb=memcalc.min_gb,
        )
        workflow.connect(extractfromresultdict, "sub", modelspec, "subjects")

    else:
        raise ValueError(f"Unknown model type: {model.type}")

    workflow.connect(modelspec, "contrast_names", make_resultdicts_b, "contrast")

    # run models
    if model.type in ["fe"]:

        # need to merge
        mergenodeargs = dict(iterfield="in_files", mem_gb=memcalc.volume_std_gb * numinputs)
        mergemask = pe.MapNode(MergeMask(), name="mergemask", **mergenodeargs)
        workflow.connect(extractfromresultdict, "mask", mergemask, "in_files")

        mergeeffect = pe.MapNode(Merge(dimension="t"), name="mergeeffect", **mergenodeargs)
        workflow.connect(extractfromresultdict, "effect", mergeeffect, "in_files")

        mergevariance = pe.MapNode(Merge(dimension="t"), name="mergevariance", **mergenodeargs)
        workflow.connect(extractfromresultdict, "variance", mergevariance, "in_files")

        fe_run_mode = pe.MapNode(
            niu.Function(input_names=["var_cope_file"], output_names=["run_mode"], function=_fe_run_mode),
            iterfield=["var_cope_file"],
            name="fe_run_mode",
        )
        workflow.connect(mergevariance, "merged_file", fe_run_mode, "var_cope_file")
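        # _fe_run_mode (defined elsewhere in the package) presumably selects the
        # FLAMEO run mode based on the merged varcope file, e.g. 'fe' when
        # variance images are available and 'ols' otherwise.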

        # prepare design matrix
        multipleregressdesign = pe.MapNode(
            fsl.MultipleRegressDesign(),
            name="multipleregressdesign",
            iterfield=["regressors", "contrasts"],
            mem_gb=memcalc.min_gb,
        )
        workflow.connect(modelspec, "regressors", multipleregressdesign, "regressors")
        workflow.connect(modelspec, "contrasts", multipleregressdesign, "contrasts")

        # use FSL implementation
        modelfit = pe.MapNode(
            FSLFLAMEO(),
            name="modelfit",
            mem_gb=memcalc.volume_std_gb * 100,
            iterfield=[
                "run_mode",
                "mask_file",
                "cope_file",
                "var_cope_file",
                "design_file",
                "t_con_file",
                "cov_split_file",
            ],
        )
        workflow.connect(fe_run_mode, "run_mode", modelfit, "run_mode")
        workflow.connect(mergemask, "merged_file", modelfit, "mask_file")
        workflow.connect(mergeeffect, "merged_file", modelfit, "cope_file")
        workflow.connect(mergevariance, "merged_file", modelfit, "var_cope_file")
        workflow.connect(multipleregressdesign, "design_mat", modelfit, "design_file")
        workflow.connect(multipleregressdesign, "design_con", modelfit, "t_con_file")
        workflow.connect(multipleregressdesign, "design_grp", modelfit, "cov_split_file")

        # mask output
        workflow.connect(mergemask, "merged_file", make_resultdicts_b, "mask")

    elif model.type in ["me", "lme"]:

        # use custom implementation
        modelfit = pe.MapNode(
            FLAME1(),
            name="modelfit",
            n_procs=config.nipype.omp_nthreads,
            mem_gb=memcalc.volume_std_gb * 100,
            iterfield=[
                "mask_files",
                "cope_files",
                "var_cope_files",
                "regressors",
                "contrasts",
            ],
        )
        workflow.connect(extractfromresultdict, "mask", modelfit, "mask_files")
        workflow.connect(extractfromresultdict, "effect", modelfit, "cope_files")
        workflow.connect(extractfromresultdict, "variance", modelfit, "var_cope_files")

        workflow.connect(modelspec, "regressors", modelfit, "regressors")
        workflow.connect(modelspec, "contrasts", modelfit, "contrasts")

        # mask output
        workflow.connect(modelfit, "masks", make_resultdicts_b, "mask")

        # random field theory
        smoothest = pe.MapNode(fsl.SmoothEstimate(), iterfield=["zstat_file", "mask_file"], name="smoothest")
        workflow.connect([(modelfit, smoothest, [(("zstats", ravel), "zstat_file")])])
        workflow.connect([(modelfit, smoothest, [(("masks", ravel), "mask_file")])])

        criticalz = pe.MapNode(
            niu.Function(input_names=["resels"], output_names=["critical_z"], function=_critical_z),
            iterfield=["resels"],
            name="criticalz",
        )
        workflow.connect(smoothest, "resels", criticalz, "resels")
        workflow.connect(criticalz, "critical_z", make_resultdicts_b, "critical_z")

    workflow.connect(modelfit, "copes", make_resultdicts_b, "effect")
    workflow.connect(modelfit, "var_copes", make_resultdicts_b, "variance")
    workflow.connect(modelfit, "zstats", make_resultdicts_b, "z")
    workflow.connect(modelfit, "tdof", make_resultdicts_b, "dof")

    # make tsv files for design and contrast matrices
    maketsv = pe.MapNode(
        MakeDesignTsv(),
        iterfield=["regressors", "contrasts", "row_index"],
        name="maketsv"
    )
    workflow.connect(extractfromresultdict, model.across, maketsv, "row_index")
    workflow.connect(modelspec, "regressors", maketsv, "regressors")
    workflow.connect(modelspec, "contrasts", maketsv, "contrasts")

    workflow.connect(maketsv, "design_tsv", make_resultdicts_a, "design_matrix")
    workflow.connect(maketsv, "contrasts_tsv", make_resultdicts_a, "contrast_matrix")

    return workflow
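
A hedged construction sketch: the custom interfaces above (MakeResultdicts, FLAME1, etc.) come from the enclosing package and are not shown here, and the model object below is a stand-in exposing just the attributes init_model_wf reads:

from types import SimpleNamespace

# Hypothetical model specification; attribute names follow the accesses above.
model = SimpleNamespace(
    name="meanAcrossSubjects",
    type="me",      # one of "fe", "me", "lme"
    across="sub",   # aggregate across subjects
    filters=None,
    spreadsheet=None,
)

wf = init_model_wf(workdir="/tmp/workdir", numinputs=2, model=model)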
Example No. 15
0
#                                          ("male_higher_than_female", 'T', ["male", "female"], [1, -1]),
#                                          ("female_higher_than_male", 'T', ["male", "female"], [-1, 1]),
                                        ]},
#               "age_sex": {"variables": ["age", "male", "female"],
#                            "contrasts": [("pos_age", 'T', ["age"], [1]),
#                                          ("neg_age", 'T', ["age"], [-1]),
#                                          ("male_higher_than_female", 'T', ["male", "female"], [1, -1]),
#                                          ("female_higher_than_male", 'T', ["male", "female"], [-1, 1]),
#                                         ]},
#             "first_sum": {"variables": ["firstSum", "age", "male", "female"],
#                          "contrasts": [("pos_firstSum", 'T', ["firstSum"], [1]),
#                                        ("neg_firstSum", 'T', ["firstSum"], [-1]),
#                                       ]}
                }
    for name, model in models.items():  # dict.iteritems() is Python 2 only
        model_node = pe.Node(fsl.MultipleRegressDesign(), name="%s_model" % name)
        regressors = {}
        for reg in model["variables"]:
            regressors[reg] = list(regressors_df[reg])
        model_node.inputs.regressors = regressors
        model_node.inputs.contrasts = model["contrasts"]
        model_nodes[name] = model_node
        
#     first_part_model_node = pe.Node(fsl.MultipleRegressDesign(), name="first_part_model")
#     regressors = {}
#     for reg in confounds + ["past", "future", "positive", "negative", "friends"]:
#         regressors[reg] = list(regressors_df[reg])
#     past = ("past", 'T', ["past"], [1])
#     future = ("future", 'T', ["future"], [1])
#     past_vs_future = ("past_vs_future", 'T', ["past", "future"], [1, -1])
#     future_vs_past = ("future_vs_past", 'T', ["future", "past"], [1, -1])
Example No. 16
0
    def wf_prep_files():
        prep_files = pe.Workflow(name="prep_files")
        prep_files.base_dir = input_dir + os.sep + "group_level"

        template = {"mask": "sub-{subject}/sub-{subject}.feat/mask.nii.gz"}
        whole_brain_mask = pe.MapNode(
            SelectFiles(templates=template),
            iterfield="subject",
            name="whole_brain_mask",
        )
        whole_brain_mask.inputs.base_directory = input_dir
        whole_brain_mask.inputs.subject = subject_list

        gen_groupmask = pe.Node(
            Function(
                function=_create_group_mask,
                input_names=["brain_masks", "base_dir"],
                output_names=["groupmask_path"],
            ),
            name="gen_groupmask",
        )
        gen_groupmask.inputs.base_dir = input_dir + os.sep + "group_level" + os.sep

        designs = pe.Node(
            Function(
                function=_groupmean_contrast,
                input_names=[
                    "subject_list", "regressors_path", "contrast_path"
                ],
                output_names=["groups", "regressors", "contrasts"],
            ),
            name="designs",
        )
        designs.inputs.subject_list = subject_list
        designs.inputs.regressors_path = regressors_path
        designs.inputs.contrast_path = contrast_path

        model = pe.Node(fsl.MultipleRegressDesign(), name="model")

        outputnode = pe.Node(
            interface=niu.IdentityInterface(
                fields=["mask", "group", "regressors", "contrasts"]),
            name="outputnode",
        )

        prep_files.connect([
            (whole_brain_mask, gen_groupmask, [("mask", "brain_masks")]),
            (
                designs,
                model,
                [
                    ("groups", "groups"),
                    ("regressors", "regressors"),
                    ("contrasts", "contrasts"),
                ],
            ),
            (gen_groupmask, outputnode, [("groupmask_path", "mask")]),
            (
                model,
                outputnode,
                [
                    ("design_grp", "group"),
                    ("design_mat", "regressors"),
                    ("design_con", "contrasts"),
                ],
            ),
        ])
        return prep_files
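
For reference, a self-contained sketch of the fsl.MultipleRegressDesign usage that recurs throughout these examples (regressor names and values are illustrative):

import nipype.interfaces.fsl as fsl
import nipype.pipeline.engine as pe

model = pe.Node(fsl.MultipleRegressDesign(), name="model")
# A two-group design matrix over four input images.
model.inputs.regressors = {
    "groupA": [1, 1, 0, 0],
    "groupB": [0, 0, 1, 1],
}
# T-contrasts are (name, 'T', [regressor names], [weights]);
# an F-contrast wraps a list of T-contrasts.
t_con = ("A_gt_B", "T", ["groupA", "groupB"], [1, -1])
model.inputs.contrasts = [t_con, ("anova", "F", [t_con])]
model.inputs.groups = [1, 1, 2, 2]  # variance groups, written to design.grp
res = model.run()  # writes design.mat, design.con, design.fts and design.grp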