def create_2lvl(name="group"):
    import nipype.interfaces.fsl as fsl
    import nipype.pipeline.engine as pe
    import nipype.interfaces.utility as niu

    wk = pe.Workflow(name=name)

    inputspec = pe.Node(niu.IdentityInterface(fields=['copes','varcopes',
                                                      'template', "contrasts",
                                                      "regressors"]),name='inputspec')

    model = pe.Node(fsl.MultipleRegressDesign(),name='l2model')

    #wk.connect(inputspec,('copes',get_len),model,'num_copes')
    wk.connect(inputspec, 'contrasts', model, "contrasts")
    wk.connect(inputspec, 'regressors', model, "regressors")

    mergecopes = pe.Node(fsl.Merge(dimension='t'), name='merge_copes')
    mergevarcopes = pe.Node(fsl.Merge(dimension='t'), name='merge_varcopes')

    flame = pe.Node(fsl.FLAMEO(run_mode='ols'), name='flameo')
    wk.connect(inputspec, 'copes', mergecopes, 'in_files')
    wk.connect(inputspec, 'varcopes', mergevarcopes, 'in_files')
    wk.connect(model, 'design_mat', flame, 'design_file')
    wk.connect(model, 'design_con', flame, 't_con_file')
    wk.connect(mergecopes, 'merged_file', flame, 'cope_file')
    wk.connect(mergevarcopes, 'merged_file', flame, 'var_cope_file')
    wk.connect(model, 'design_grp', flame, 'cov_split_file')

    bet = pe.Node(fsl.BET(mask=True, frac=0.3), name='template_brainmask')
    wk.connect(inputspec, 'template', bet, 'in_file')
    wk.connect(bet, 'mask_file', flame, 'mask_file')

    outputspec = pe.Node(niu.IdentityInterface(fields=['zstat','tstat','cope',
                                                       'varcope','mrefvars',
                                                       'pes','res4d','mask',
                                                       'tdof','weights','pstat']),
        name='outputspec')

    wk.connect(flame, 'copes', outputspec, 'cope')
    wk.connect(flame, 'var_copes', outputspec, 'varcope')
    wk.connect(flame, 'mrefvars', outputspec, 'mrefvars')
    wk.connect(flame, 'pes', outputspec, 'pes')
    wk.connect(flame, 'res4d', outputspec, 'res4d')
    wk.connect(flame, 'weights', outputspec, 'weights')
    wk.connect(flame, 'zstats', outputspec, 'zstat')
    wk.connect(flame, 'tstats', outputspec, 'tstat')
    wk.connect(flame, 'tdof', outputspec, 'tdof')
    wk.connect(bet, 'mask_file', outputspec, 'mask')

    ztopval = pe.MapNode(interface=fsl.ImageMaths(op_string='-ztop',
        suffix='_pval'),
        name='z2pval',
        iterfield=['in_file'])

    wk.connect(flame, 'zstats', ztopval, 'in_file')
    wk.connect(ztopval, 'out_file', outputspec, 'pstat')

    return wk
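
# A minimal usage sketch for create_2lvl: a one-sample group mean across two
# subjects. All file names below are hypothetical placeholders.
wf = create_2lvl(name="group")
wf.base_dir = '/tmp/group_level'
wf.inputs.inputspec.copes = ['sub-01_cope1.nii.gz', 'sub-02_cope1.nii.gz']
wf.inputs.inputspec.varcopes = ['sub-01_varcope1.nii.gz', 'sub-02_varcope1.nii.gz']
wf.inputs.inputspec.template = 'MNI152_T1_2mm.nii.gz'
wf.inputs.inputspec.regressors = dict(reg1=[1, 1])
wf.inputs.inputspec.contrasts = [['group mean', 'T', ['reg1'], [1]]]
# wf.run()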

#%%

copemerge = pe.Node(interface=fsl.Merge(dimension='t'), name="copemerge")

varcopemerge = pe.Node(interface=fsl.Merge(dimension='t'), name="varcopemerge")

maskemerge = pe.Node(interface=fsl.Merge(dimension='t'), name="maskemerge")
#copeImages = glob.glob('/media/Data/work/firstLevelKPE/_subject_id_*/feat_fit/run0.feat/stats/cope1.nii.gz')
#copemerge.inputs.in_files = copeImages

# Configure FSL 2nd level analysis
l2_model = pe.Node(fsl.L2Model(), name='l2_model')

flameo_ols = pe.Node(fsl.FLAMEO(run_mode='ols'), name='flameo_ols')


def _len(inlist):
    print(len(inlist))
    return len(inlist)


### use FSL randomise
rand = pe.Node(fsl.Randomise(), name="randomize")

rand.inputs.mask = '/home/oad4/scratch60/kpe_fsl/derivatives/fmriprep/sub-1369/ses-1/func/sub-1369_ses-1_task-Memory_space-MNI152NLin2009cAsym_desc-brain_mask.nii.gz'  # group mask file (was created earlier)
rand.inputs.one_sample_group_mean = True
rand.inputs.tfce = True
rand.inputs.vox_p_values = True
rand.inputs.num_perm = 5000
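
# randomise still needs its 4D input image; a hedged sketch wiring the merged
# copes into it (no workflow object appears in this excerpt, so one is assumed):
# wf = pe.Workflow(name='group_randomise')
# wf.connect(copemerge, 'merged_file', rand, 'in_file')
# wf.run()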
Example #3

# (Assumes: from nipype import Node; import nipype.interfaces.fsl as fsl.
# The regressors dictionary for level2design was truncated in this excerpt;
# its keys must match the regressor names used in the contrasts below, e.g.
# dictReg = {'reg1': [...], 'reg2': [...]})

# Contrasts
cont01 = ['left>right', 'T', ['reg1', 'reg2'], [1, -1]]
cont02 = ['right>left', 'T', ['reg1', 'reg2'], [-1, 1]]
cont03 = ['activation', 'T', ['reg1', 'reg2'], [0.5, 0.5]]

contrastList = [cont01, cont02, cont03]

# Setting up the second level analysis model node
level2design = Node(fsl.MultipleRegressDesign(contrasts=contrastList,
                                              regressors=dictReg),
                    name='level2design')

# Model calculation by FLAMEO
flameo = Node(fsl.FLAMEO(run_mode='fe'), name="flameo")

###########
#
# NODES FOR THE MERGING IMAGES
#
###########
# merging cope files
copemerge = Node(fsl.Merge(dimension='t', in_files=listCopeFiles),
                 name="copemerge")

# merging varcope files
varcopemerge = Node(fsl.Merge(dimension='t', in_files=listVarcopeFiles),
                    name="varcopemerge")

# merging mask files
Example #4

		# (this excerpt begins mid-snippet: the copemerge node and the start of
		# this varcopemerge call were truncated in the source)
		varcopemerge = Node(interface=fsl.Merge(dimension='t', in_files=varcopes),
			name='varcopemerge')

		multregmodel = Node(interface=fsl.MultipleRegressDesign(
			contrasts=[],
			regressors={}),
			name='multregmodel')

		feedback_tcont = ['group_mean', 'T',['reg1','reg2'],[1,0]]
		rpe_pos_tcont = ['rpe+', 'T',['reg1','reg2'],[0,1]]
		rpe_neg_tcont = ['rpe-', 'T',['reg1','reg2'],[0,-1]]
		
		multregmodel.inputs.contrasts = [feedback_tcont, rpe_pos_tcont, rpe_neg_tcont]
		multregmodel.inputs.regressors = dict(reg1=list(EV_rpe_design_df['feedback']),reg2=list(EV_rpe_design_df['rpe_demeaned']))

		FE=Node(interface=fsl.FLAMEO(
			run_mode='fe',
			mask_file=maskfile,
			# log_dir is FLAMEO's stats output directory; the original passed
			# stats_dir to Node(), which is not a Node constructor argument
			log_dir=os.path.join(Parkflow_rpe.base_dir,'stats')),
			name='FE')

		Parkflow_rpe.connect([(copemerge,FE,[('merged_file','cope_file')]),
				(varcopemerge,FE,[('merged_file','var_cope_file')]),
				(multregmodel,FE,[('design_mat','design_file'),
								('design_con','t_con_file'),
								('design_grp','cov_split_file')]),
				])

		Parkflow_rpe.write_graph(graph2use='colored')
		Parkflow_rpe.run()

Example #5

# Imports reconstructed for this excerpt; GenerateHigherInfo, MergeAll,
# CollateWithMetadata and BIDSDataSink are interfaces from the surrounding
# project and their import paths are not shown here.
from nipype.pipeline import engine as pe
from nipype.interfaces import fsl
from nipype.interfaces.utility import IdentityInterface


def fsl_higher_level_wf(
    output_dir,
    work_dir,
    step,
    database_path,
    smoothing_fwhm=None,
    smoothing_type=None,
    align_volumes=None,
    smoothing_level=None,
    name="fsl_higher_level_wf",
):
    """
    Produce a second-level (across runs) workflow for a given subject.

    This workflow processes functional data within a single session
    (i.e., between runs) and computes effects, variances, residuals and
    statmaps using FSL's FLAMEO, given the information in the BIDS model file.
    """
    workflow = pe.Workflow(name=name)
    workflow.base_dir = work_dir
    workflow.desc = ""

    # layout = BIDSLayout.load(database_path)
    level = step["Level"]

    image_pattern = ("[sub-{subject}/][ses-{session}/]"
                     "[sub-{subject}_][ses-{session}_]task-{task}_"
                     "[acq-{acquisition}_][rec-{reconstruction}_]"
                     "[echo-{echo}_][space-{space}_]contrast-{contrast}_"
                     "stat-{stat<effect|variance|z|p|t|F>}_statmap.nii.gz")

    wrangle_inputs = pe.Node(
        IdentityInterface(fields=["contrast_metadata", "contrast_maps"]),
        name=f"wrangle_{level}_inputs",
    )

    get_info = pe.Node(
        GenerateHigherInfo(
            model=step,
            database_path=database_path,
            align_volumes=align_volumes,
        ),
        name=f"get_{level}_info",
    )

    if smoothing_level == "l2":
        # second-level smoothing is not implemented in this excerpt;
        # smoothing_fwhm and smoothing_type are accepted but currently unused
        pass

    estimate_model = pe.MapNode(
        fsl.FLAMEO(output_type="NIFTI_GZ", run_mode="fe"),
        iterfield=[
            "design_file",
            "t_con_file",
            "mask_file",
            "cov_split_file",
            "dof_var_cope_file",
            "var_cope_file",
            "cope_file",
        ],
        name=f"model_{level}_estimate",
    )

    calculate_p = pe.MapNode(
        fsl.ImageMaths(output_type="NIFTI_GZ",
                       op_string="-ztop",
                       suffix="_pval"),
        iterfield=["in_file"],
        name=f"model_{level}_calculate_p",
    )

    collate = pe.Node(
        MergeAll(
            fields=[
                "effect_maps",
                "variance_maps",
                "zscore_maps",
                "pvalue_maps",
                "tstat_maps",
                "contrast_metadata",
            ],
            check_lengths=False,
        ),
        name=f"collate_{level}_level",
    )

    collate_outputs = pe.Node(
        CollateWithMetadata(
            fields=[
                "effect_maps", "variance_maps", "zscore_maps", "pvalue_maps",
                "tstat_maps"
            ],
            field_to_metadata_map={
                "effect_maps": {
                    "stat": "effect"
                },
                "variance_maps": {
                    "stat": "variance"
                },
                "zscore_maps": {
                    "stat": "z"
                },
                "pvalue_maps": {
                    "stat": "p"
                },
                "tstat_maps": {
                    "stat": "t"
                },
            },
        ),
        name=f"collate_{level}_outputs",
    )

    ds_contrast_maps = pe.MapNode(
        BIDSDataSink(base_directory=output_dir, path_patterns=image_pattern),
        iterfield=["entities", "in_file"],
        run_without_submitting=True,
        name=f"ds_{level}_contrast_maps",
    )

    wrangle_outputs = pe.Node(
        IdentityInterface(
            fields=["contrast_metadata", "contrast_maps", "brain_mask"]),
        name=f"wrangle_{level}_outputs",
    )

    workflow.connect([
        (
            wrangle_inputs,
            get_info,
            [("contrast_metadata", "contrast_metadata"),
             ("contrast_maps", "contrast_maps")],
        ),
        (
            get_info,
            estimate_model,
            [
                ("design_matrices", "design_file"),
                ("contrast_matrices", "t_con_file"),
                ("covariance_matrices", "cov_split_file"),
                ("dof_maps", "dof_var_cope_file"),
                ("variance_maps", "var_cope_file"),
                ("effect_maps", "cope_file"),
                ("brain_mask", "mask_file"),
            ],
        ),
        (estimate_model, calculate_p, [("zstats", "in_file")]),
        (
            estimate_model,
            collate,
            [
                ("copes", "effect_maps"),
                ("var_copes", "variance_maps"),
                ("zstats", "zscore_maps"),
                ("tstats", "tstat_maps"),
            ],
        ),
        (calculate_p, collate, [("out_file", "pvalue_maps")]),
        (get_info, collate, [("contrast_metadata", "contrast_metadata")]),
        (
            collate,
            collate_outputs,
            [
                ("effect_maps", "effect_maps"),
                ("variance_maps", "variance_maps"),
                ("zscore_maps", "zscore_maps"),
                ("pvalue_maps", "pvalue_maps"),
                ("tstat_maps", "tstat_maps"),
                ("contrast_metadata", "metadata"),
            ],
        ),
        (
            collate_outputs,
            ds_contrast_maps,
            [("out", "in_file"), ("metadata", "entities")],
        ),
        (
            collate_outputs,
            wrangle_outputs,
            [("metadata", "contrast_metadata"), ("out", "contrast_maps")],
        ),
    ])

    return workflow
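
# Hypothetical invocation; 'step' is the BIDS stats-model step dictionary from
# which the function reads its "Level" key, and all paths are placeholders:
# wf = fsl_higher_level_wf(output_dir='/out', work_dir='/work',
#                          step={'Level': 'session'},
#                          database_path='/work/bids_db')
# wf.run()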
Example #6

# Master Node
fixed_fx = pe.Workflow(name='fixedfx')

#merge the copes and varcopes for each condition
copemerge = pe.MapNode(interface=fsl.Merge(dimension='t'),
                       iterfield=['in_files'],
                       name="copemerge")
varcopemerge = pe.MapNode(interface=fsl.Merge(dimension='t'),
                          iterfield=['in_files'],
                          name="varcopemerge")

#level 2 model design files (there's one for each condition of each subject)
level2model = pe.Node(interface=fsl.L2Model(), name='l2model')

#estimate a second level model
flameo = pe.MapNode(interface=fsl.FLAMEO(run_mode='fe', mask_file=mniMask),
                    name="flameo",
                    iterfield=['cope_file', 'var_cope_file'])
'''
Connections
'''
fixed_fx.connect([
    (copemerge, flameo, [('merged_file', 'cope_file')]),
    (varcopemerge, flameo, [('merged_file', 'var_cope_file')]),
    (level2model, flameo, [('design_mat', 'design_file'),
                           ('design_con', 't_con_file'),
                           ('design_grp', 'cov_split_file')]),
])
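
# Running this fragment still requires feeding the merge nodes and the design;
# a hedged sketch (file lists and the cope count are placeholders):
# copemerge.inputs.in_files = [['run1_cope1.nii.gz', 'run2_cope1.nii.gz']]
# varcopemerge.inputs.in_files = [['run1_varcope1.nii.gz', 'run2_varcope1.nii.gz']]
# level2model.inputs.num_copes = 2
# fixed_fx.run()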
"""
=======================
Within-Subject workflow
示例#7
0
def create_fixed_effects_flow(name='fixedfx'):
    """Create a fixed-effects workflow

    This workflow is used to combine registered copes and varcopes across runs
    for an individual subject

    Example
    -------

    >>> fixedfx = create_fixed_effects_flow()
    >>> fixedfx.base_dir = '.'
    >>> fixedfx.inputs.inputspec.copes = [['cope1run1.nii.gz', 'cope1run2.nii.gz'], ['cope2run1.nii.gz', 'cope2run2.nii.gz']] # per contrast
    >>> fixedfx.inputs.inputspec.varcopes = [['varcope1run1.nii.gz', 'varcope1run2.nii.gz'], ['varcope2run1.nii.gz', 'varcope2run2.nii.gz']] # per contrast
    >>> fixedfx.inputs.inputspec.dof_files = ['dofrun1', 'dofrun2'] # per run
    >>> fixedfx.run() #doctest: +SKIP

    Inputs::

         inputspec.copes : list of list of cope files (one list per contrast)
         inputspec.varcopes : list of list of varcope files (one list per
                              contrast)
         inputspec.dof_files : degrees of freedom files for each run

    Outputs::

         outputspec.res4d : 4d residual time series
         outputspec.copes : contrast parameter estimates
         outputspec.varcopes : variance of contrast parameter estimates
         outputspec.zstats : z statistics of contrasts
         outputspec.tstats : t statistics of contrasts
    """

    fixed_fx = pe.Workflow(name=name)

    inputspec = pe.Node(util.IdentityInterface(fields=['copes',
                                                       'varcopes',
                                                       'dof_files'
                                                       ]),
                        name='inputspec')

    """
    Use :class:`nipype.interfaces.fsl.Merge` to merge the copes and
    varcopes for each condition
    """

    copemerge = pe.MapNode(interface=fsl.Merge(dimension='t'),
                           iterfield=['in_files'],
                           name="copemerge")

    varcopemerge = pe.MapNode(interface=fsl.Merge(dimension='t'),
                              iterfield=['in_files'],
                              name="varcopemerge")

    """
    Use :class:`nipype.interfaces.fsl.L2Model` to generate subject and condition
    specific level 2 model design files
    """

    level2model = pe.Node(interface=fsl.L2Model(),
                          name='l2model')

    """
    Use :class:`nipype.interfaces.fsl.FLAMEO` to estimate a second level model
    """

    flameo = pe.MapNode(interface=fsl.FLAMEO(run_mode='fe'), name="flameo",
                        iterfield=['cope_file', 'var_cope_file'])

    def get_dofvolumes(dof_files, cope_files):
        import os
        import nibabel as nb
        import numpy as np
        img = nb.load(cope_files[0])
        out_data = np.zeros(img.shape)
        for i in range(out_data.shape[-1]):
            dof = np.loadtxt(dof_files[i])
            out_data[:, :, :, i] = dof
        filename = os.path.join(os.getcwd(), 'dof_file.nii.gz')
        newimg = nb.Nifti1Image(out_data, None, img.header)
        newimg.to_filename(filename)
        return filename

    gendof = pe.Node(util.Function(input_names=['dof_files', 'cope_files'],
                                   output_names=['dof_volume'],
                                   function=get_dofvolumes),
                     name='gendofvolume')

    outputspec = pe.Node(util.IdentityInterface(fields=['res4d',
                                                        'copes', 'varcopes',
                                                        'zstats', 'tstats']),
                         name='outputspec')

    fixed_fx.connect([(inputspec, copemerge, [('copes', 'in_files')]),
                      (inputspec, varcopemerge, [('varcopes', 'in_files')]),
                      (inputspec, gendof, [('dof_files', 'dof_files')]),
                      (copemerge, gendof, [('merged_file', 'cope_files')]),
                      (copemerge, flameo, [('merged_file', 'cope_file')]),
                      (varcopemerge, flameo, [('merged_file',
                                               'var_cope_file')]),
                      (level2model, flameo, [('design_mat', 'design_file'),
                                            ('design_con', 't_con_file'),
                                            ('design_grp', 'cov_split_file')]),
                      (gendof, flameo, [('dof_volume', 'dof_var_cope_file')]),
                      (flameo, outputspec, [('res4d', 'res4d'),
                                            ('copes', 'copes'),
                                            ('var_copes', 'varcopes'),
                                            ('zstats', 'zstats'),
                                            ('tstats', 'tstats')
                                            ])
                      ])
    return fixed_fx
Example #8

# Imports reconstructed for this excerpt (a NiMARE-style helper); the p_to_z
# import path is an assumption, and LGR stands in for the module logger.
import logging
import os.path as op
from os import mkdir
from shutil import rmtree

import nibabel as nib
import numpy as np
from scipy import stats
from nilearn.masking import apply_mask, unmask
from nipype.interfaces import fsl

from nimare.transforms import p_to_z  # assumed import path

LGR = logging.getLogger(__name__)


def fsl_glm(con_maps,
            se_maps,
            sample_sizes,
            mask,
            inference,
            cdt=0.01,
            q=0.05,
            work_dir='fsl_glm',
            two_sided=True):
    """
    Run a GLM with FSL.
    """
    assert con_maps.shape == se_maps.shape
    assert con_maps.shape[0] == sample_sizes.shape[0]

    if inference == 'mfx':
        run_mode = 'flame1'
    elif inference == 'ffx':
        run_mode = 'fe'
    else:
        raise ValueError('Input "inference" must be "mfx" or "ffx".')

    if 0 < cdt < 1:
        cdt_z = p_to_z(cdt, tail='two')
    else:
        cdt_z = cdt

    work_dir = op.abspath(work_dir)
    if op.isdir(work_dir):
        raise ValueError('Working directory already '
                         'exists: "{0}"'.format(work_dir))

    mkdir(work_dir)
    cope_file = op.join(work_dir, 'cope.nii.gz')
    varcope_file = op.join(work_dir, 'varcope.nii.gz')
    mask_file = op.join(work_dir, 'mask.nii.gz')
    design_file = op.join(work_dir, 'design.mat')
    tcon_file = op.join(work_dir, 'design.con')
    cov_split_file = op.join(work_dir, 'cov_split.mat')
    dof_file = op.join(work_dir, 'dof.nii.gz')

    dofs = (np.array(sample_sizes) - 1).astype(str)

    con_maps[np.isnan(con_maps)] = 0
    cope_4d_img = unmask(con_maps, mask)
    se_maps[np.isnan(se_maps)] = 0
    se_maps = se_maps**2  # square SE to get var
    varcope_4d_img = unmask(se_maps, mask)
    dof_maps = np.ones(con_maps.shape)
    for i in range(len(dofs)):
        dof_maps[i, :] = dofs[i]
    dof_4d_img = unmask(dof_maps, mask)

    # Covariance splitting file
    cov_data = [
        '/NumWaves\t1', '/NumPoints\t{0}'.format(con_maps.shape[0]), '',
        '/Matrix'
    ]
    cov_data += ['1'] * con_maps.shape[0]
    with open(cov_split_file, 'w') as fo:
        fo.write('\n'.join(cov_data))

    # T contrast file
    tcon_data = [
        '/ContrastName1 MFX-GLM', '/NumWaves\t1', '/NumPoints\t1', '',
        '/Matrix', '1'
    ]
    with open(tcon_file, 'w') as fo:
        fo.write('\n'.join(tcon_data))

    cope_4d_img.to_filename(cope_file)
    varcope_4d_img.to_filename(varcope_file)
    dof_4d_img.to_filename(dof_file)
    mask.to_filename(mask_file)

    design_matrix = [
        '/NumWaves\t1', '/NumPoints\t{0}'.format(con_maps.shape[0]),
        '/PPheights\t1', '', '/Matrix'
    ]
    design_matrix += ['1'] * con_maps.shape[0]
    with open(design_file, 'w') as fo:
        fo.write('\n'.join(design_matrix))

    flameo = fsl.FLAMEO()
    flameo.inputs.cope_file = cope_file
    flameo.inputs.var_cope_file = varcope_file
    flameo.inputs.cov_split_file = cov_split_file
    flameo.inputs.design_file = design_file
    flameo.inputs.t_con_file = tcon_file
    flameo.inputs.mask_file = mask_file
    flameo.inputs.run_mode = run_mode
    flameo.inputs.dof_var_cope_file = dof_file
    res = flameo.run()

    temp_img = nib.load(res.outputs.zstats)
    temp_img = nib.Nifti1Image(temp_img.get_fdata() * -1, temp_img.affine)
    temp_img.to_filename(op.join(work_dir, 'temp_zstat2.nii.gz'))

    temp_img2 = nib.load(res.outputs.copes)
    temp_img2 = nib.Nifti1Image(temp_img2.get_fdata() * -1, temp_img2.affine)
    temp_img2.to_filename(op.join(work_dir, 'temp_copes2.nii.gz'))

    # FWE correction
    # Estimate smoothness
    est = fsl.model.SmoothEstimate()
    est.inputs.dof = con_maps.shape[0] - 1
    est.inputs.mask_file = mask_file
    est.inputs.residual_fit_file = res.outputs.res4d
    est_res = est.run()

    # Positive clusters
    cl = fsl.model.Cluster()
    cl.inputs.threshold = cdt_z
    cl.inputs.pthreshold = q
    cl.inputs.in_file = res.outputs.zstats
    cl.inputs.cope_file = res.outputs.copes
    cl.inputs.use_mm = True
    cl.inputs.find_min = False
    cl.inputs.dlh = est_res.outputs.dlh
    cl.inputs.volume = est_res.outputs.volume
    cl.inputs.out_threshold_file = op.join(work_dir, 'thresh_zstat1.nii.gz')
    cl.inputs.connectivity = 26
    cl.inputs.out_localmax_txt_file = op.join(work_dir, 'lmax_zstat1_tal.txt')
    cl_res = cl.run()

    out_cope_img = nib.load(res.outputs.copes)
    out_t_img = nib.load(res.outputs.tstats)
    out_z_img = nib.load(res.outputs.zstats)
    out_cope_map = apply_mask(out_cope_img, mask)
    out_t_map = apply_mask(out_t_img, mask)
    out_z_map = apply_mask(out_z_img, mask)
    pos_z_map = apply_mask(nib.load(cl_res.outputs.threshold_file), mask)

    if two_sided:
        # Negative clusters
        cl2 = fsl.model.Cluster()
        cl2.inputs.threshold = cdt_z
        cl2.inputs.pthreshold = q
        cl2.inputs.in_file = op.join(work_dir, 'temp_zstat2.nii.gz')
        cl2.inputs.cope_file = op.join(work_dir, 'temp_copes2.nii.gz')
        cl2.inputs.use_mm = True
        cl2.inputs.find_min = False
        cl2.inputs.dlh = est_res.outputs.dlh
        cl2.inputs.volume = est_res.outputs.volume
        cl2.inputs.out_threshold_file = op.join(work_dir,
                                                'thresh_zstat2.nii.gz')
        cl2.inputs.connectivity = 26
        cl2.inputs.out_localmax_txt_file = op.join(work_dir,
                                                   'lmax_zstat2_tal.txt')
        cl2_res = cl2.run()

        neg_z_map = apply_mask(nib.load(cl2_res.outputs.threshold_file), mask)
        thresh_z_map = pos_z_map - neg_z_map
    else:
        thresh_z_map = pos_z_map

    LGR.info('Cleaning up...')
    rmtree(work_dir)
    rmtree(res.outputs.stats_dir)

    # Compile outputs
    out_p_map = stats.norm.sf(abs(out_z_map)) * 2
    log_p_map = -np.log10(out_p_map)
    images = {
        'cope': out_cope_map,
        'z': out_z_map,
        'thresh_z': thresh_z_map,
        't': out_t_map,
        'p': out_p_map,
        'log_p': log_p_map
    }
    return images
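
# Hypothetical call, assuming 2D masked arrays and a matching nibabel mask
# image (requires FSL on the PATH; all names are placeholders):
# images = fsl_glm(con_maps, se_maps, sample_sizes, mask_img,
#                  inference='mfx', cdt=0.001, q=0.05,
#                  work_dir='/tmp/fsl_glm_work', two_sided=True)
# thresholded = images['thresh_z']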
Example #9

# -*- coding: utf-8 -*-
"""
Created on Mon May 12 17:04:43 2014

@author: Dalton
"""


from nipype.interfaces.fsl import L2Model
model = L2Model(num_copes=3) # 3 sessions
model.run()

from nipype.interfaces import fsl
import os
flameo = fsl.FLAMEO()

flameo.inputs.cope_file = '/Users/Dalton/Documents/FSL/ValuePilotTestingL2/FFX/710/copes/_subject_id_710/_conestimate0/cope1.nii.gz'
flameo.inputs.var_cope_file = '/Users/Dalton/Documents/FSL/ValuePilotTestingL2/FFX/710/varcopes/_subject_id_710/_conestimate0/varcope1.nii.gz'
#flameo.inputs.cov_split_file = 'cov_split.mat'
flameo.inputs.design_file = '/Users/Dalton/Documents/FSL/ValuePilotTestingL2/designfiles/design.mat'
#flameo.inputs.t_con_file = 'design.con'
#flameo.inputs.mask_file = 'mask.nii'
#flameo.inputs.run_mode = 'fe'

# Note: FLAMEO also requires t_con_file, mask_file and run_mode; with those
# inputs commented out, this run() call will fail input validation.
flameo.run()
Example #10
            varcopes = [
                os.path.join(x, 'stats', '%s%i.nii.gz' % ('varcope', contrast))
                for x in featdirs
            ]

            # define nodes

            copemerge = Node(interface=fsl.Merge(dimension='t',
                                                 in_files=copes),
                             name='copemerge')
            varcopemerge = Node(interface=fsl.Merge(dimension='t',
                                                    in_files=varcopes),
                                name='varcopemerge')
            level2model = Node(interface=fsl.L2Model(num_copes=len(copes)),
                               name='l2model')
            OLS = Node(interface=fsl.FLAMEO(run_mode='ols',
                                            mask_file=groupmaskfile),
                       name='OLS')

            # create workflow

            CNPgroup = Workflow(name='cnp_group')
            CNPgroup.base_dir = outcopedir
            CNPgroup.connect([
                (copemerge, OLS, [('merged_file', 'cope_file')]),
                (varcopemerge, OLS, [('merged_file', 'var_cope_file')]),
                (level2model, OLS, [('design_mat', 'design_file'),
                                    ('design_con', 't_con_file'),
                                    ('design_grp', 'cov_split_file')]),
            ])

            CNPgroup.write_graph(graph2use='colored')
Example #11

    varcopemerge = Node(interface=fsl.Merge(dimension='t', in_files=varcopes),
                        name='varcopemerge')

    multregmodel = Node(interface=fsl.MultipleRegressDesign(contrasts=[],
                                                            regressors={}),
                        name='multregmodel')

    hcminpd_tcont = ['hc-pd' + hc_v_onoff, 'T', ['reg1', 'reg2'], [1, -1]]
    pdminhc_tcont = ['pd' + hc_v_onoff + '-hc', 'T', ['reg1', 'reg2'], [-1, 1]]

    multregmodel.inputs.contrasts = [hcminpd_tcont, pdminhc_tcont]
    multregmodel.inputs.regressors = dict(reg1=HC_EV, reg2=PD_EV)
    multregmodel.inputs.groups = group_list

    flame12 = Node(interface=fsl.FLAMEO(run_mode='flame12',
                                        mask_file=groupmaskfile,
                                        infer_outliers=True,
                                        # log_dir is FLAMEO's stats output dir;
                                        # the original passed stats_dir to
                                        # Node(), which is not a Node argument
                                        log_dir=os.path.join(
                                            Parkflow_group_rpe.base_dir,
                                            'stats')),
                   name='flame12')

    # Use level2model if not using covariate
    Parkflow_group_rpe.connect([
        (copemerge, flame12, [('merged_file', 'cope_file')]),
        (varcopemerge, flame12, [('merged_file', 'var_cope_file')]),
        (multregmodel, flame12, [('design_mat', 'design_file'),
                                 ('design_con', 't_con_file'),
                                 ('design_grp', 'cov_split_file')]),
    ])

    Parkflow_group_rpe.write_graph(graph2use='colored')
Example #12
    Parkflow_group_rpe = Workflow(name='workflow')
    Parkflow_group_rpe.base_dir = os.path.join(group_dir, "feedback_RPE",
                                               "cope" + str(contrast))

    if not os.path.exists(Parkflow_group_rpe.base_dir):
        os.makedirs(Parkflow_group_rpe.base_dir)

    # Create nodes

    copemerge = Node(interface=fsl.Merge(dimension='t', in_files=copes),
                     name='copemerge')
    varcopemerge = Node(interface=fsl.Merge(dimension='t', in_files=varcopes),
                        name='varcopemerge')
    level2model = Node(interface=fsl.L2Model(num_copes=len(copes)),
                       name='l2model')
    flame12 = Node(interface=fsl.FLAMEO(run_mode='flame12',
                                        mask_file=groupmaskfile,
                                        # infer_outliers and log_dir are FLAMEO
                                        # inputs; the original passed them (as
                                        # infer_outliers and stats_dir) to
                                        # Node(), which does not accept them
                                        infer_outliers=True,
                                        log_dir=os.path.join(
                                            Parkflow_group_rpe.base_dir,
                                            'stats')),
                   name='flame12')

    Parkflow_group_rpe.connect([
        (copemerge, flame12, [('merged_file', 'cope_file')]),
        (varcopemerge, flame12, [('merged_file', 'var_cope_file')]),
        (level2model, flame12, [('design_mat', 'design_file'),
                                ('design_con', 't_con_file'),
                                ('design_grp', 'cov_split_file')]),
    ])

    Parkflow_group_rpe.write_graph(graph2use='colored')
    Parkflow_group_rpe.run()
Example #13

File: glm.py  Project: setina42/SAMRI

# Imports reconstructed for this excerpt; select_from_datafind_df,
# dict_and_suffix, add_suffix and mylen are helpers defined elsewhere in SAMRI.
import inspect
import multiprocessing as mp
import os
import re
import shutil
from os import getcwd, listdir, path, remove

import nipype.interfaces.io as nio
import nipype.pipeline.engine as pe
from nipype.interfaces import fsl
from nipype.interfaces import utility as util


def l2_common_effect(l1_dir,
	groupby="session",
	keep_work=False,
	loud=False,
	tr=1,
	nprocs=6,
	mask="/usr/share/mouse-brain-atlases/dsurqec_200micron_mask.nii",
	match={},
	n_jobs_percentage=1,
	out_base="",
	subjects=[],
	sessions=[],
	tasks=[],
	exclude={},
	include={},
	workflow_name="generic",
	debug=False,
	target_set=[],
	):
	"""Determine the common effect in a sample of 3D feature maps.

	Parameters
	----------

	n_jobs_percentage : float, optional
		Percentage of the cores present on the machine which to maximally use for deploying jobs in parallel.
	"""

	from samri.pipelines.utils import bids_data_selection

	l1_dir = path.abspath(path.expanduser(l1_dir))
	out_base = path.abspath(path.expanduser(out_base))
	mask=path.abspath(path.expanduser(mask))

	data_selection = bids_data_selection(l1_dir,
		structural_match=False,
		functional_match=match,
		subjects=False,
		sessions=False,
		verbose=True,
		)
	ind = data_selection.index.tolist()

	out_dir = path.join(out_base,workflow_name)
	workdir_name = workflow_name+'_work'
	workdir = path.join(out_base,workdir_name)
	if not os.path.exists(workdir):
		os.makedirs(workdir)

	data_selection = data_selection.sort_values(['session', 'subject'], ascending=[1, 1])
	if exclude:
		for key in exclude:
			data_selection = data_selection[~data_selection[key].isin(exclude[key])]
	if include:
		for key in include:
			data_selection = data_selection[data_selection[key].isin(include[key])]
	data_selection.to_csv(path.join(workdir,'data_selection.csv'))

	copemerge = pe.Node(interface=fsl.Merge(dimension='t'),name="copemerge")
	varcopemerge = pe.Node(interface=fsl.Merge(dimension='t'),name="varcopemerge")

	level2model = pe.Node(interface=fsl.L2Model(),name='level2model')

	flameo = pe.Node(interface=fsl.FLAMEO(), name="flameo")
	flameo.inputs.mask_file = mask
	flameo.inputs.run_mode = "ols"

	datasink = pe.Node(nio.DataSink(), name='datasink')
	datasink.inputs.base_directory = out_dir
	datasink_substitutions = [('_iterable_', '')]

	if groupby == "subject_set":
		datasink_substitutions.extend([('subject', 'sub-')])
		common_fields = ''
		common_fields += 'acq-'+data_selection.acq.drop_duplicates().item()
		try:
			common_fields += '_run-'+data_selection.run.drop_duplicates().item()
		except ValueError:
			pass

		infosource = pe.Node(interface=util.IdentityInterface(fields=['iterable']), name="infosource")
		infosource.iterables = [('iterable', target_set)]

		copes = pe.Node(name='copes', interface=util.Function(function=select_from_datafind_df, input_names=inspect.getargspec(select_from_datafind_df)[0], output_names=['selection']))
		copes.inputs.bids_dictionary_override = {'modality':'cope'}
		copes.inputs.df = data_selection
		copes.inputs.list_output = True

		varcopes = pe.Node(name='varcopes', interface=util.Function(function=select_from_datafind_df, input_names=inspect.getargspec(select_from_datafind_df)[0], output_names=['selection']))
		varcopes.inputs.bids_dictionary_override = {'modality':'varcb'}
		varcopes.inputs.df = data_selection
		varcopes.inputs.list_output = True

		workflow_connections = [
			(infosource, copes, [('iterable', 'bids_dictionary')]),
			(infosource, varcopes, [('iterable', 'bids_dictionary')]),
			(infosource, copemerge, [(('iterable',dict_and_suffix,"subject","_cope.nii.gz"), 'merged_file')]),
			(infosource, varcopemerge, [(('iterable',dict_and_suffix,"subject","_varcb.nii.gz"), 'merged_file')]),
			]
	elif groupby == "subject":
		datasink_substitutions.extend([('subject', 'sub-')])
		common_fields = ''
		common_fields += 'acq-'+data_selection.acq.drop_duplicates().item()
		try:
			common_fields += '_run-'+data_selection.run.drop_duplicates().item()
		except ValueError:
			pass

		subjects = data_selection[['subject']].drop_duplicates()
		# TODO: could not find a better way to convert pandas df column into list of dicts
		subjects_ = subjects.T.to_dict()
		subjects = [subjects_[i] for i in subjects_.keys()]

		infosource = pe.Node(interface=util.IdentityInterface(fields=['iterable']), name="infosource")
		infosource.iterables = [('iterable', subjects)]

		copes = pe.Node(name='copes', interface=util.Function(function=select_from_datafind_df, input_names=inspect.getargspec(select_from_datafind_df)[0], output_names=['selection']))
		copes.inputs.bids_dictionary_override = {'modality':'cope'}
		copes.inputs.df = data_selection
		copes.inputs.list_output = True

		varcopes = pe.Node(name='varcopes', interface=util.Function(function=select_from_datafind_df, input_names=inspect.getargspec(select_from_datafind_df)[0], output_names=['selection']))
		varcopes.inputs.bids_dictionary_override = {'modality':'varcb'}
		varcopes.inputs.df = data_selection
		varcopes.inputs.list_output = True

		workflow_connections = [
			(infosource, copes, [('iterable', 'bids_dictionary')]),
			(infosource, varcopes, [('iterable', 'bids_dictionary')]),
			(infosource, copemerge, [(('iterable',dict_and_suffix,"subject","_cope.nii.gz"), 'merged_file')]),
			(infosource, varcopemerge, [(('iterable',dict_and_suffix,"subject","_varcb.nii.gz"), 'merged_file')]),
			]
	elif groupby == "subject_task":
		#does not currently work, due to missing iterator combinations (same issue as preprocessing)
		merge = pe.Node(interface=util.Merge(2), name="merge")
		infosource = pe.Node(interface=util.IdentityInterface(fields=['subject','task']), name="infosource")
		infosource.iterables = [('subject', subjects),('task', tasks)]
		datasource = pe.Node(interface=nio.DataGrabber(infields=["subject","task",], outfields=["copes", "varcbs"]), name="datasource")
		datasource.inputs.template_args = dict(
			copes=[["subject","subject","task",]],
			varcbs=[["subject","subject","task",]]
			)
		datasource.inputs.field_template = dict(
			copes="sub-%s/ses-*/sub-%s_ses-*_task-%s_cope.nii.gz",
			varcbs="sub-%s/ses-*/sub-%s_ses-*_task-%s_varcb.nii.gz",
			)
		workflow_connections = [
			(infosource, datasource, [('subject', 'subject'),('task','task')]),
			(infosource, merge, [('subject', 'in1'),('task','in2')]),
			(merge, copemerge, [(('out',add_suffix,"_cope.nii.gz"), 'merged_file')]),
			(merge, varcopemerge, [(('out',add_suffix,"_varcb.nii.gz"), 'merged_file')]),
			]
	elif groupby == "session":
		datasink_substitutions.extend([('session', 'ses-')])
		common_fields = ''
		common_fields += 'acq-'+data_selection.acq.drop_duplicates().item()
		try:
			common_fields += '_run-'+data_selection.run.drop_duplicates().item()
		except ValueError:
			pass

		sessions = data_selection[['session']].drop_duplicates()
		# TODO: could not find a better way to convert pandas df column into list of dicts
		sessions_ = sessions.T.to_dict()
		sessions = [sessions_[i] for i in sessions_.keys()]

		infosource = pe.Node(interface=util.IdentityInterface(fields=['iterable']), name="infosource")
		infosource.iterables = [('iterable', sessions)]

		copes = pe.Node(name='copes', interface=util.Function(function=select_from_datafind_df, input_names=inspect.getargspec(select_from_datafind_df)[0], output_names=['selection']))
		copes.inputs.bids_dictionary_override = {'modality':'cope'}
		copes.inputs.df = data_selection
		copes.inputs.list_output = True

		varcopes = pe.Node(name='varcopes', interface=util.Function(function=select_from_datafind_df, input_names=inspect.getargspec(select_from_datafind_df)[0], output_names=['selection']))
		varcopes.inputs.bids_dictionary_override = {'modality':'varcb'}
		varcopes.inputs.df = data_selection
		varcopes.inputs.list_output = True

		workflow_connections = [
			(infosource, copes, [('iterable', 'bids_dictionary')]),
			(infosource, varcopes, [('iterable', 'bids_dictionary')]),
			(infosource, copemerge, [(('iterable',dict_and_suffix,"session","_cope.nii.gz"), 'merged_file')]),
			(infosource, varcopemerge, [(('iterable',dict_and_suffix,"session","_varcb.nii.gz"), 'merged_file')]),
			]
	elif groupby == "task":
		datasink_substitutions.extend([('task', 'task-')])
		common_fields = ''
		common_fields += 'acq-'+data_selection.acq.drop_duplicates().item()
		try:
			common_fields += '_run-'+data_selection.run.drop_duplicates().item()
		except ValueError:
			pass
		try:
			common_fields += '_ses-'+data_selection.session.drop_duplicates().item()
		except ValueError:
			pass

		iters = data_selection[['task']].drop_duplicates()
		# TODO: could not find a better way to convert pandas df column into list of dicts
		iters_ = iters.T.to_dict()
		iters = [iters_[i] for i in iters_.keys()]

		infosource = pe.Node(interface=util.IdentityInterface(fields=['iterable']), name="infosource")
		infosource.iterables = [('iterable', iters)]

		copes = pe.Node(name='copes', interface=util.Function(function=select_from_datafind_df, input_names=inspect.getargspec(select_from_datafind_df)[0], output_names=['selection']))
		copes.inputs.bids_dictionary_override = {'modality':'cope'}
		copes.inputs.df = data_selection
		copes.inputs.list_output = True

		varcopes = pe.Node(name='varcopes', interface=util.Function(function=select_from_datafind_df, input_names=inspect.getargspec(select_from_datafind_df)[0], output_names=['selection']))
		varcopes.inputs.bids_dictionary_override = {'modality':'varcb'}
		varcopes.inputs.df = data_selection
		varcopes.inputs.list_output = True

		workflow_connections = [
			(infosource, copes, [('iterable', 'bids_dictionary')]),
			(infosource, varcopes, [('iterable', 'bids_dictionary')]),
			(infosource, copemerge, [(('iterable',dict_and_suffix,"task","_cope.nii.gz"), 'merged_file')]),
			(infosource, varcopemerge, [(('iterable',dict_and_suffix,"task","_varcb.nii.gz"), 'merged_file')]),
			]
	elif groupby == "none":
		common_fields = ''
		common_fields += 'acq-'+data_selection.acq.drop_duplicates().item()
		common_fields += '_run-'+data_selection.run.drop_duplicates().item()

		datasink_substitutions.extend([('cope1.nii.gz', common_fields+'_'+'cope.nii.gz')])
		datasink_substitutions.extend([('tstat1.nii.gz', common_fields+'_'+'tstat.nii.gz')])
		datasink_substitutions.extend([('zstat1.nii.gz', common_fields+'_'+'zstat.nii.gz')])
		datasink.inputs.substitutions = datasink_substitutions

		copes = pe.Node(name='copes', interface=util.Function(function=select_from_datafind_df, input_names=inspect.getargspec(select_from_datafind_df)[0], output_names=['selection']))
		copes.inputs.bids_dictionary_override = {'modality':'cope'}
		copes.inputs.df = data_selection
		copes.inputs.list_output = True

		varcopes = pe.Node(name='varcopes', interface=util.Function(function=select_from_datafind_df, input_names=inspect.getargspec(select_from_datafind_df)[0], output_names=['selection']))
		varcopes.inputs.bids_dictionary_override = {'modality':'varcb'}
		varcopes.inputs.df = data_selection
		varcopes.inputs.list_output = True

		copemerge.inputs.merged_file = 'cope.nii.gz'
		varcopemerge.inputs.merged_file = 'varcb.nii.gz'

		workflow_connections = []

	elif groupby == "mtask":
		infosource = pe.Node(interface=util.IdentityInterface(fields=['iterable']), name="infosource")
		infosource.iterables = [('iterable', tasks)]
		datasource = pe.Node(interface=nio.DataGrabber(infields=["group",], outfields=["copes", "varcbs"]), name="datasource")
		datasource.inputs.template_args = dict(
			copes=[['group']],
			varcbs=[['group']]
			)
		datasource.inputs.field_template = dict(
			copes="sub-*/ses-*/sub-*_ses-*_task-%s_cope.nii.gz",
			varcbs="sub-*/ses-*/sub-*_ses-*_task-%s_varcb.nii.gz",
			)
		workflow_connections = [
			(infosource, datasource, [('iterable', 'group')]),
			(infosource, copemerge, [(('iterable',add_suffix,"_cope.nii.gz"), 'merged_file')]),
			(infosource, varcopemerge, [(('iterable',add_suffix,"_varcb.nii.gz"), 'merged_file')]),
			]

	datasink_substitutions.extend([('cope1.nii.gz', common_fields+'_'+'cope.nii.gz')])
	datasink_substitutions.extend([('tstat1.nii.gz', common_fields+'_'+'tstat.nii.gz')])
	datasink_substitutions.extend([('zstat1.nii.gz', common_fields+'_'+'zstat.nii.gz')])
	datasink.inputs.substitutions = datasink_substitutions

	workflow_connections.extend([
		(copes, copemerge, [('selection', 'in_files')]),
		(varcopes, varcopemerge, [('selection', 'in_files')]),
		(varcopes, level2model, [(('selection',mylen), 'num_copes')]),
		(copemerge,flameo,[('merged_file','cope_file')]),
		(varcopemerge,flameo,[('merged_file','var_cope_file')]),
		(level2model,flameo, [('design_mat','design_file')]),
		(level2model,flameo, [('design_grp','cov_split_file')]),
		(level2model,flameo, [('design_con','t_con_file')]),
		(flameo, datasink, [('copes', '@copes')]),
		(flameo, datasink, [('fstats', '@fstats')]),
		(flameo, datasink, [('tstats', '@tstats')]),
		(flameo, datasink, [('zstats', '@zstats')]),
		])

	workflow_config = {'execution': {'crashdump_dir': path.join(out_base,'crashdump'),}}
	if debug:
		workflow_config['logging'] = {
			'workflow_level':'DEBUG',
			'utils_level':'DEBUG',
			'interface_level':'DEBUG',
			'filemanip_level':'DEBUG',
			'log_to_file':'true',
			}

	workflow = pe.Workflow(name=workdir_name)
	workflow.connect(workflow_connections)
	workflow.base_dir = out_base
	workflow.config = workflow_config
	workflow.write_graph(dotfilename=path.join(workflow.base_dir,workdir_name,"graph.dot"), graph2use="hierarchical", format="png")

	n_jobs = max(int(round(mp.cpu_count()*n_jobs_percentage)),2)
	if not loud:
		try:
			workflow.run(plugin="MultiProc", plugin_args={'n_procs' : nprocs})
		except RuntimeError:
			print("WARNING: Some expected tasks have not been found (or another RuntimeError has occurred).")
		for f in listdir(getcwd()):
			if re.search("crash.*?-varcopemerge|-copemerge.*", f):
				remove(path.join(getcwd(), f))
	else:
		workflow.run(plugin="MultiProc", plugin_args={'n_procs' : n_jobs})
	if not keep_work:
		shutil.rmtree(path.join(out_base,workdir_name))
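
# Hypothetical invocation (paths are placeholders):
# l2_common_effect('~/ni_data/l1', groupby='session',
#                  out_base='~/ni_data/l2', workflow_name='sessionwise')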
Example #14

File: ibma.py  Project: Z-Yxin/NiMARE

# Imports reconstructed for this excerpt; the p_to_z and MetaResult import
# paths are assumptions based on NiMARE's layout.
import os.path as op
from os import mkdir
from shutil import rmtree

import nibabel as nib
import numpy as np
from scipy import stats
from nilearn.masking import apply_mask, unmask
from nipype.interfaces import fsl

from nimare.results import MetaResult  # assumed import path
from nimare.transforms import p_to_z  # assumed import path


def mfx_glm(con_maps, se_maps, sample_sizes, mask, cdt=0.01, q=0.05,
            work_dir='mfx_glm'):
    """
    Run a mixed-effects GLM on contrast and standard error images.

    Parameters
    ----------
    con_maps : (n_contrasts, n_voxels) :obj:`numpy.ndarray`
        A 2D array of contrast maps in the same space, after masking.
    se_maps : (n_contrasts, n_voxels) :obj:`numpy.ndarray`
        A 2D array of contrast standard error maps in the same space, after
        masking. Must match shape and order of ``con_maps``.
    sample_sizes : (n_contrasts,) :obj:`numpy.ndarray`
        A 1D array of sample sizes associated with contrasts in ``con_maps``
        and ``se_maps``. Must be in same order as rows in ``con_maps`` and
        ``se_maps``.
    mask : :obj:`nibabel.Nifti1Image`
        Mask image, used to unmask results maps in compiling output.
    cdt : :obj:`float`, optional
        Cluster-defining p-value threshold. Default is 0.01.
    q : :obj:`float`, optional
        Alpha for multiple comparisons correction. Default is 0.05.
    work_dir : :obj:`str`, optional
        Working directory for intermediate files. Must not already exist.
        Default is 'mfx_glm'.

    Returns
    -------
    result : :obj:`nimare.meta.MetaResult`
        MetaResult object containing maps for test statistics, p-values, and
        negative log(p) values.

    TODO
    ----
    Step 1: Concatenate con_maps into 4D image and save to file in working
            directory
    Step 2: Repeat with var_maps
    Step 3: Write out mask image to file in working directory
    Step 4: Create design file
    Step 5: Create t contrast file
    Step 6: Create covariance split file
    Step 7: Create DOF file for varcopes
    Step 8: Run flameo with --runmode=flame1, setting --logdir to working
            directory
    """
    assert con_maps.shape == se_maps.shape
    assert con_maps.shape[0] == sample_sizes.shape[0]

    if 0 < cdt < 1:
        cdt_z = p_to_z(cdt, tail='two')
    else:
        cdt_z = cdt

    work_dir = op.abspath(work_dir)
    if op.isdir(work_dir):
        raise ValueError('Working directory already '
                         'exists: "{0}"'.format(work_dir))

    mkdir(work_dir)
    cope_file = op.join(work_dir, 'cope.nii.gz')
    varcope_file = op.join(work_dir, 'varcope.nii.gz')
    mask_file = op.join(work_dir, 'mask.nii.gz')
    design_file = op.join(work_dir, 'design.mat')
    tcon_file = op.join(work_dir, 'design.con')
    cov_split_file = op.join(work_dir, 'cov_split.mat')
    dof_file = op.join(work_dir, 'dof.nii.gz')

    dofs = (np.array(sample_sizes) - 1).astype(str)

    con_maps[np.isnan(con_maps)] = 0
    cope_4d_img = unmask(con_maps, mask)
    se_maps[np.isnan(se_maps)] = 0
    varcope_4d_img = unmask(se_maps, mask)
    dof_maps = np.ones(con_maps.shape)
    for i in range(len(dofs)):
        dof_maps[i, :] = dofs[i]
    dof_4d_img = unmask(dof_maps, mask)

    # Covariance splitting file
    cov_data = ['/NumWaves\t1',
                '/NumPoints\t{0}'.format(con_maps.shape[0]),
                '',
                '/Matrix']
    cov_data += ['1'] * con_maps.shape[0]
    with open(cov_split_file, 'w') as fo:
        fo.write('\n'.join(cov_data))

    # T contrast file
    tcon_data = ['/ContrastName1 MFX-GLM',
                 '/NumWaves\t1',
                 '/NumPoints\t1',
                 '',
                 '/Matrix',
                 '1']
    with open(tcon_file, 'w') as fo:
        fo.write('\n'.join(tcon_data))

    cope_4d_img.to_filename(cope_file)
    varcope_4d_img.to_filename(varcope_file)
    dof_4d_img.to_filename(dof_file)
    mask.to_filename(mask_file)

    design_matrix = ['/NumWaves\t1',
                     '/NumPoints\t{0}'.format(con_maps.shape[0]),
                     '/PPheights\t1',
                     '',
                     '/Matrix']
    design_matrix += ['1'] * con_maps.shape[0]
    with open(design_file, 'w') as fo:
        fo.write('\n'.join(design_matrix))

    flameo = fsl.FLAMEO()
    flameo.inputs.cope_file = cope_file
    flameo.inputs.var_cope_file = varcope_file
    flameo.inputs.cov_split_file = cov_split_file
    flameo.inputs.design_file = design_file
    flameo.inputs.t_con_file = tcon_file
    flameo.inputs.mask_file = mask_file
    flameo.inputs.run_mode = 'flame1'
    flameo.inputs.dof_var_cope_file = dof_file
    res = flameo.run()

    temp_img = nib.load(res.outputs.zstats)
    temp_img = nib.Nifti1Image(temp_img.get_fdata() * -1, temp_img.affine)
    temp_img.to_filename(op.join(work_dir, 'temp_zstat2.nii.gz'))

    temp_img2 = nib.load(res.outputs.copes)
    temp_img2 = nib.Nifti1Image(temp_img2.get_fdata() * -1, temp_img2.affine)
    temp_img2.to_filename(op.join(work_dir, 'temp_copes2.nii.gz'))

    # FWE correction
    # Estimate smoothness
    est = fsl.model.SmoothEstimate()
    est.inputs.dof = con_maps.shape[0] - 1
    est.inputs.mask_file = mask_file
    est.inputs.residual_fit_file = res.outputs.res4d
    est_res = est.run()

    # Positive clusters
    cl = fsl.model.Cluster()
    cl.inputs.threshold = cdt_z
    cl.inputs.pthreshold = q
    cl.inputs.in_file = res.outputs.zstats
    cl.inputs.cope_file = res.outputs.copes
    cl.inputs.use_mm = True
    cl.inputs.find_min = False
    cl.inputs.dlh = est_res.outputs.dlh
    cl.inputs.volume = est_res.outputs.volume
    cl.inputs.out_threshold_file = op.join(work_dir, 'thresh_zstat1.nii.gz')
    cl.inputs.connectivity = 26
    cl.inputs.out_localmax_txt_file = op.join(work_dir, 'lmax_zstat1_tal.txt')
    cl_res = cl.run()

    # Negative clusters
    cl2 = fsl.model.Cluster()
    cl2.inputs.threshold = cdt_z
    cl2.inputs.pthreshold = q
    cl2.inputs.in_file = op.join(work_dir, 'temp_zstat2.nii.gz')
    cl2.inputs.cope_file = op.join(work_dir, 'temp_copes2.nii.gz')
    cl2.inputs.use_mm = True
    cl2.inputs.find_min = False
    cl2.inputs.dlh = est_res.outputs.dlh
    cl2.inputs.volume = est_res.outputs.volume
    cl2.inputs.out_threshold_file = op.join(work_dir, 'thresh_zstat2.nii.gz')
    cl2.inputs.connectivity = 26
    cl2.inputs.out_localmax_txt_file = op.join(work_dir, 'lmax_zstat2_tal.txt')
    cl2_res = cl2.run()

    out_cope_img = nib.load(res.outputs.copes)
    out_t_img = nib.load(res.outputs.tstats)
    out_z_img = nib.load(res.outputs.zstats)
    out_cope_map = apply_mask(out_cope_img, mask)
    out_t_map = apply_mask(out_t_img, mask)
    out_z_map = apply_mask(out_z_img, mask)
    pos_z_map = apply_mask(nib.load(cl_res.outputs.threshold_file), mask)
    neg_z_map = apply_mask(nib.load(cl2_res.outputs.threshold_file), mask)
    # (the original guarded the next line with `if matlab:`, an undefined name
    # that would raise NameError; the two-sided thresholded map is kept)
    thresh_z_map = pos_z_map - neg_z_map

    print('Cleaning up...')
    rmtree(work_dir)
    rmtree(res.outputs.stats_dir)

    # Compile outputs
    out_p_map = stats.norm.sf(abs(out_z_map)) * 2
    log_p_map = -np.log10(out_p_map)
    result = MetaResult(mask=mask, cope=out_cope_map, z=out_z_map,
                        thresh_z=thresh_z_map, t=out_t_map, p=out_p_map,
                        log_p=log_p_map)
    return result
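Example #15

# second_level_wf below references several names not shown in this excerpt:
# pe/niu/fsl from nipype plus the project-specific PtoZ and
# GroupDerivativesDataSink interfaces. The two helpers here are minimal
# reconstructions assuming the obvious semantics (_len is defined earlier on
# this page).
def _dof(in_files):
    # degrees of freedom for SmoothEstimate: number of input copes minus one
    return len(in_files) - 1


def _neg(val):
    # negate the FWE z-threshold for the lower-tail pass
    return -val
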
def second_level_wf(output_dir, bids_ref, name='wf_2nd_level'):
    workflow = pe.Workflow(name=name)

    inputnode = pe.Node(niu.IdentityInterface(
        fields=['group_mask', 'in_copes', 'in_varcopes']),
                        name='inputnode')

    outputnode = pe.Node(niu.IdentityInterface(fields=[
        'zstats_raw', 'zstats_fwe', 'zstats_clust', 'clust_index_file',
        'clust_localmax_txt_file'
    ]),
                         name='outputnode')

    # Configure FSL 2nd level analysis
    l2_model = pe.Node(fsl.L2Model(), name='l2_model')
    flameo_ols = pe.Node(fsl.FLAMEO(run_mode='ols'), name='flameo_ols')

    merge_copes = pe.Node(fsl.Merge(dimension='t'), name='merge_copes')
    merge_varcopes = pe.Node(fsl.Merge(dimension='t'), name='merge_varcopes')

    # Thresholding - FDR ################################################
    # Calculate pvalues with ztop
    fdr_ztop = pe.Node(fsl.ImageMaths(op_string='-ztop', suffix='_pval'),
                       name='fdr_ztop')
    # Find FDR threshold: fdr -i zstat1_pval -m <group_mask> -q 0.05
    # fdr_th = <write Nipype interface for fdr>
    # Apply threshold:
    # fslmaths zstat1_pval -mul -1 -add 1 -thr <fdr_th> -mas <group_mask> \
    #     zstat1_thresh_vox_fdr_pstat1

    # Thresholding - FWE ################################################
    # smoothest -r %s -d %i -m %s
    smoothness = pe.Node(fsl.SmoothEstimate(), name='smoothness')
    # ptoz 0.025 -g %f
    # p = 0.05 / 2 for 2-tailed test
    fwe_ptoz = pe.Node(PtoZ(pvalue=0.025), name='fwe_ptoz')
    # fslmaths %s -uthr %s -thr %s nonsignificant
    # fslmaths %s -sub nonsignificant zstat1_thresh
    fwe_nonsig0 = pe.Node(fsl.Threshold(direction='above'), name='fwe_nonsig0')
    fwe_nonsig1 = pe.Node(fsl.Threshold(direction='below'), name='fwe_nonsig1')
    fwe_thresh = pe.Node(fsl.BinaryMaths(operation='sub'), name='fwe_thresh')

    # Thresholding - Cluster ############################################
    # cluster -i %s -c %s -t 3.2 -p 0.025 -d %s --volume=%s  \
    #     --othresh=thresh_cluster_fwe_zstat1 --connectivity=26 --mm
    cluster_kwargs = {
        'connectivity': 26,
        'threshold': 3.2,
        'pthreshold': 0.025,
        'out_threshold_file': True,
        'out_index_file': True,
        'out_localmax_txt_file': True
    }
    cluster_pos = pe.Node(fsl.Cluster(**cluster_kwargs), name='cluster_pos')
    cluster_neg = pe.Node(fsl.Cluster(**cluster_kwargs), name='cluster_neg')
    zstat_inv = pe.Node(fsl.BinaryMaths(operation='mul', operand_value=-1),
                        name='zstat_inv')
    cluster_inv = pe.Node(fsl.BinaryMaths(operation='mul', operand_value=-1),
                          name='cluster_inv')
    cluster_all = pe.Node(fsl.BinaryMaths(operation='add'), name='cluster_all')

    ds_zraw = pe.Node(GroupDerivativesDataSink(base_directory=str(output_dir),
                                               keep_dtype=False,
                                               suffix='zstat',
                                               sub='all'),
                      name='ds_zraw',
                      run_without_submitting=True)
    ds_zraw.inputs.source_file = bids_ref

    ds_zfwe = pe.Node(GroupDerivativesDataSink(base_directory=str(output_dir),
                                               keep_dtype=False,
                                               suffix='zstat',
                                               desc='fwe',
                                               sub='all'),
                      name='ds_zfwe',
                      run_without_submitting=True)
    ds_zfwe.inputs.source_file = bids_ref

    ds_zclust = pe.Node(GroupDerivativesDataSink(
        base_directory=str(output_dir),
        keep_dtype=False,
        suffix='zstat',
        desc='clust',
        sub='all'),
                        name='ds_zclust',
                        run_without_submitting=True)
    ds_zclust.inputs.source_file = bids_ref

    ds_clustidx_pos = pe.Node(GroupDerivativesDataSink(
        base_directory=str(output_dir),
        keep_dtype=False,
        suffix='pclusterindex',
        sub='all'),
                              name='ds_clustidx_pos',
                              run_without_submitting=True)
    ds_clustidx_pos.inputs.source_file = bids_ref

    ds_clustlmax_pos = pe.Node(GroupDerivativesDataSink(
        base_directory=str(output_dir),
        keep_dtype=False,
        suffix='plocalmax',
        desc='intask',
        sub='all'),
                               name='ds_clustlmax_pos',
                               run_without_submitting=True)
    ds_clustlmax_pos.inputs.source_file = bids_ref

    ds_clustidx_neg = pe.Node(GroupDerivativesDataSink(
        base_directory=str(output_dir),
        keep_dtype=False,
        suffix='nclusterindex',
        sub='all'),
                              name='ds_clustidx_neg',
                              run_without_submitting=True)
    ds_clustidx_neg.inputs.source_file = bids_ref

    ds_clustlmax_neg = pe.Node(GroupDerivativesDataSink(
        base_directory=str(output_dir),
        keep_dtype=False,
        suffix='nlocalmax',
        desc='intask',
        sub='all'),
                               name='ds_clustlmax_neg',
                               run_without_submitting=True)
    ds_clustlmax_neg.inputs.source_file = bids_ref

    workflow.connect([
        (inputnode, l2_model, [(('in_copes', _len), 'num_copes')]),
        (inputnode, flameo_ols, [('group_mask', 'mask_file')]),
        (inputnode, smoothness, [('group_mask', 'mask_file'),
                                 (('in_copes', _dof), 'dof')]),
        (inputnode, merge_copes, [('in_copes', 'in_files')]),
        (inputnode, merge_varcopes, [('in_varcopes', 'in_files')]),
        (l2_model, flameo_ols, [('design_mat', 'design_file'),
                                ('design_con', 't_con_file'),
                                ('design_grp', 'cov_split_file')]),
        (merge_copes, flameo_ols, [('merged_file', 'cope_file')]),
        (merge_varcopes, flameo_ols, [('merged_file', 'var_cope_file')]),
        (flameo_ols, smoothness, [('res4d', 'residual_fit_file')]),
        (flameo_ols, fwe_nonsig0, [('zstats', 'in_file')]),
        (fwe_nonsig0, fwe_nonsig1, [('out_file', 'in_file')]),
        (smoothness, fwe_ptoz, [('resels', 'resels')]),
        (fwe_ptoz, fwe_nonsig0, [('zstat', 'thresh')]),
        (fwe_ptoz, fwe_nonsig1, [(('zstat', _neg), 'thresh')]),
        (flameo_ols, fwe_thresh, [('zstats', 'in_file')]),
        (fwe_nonsig1, fwe_thresh, [('out_file', 'operand_file')]),
        (flameo_ols, cluster_pos, [('zstats', 'in_file')]),
        (merge_copes, cluster_pos, [('merged_file', 'cope_file')]),
        (smoothness, cluster_pos, [('volume', 'volume'), ('dlh', 'dlh')]),
        (flameo_ols, zstat_inv, [('zstats', 'in_file')]),
        (zstat_inv, cluster_neg, [('out_file', 'in_file')]),
        (cluster_neg, cluster_inv, [('threshold_file', 'in_file')]),
        (merge_copes, cluster_neg, [('merged_file', 'cope_file')]),
        (smoothness, cluster_neg, [('volume', 'volume'), ('dlh', 'dlh')]),
        (cluster_pos, cluster_all, [('threshold_file', 'in_file')]),
        (cluster_inv, cluster_all, [('out_file', 'operand_file')]),
        (flameo_ols, ds_zraw, [('zstats', 'in_file')]),
        (fwe_thresh, ds_zfwe, [('out_file', 'in_file')]),
        (cluster_all, ds_zclust, [('out_file', 'in_file')]),
        (cluster_pos, ds_clustidx_pos, [('index_file', 'in_file')]),
        (cluster_pos, ds_clustlmax_pos, [('localmax_txt_file', 'in_file')]),
        (cluster_neg, ds_clustidx_neg, [('index_file', 'in_file')]),
        (cluster_neg, ds_clustlmax_neg, [('localmax_txt_file', 'in_file')]),
    ])
    return workflow
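
# Hypothetical usage (paths are placeholders):
# wf = second_level_wf('/out/derivatives', '/data/sub-01/func/sub-01_task-x_bold.nii.gz')
# wf.inputs.inputnode.group_mask = 'group_mask.nii.gz'
# wf.inputs.inputnode.in_copes = ['sub-01_cope1.nii.gz', 'sub-02_cope1.nii.gz']
# wf.inputs.inputnode.in_varcopes = ['sub-01_varcope1.nii.gz', 'sub-02_varcope1.nii.gz']
# wf.run()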
Example #16

# Imports reconstructed for this excerpt; add_suffix is a helper defined
# elsewhere in the project.
import pandas as pd
from os import path

import nipype.interfaces.io as nio
import nipype.pipeline.engine as pe
from nipype.interfaces import fsl
from nipype.interfaces import utility as util


def l2_common_effect(
    l1_dir,
    groupby="session",
    keep_work=False,
    l2_dir="",
    loud=False,
    tr=1,
    nprocs=6,
    workflow_name="generic",
    mask="~/ni_data/templates/ds_QBI_chr_bin.nii.gz",
    subjects=[],
    sessions=[],
    tasks=[],
    exclude={},
    include={},
):

    l1_dir = path.expanduser(l1_dir)
    if not l2_dir:
        l2_dir = path.abspath(path.join(l1_dir, "..", "..", "l2"))

    mask = path.abspath(path.expanduser(mask))

    datafind = nio.DataFinder()
    datafind.inputs.root_paths = l1_dir
    datafind.inputs.match_regex = '.+/sub-(?P<sub>[a-zA-Z0-9]+)/ses-(?P<ses>[a-zA-Z0-9]+)/.*?_acq-(?P<acq>[a-zA-Z0-9]+)_task-(?P<task>[a-zA-Z0-9]+)_(?P<mod>[a-zA-Z0-9]+)_(?P<stat>[a-zA-Z0-9]+)\.(?:tsv|nii|nii\.gz)'
    datafind_res = datafind.run()
    data_selection = zip(*[
        datafind_res.outputs.sub, datafind_res.outputs.ses,
        datafind_res.outputs.acq, datafind_res.outputs.task,
        datafind_res.outputs.mod, datafind_res.outputs.stat,
        datafind_res.outputs.out_paths
    ])
    data_selection = [list(i) for i in data_selection]
    data_selection = pd.DataFrame(data_selection,
                                  columns=('subject', 'session', 'acquisition',
                                           'task', 'modality', 'statistic',
                                           'path'))
    data_selection = data_selection.sort_values(['session', 'subject'],
                                                ascending=[1, 1])
    if exclude:
        for key in exclude:
            data_selection = data_selection[~data_selection[key].isin(exclude[key])]
    if include:
        for key in include:
            data_selection = data_selection[data_selection[key].isin(include[key])]

    copemerge = pe.Node(interface=fsl.Merge(dimension='t'), name="copemerge")
    varcopemerge = pe.Node(interface=fsl.Merge(dimension='t'),
                           name="varcopemerge")

    level2model = pe.Node(interface=fsl.L2Model(), name='level2model')

    flameo = pe.Node(interface=fsl.FLAMEO(), name="flameo")
    flameo.inputs.mask_file = mask
    flameo.inputs.run_mode = "ols"

    datasink = pe.Node(nio.DataSink(), name='datasink')
    datasink.inputs.base_directory = path.join(l2_dir, workflow_name)
    datasink.inputs.substitutions = [
        ('_iterable_', ''),
    ]

    if groupby == "subject":
        subjects = data_selection[['subject']].drop_duplicates().values.tolist()

        infosource = pe.Node(
            interface=util.IdentityInterface(fields=['iterable']),
            name="infosource")
        infosource.iterables = [('iterable', subjects)]
        datasource = pe.Node(interface=nio.DataGrabber(
            infields=[
                "group",
            ], outfields=["copes", "varcbs"]),
                             name="datasource")
        datasource.inputs.template_args = dict(copes=[['group', 'group']],
                                               varcbs=[['group', 'group']])
        datasource.inputs.field_template = dict(
            copes="sub-%s/ses-*/sub-%s_ses-*_task-*_cope.nii.gz",
            varcbs="sub-%s/ses-*/sub-%s_ses-*_task-*_varcb.nii.gz",
        )
        workflow_connections = [
            (infosource, datasource, [('iterable', 'group')]),
            (infosource, copemerge, [(('iterable', add_suffix, "_cope.nii.gz"),
                                      'merged_file')]),
            (infosource, varcopemerge, [(('iterable', add_suffix,
                                          "_varcb.nii.gz"), 'merged_file')]),
        ]
    elif groupby == "subject_task":
        #does not currently work, due to missing iterator combinations (same issue as preprocessing)
        merge = pe.Node(interface=util.Merge(2), name="merge")
        infosource = pe.Node(
            interface=util.IdentityInterface(fields=['subject', 'task']),
            name="infosource")
        infosource.iterables = [('subject', subjects), ('task', tasks)]
        datasource = pe.Node(interface=nio.DataGrabber(
            infields=[
                "subject",
                "task",
            ], outfields=["copes", "varcbs"]),
                             name="datasource")
        datasource.inputs.template_args = dict(
            copes=[["subject", "subject", "task"]],
            varcbs=[["subject", "subject", "task"]])
        datasource.inputs.field_template = dict(
            copes="sub-%s/ses-*/sub-%s_ses-*_task-%s_cope.nii.gz",
            varcbs="sub-%s/ses-*/sub-%s_ses-*_task-%s_varcb.nii.gz",
        )
        workflow_connections = [
            (infosource, datasource, [('subject', 'subject'),
                                      ('task', 'task')]),
            (infosource, merge, [('subject', 'in1'), ('task', 'in2')]),
            (merge, copemerge, [(('out', add_suffix, "_cope.nii.gz"),
                                 'merged_file')]),
            (merge, varcopemerge, [(('out', add_suffix, "_varcb.nii.gz"),
                                    'merged_file')]),
        ]
    elif groupby == "session":
        sessions = data_selection[['session']].drop_duplicates()
        sessions = sessions.T.to_dict().values()

        infosource = pe.Node(
            interface=util.IdentityInterface(fields=['iterable']),
            name="infosource")
        infosource.iterables = [('iterable', sessions)]

        copes = pe.Node(
            name='copes',
            interface=util.Function(
                function=select_from_datafind_df,
                input_names=inspect.getargspec(select_from_datafind_df)[0],
                output_names=['selection']))
        copes.inputs.bids_dictionary_override = {'statistic': 'cope'}
        copes.inputs.df = data_selection
        copes.inputs.list_output = True

        varcopes = pe.Node(
            name='varcopes',
            interface=util.Function(
                function=select_from_datafind_df,
                input_names=inspect.getargspec(select_from_datafind_df)[0],
                output_names=['selection']))
        varcopes.inputs.bids_dictionary_override = {'statistic': 'varcb'}
        varcopes.inputs.df = data_selection
        varcopes.inputs.list_output = True

        workflow_connections = [
            (infosource, copemerge, [(('iterable', dict_and_suffix, "session",
                                       "_cope.nii.gz"), 'merged_file')]),
            (infosource, varcopemerge, [(('iterable', dict_and_suffix,
                                          "session", "_varcb.nii.gz"),
                                         'merged_file')]),
        ]
    elif groupby == "task":
        infosource = pe.Node(
            interface=util.IdentityInterface(fields=['iterable']),
            name="infosource")
        infosource.iterables = [('iterable', tasks)]
        datasource = pe.Node(interface=nio.DataGrabber(
            infields=[
                "group",
            ], outfields=["copes", "varcbs"]),
                             name="datasource")
        datasource.inputs.template_args = dict(copes=[['group']],
                                               varcbs=[['group']])
        datasource.inputs.field_template = dict(
            copes="sub-*/ses-*/sub-*_ses-*_task-%s_cope.nii.gz ",
            varcbs="sub-*/ses-*/sub-*_ses-*_task-%s_varcb.nii.gz ",
        )
        workflow_connections = [
            (infosource, datasource, [('iterable', 'group')]),
            (infosource, copemerge, [(('iterable', add_suffix, "_cope.nii.gz"),
                                      'merged_file')]),
            (infosource, varcopemerge, [(('iterable', add_suffix,
                                          "_varcb.nii.gz"), 'merged_file')]),
        ]

    workflow_connections.extend([
        (copes, copemerge, [('selection', 'in_files')]),
        (varcopes, varcopemerge, [('selection', 'in_files')]),
        (varcopes, level2model, [(('selection', mylen), 'num_copes')]),
        (copemerge, flameo, [('merged_file', 'cope_file')]),
        (varcopemerge, flameo, [('merged_file', 'var_cope_file')]),
        (level2model, flameo, [('design_mat', 'design_file')]),
        (level2model, flameo, [('design_grp', 'cov_split_file')]),
        (level2model, flameo, [('design_con', 't_con_file')]),
        (flameo, datasink, [('copes', '@copes')]),
        (flameo, datasink, [('fstats', '@fstats')]),
        (flameo, datasink, [('tstats', '@tstats')]),
        (flameo, datasink, [('zstats', '@zstats')]),
    ])

    workdir_name = workflow_name + "_work"
    workflow = pe.Workflow(name=workdir_name)
    workflow.connect(workflow_connections)
    workflow.config = {
        "execution": {
            "crashdump_dir": path.join(l2_dir, "crashdump")
        }
    }
    workflow.base_dir = l2_dir
    workflow.write_graph(dotfilename=path.join(workflow.base_dir, workdir_name,
                                               "graph.dot"),
                         graph2use="hierarchical",
                         format="png")

    if not loud:
        try:
            workflow.run(plugin="MultiProc", plugin_args={'n_procs': nprocs})
        except RuntimeError:
            print(
                "WARNING: Some expected tasks have not been found (or another RuntimeError has occured)."
            )
        for f in listdir(getcwd()):
            if re.search("crash.*?-varcopemerge|-copemerge.*", f):
                remove(path.join(getcwd(), f))
    else:
        workflow.run(plugin="MultiProc", plugin_args={'n_procs': nprocs})

    if not keep_work:
        shutil.rmtree(path.join(l2_dir, workdir_name))
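# NOTE: this SAMRI example calls helpers defined elsewhere in the project
# (`add_suffix`, `dict_and_suffix`, `mylen`, `select_from_datafind_df`), and
# the final workflow_connections.extend() assumes the `groupby == "session"`
# branch, since the `copes`/`varcopes` nodes exist only there. A minimal
# sketch of the three simple helpers, assuming their roles from the
# connections above (naming the merged files and counting copes):

def add_suffix(name, suffix):
    # e.g. add_suffix('ses1', '_cope.nii.gz') -> 'ses1_cope.nii.gz'
    return str(name) + suffix

def dict_and_suffix(d, key, suffix):
    # pull one value out of the iterated dictionary, then append the suffix
    return str(d[key]) + suffix

def mylen(inlist):
    # number of selected copes, fed to L2Model.num_copes
    return len(inlist)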
Example #17
def init_higherlevel_wf(run_mode="flame1", name="higherlevel",
                        subjects=None, covariates=None,
                        subject_groups=None, group_contrasts=None,
                        outname=None, workdir=None, task=None):
    """

    :param run_mode: mode argument passed to FSL FLAMEO (Default value = "flame1")
    :param name: workflow name (Default value = "higherlevel")
    :param subjects: list of subject names (Default value = None)
    :param covariates: two-level dictionary of covariates by name and subject (Default value = None)
    :param subject_groups: dictionary mapping subjects to their group (Default value = None)
    :param group_contrasts: two-level dictionary of contrasts by contrast name and values by group (Default value = None)
    :param outname: name of the first-level output this workflow operates on; determines whether varcopes/dofs are used (Default value = None)
    :param workdir: working directory, used to read qcresults.json and to write included_subjects.json (Default value = None)
    :param task: task name used when applying quality-check exclusions (Default value = None)

    """
    workflow = pe.Workflow(name=name)

    inputnode = pe.Node(
        interface=niu.IdentityInterface(
            fields=["imgs", "varcopes", "dof_files", "mask_files"]
        ),
        name="inputnode"
    )

    outputnode = pe.Node(
        interface=niu.IdentityInterface(
            fields=["imgs", "varcopes", "zstats", "dof_files", "mask_file"]
        ),
        name="outputnode"
    )

    # merge all input nii image files to one big nii file
    maskmerge = pe.Node(
        interface=fsl.Merge(dimension="t"),
        name="maskmerge"
    )
    # calculate the intersection of all masks
    maskagg = pe.Node(
        interface=fsl.ImageMaths(
            op_string="-Tmin -thr 1 -bin"
        ),
        name="maskagg"
    )

    # merge all input nii image files to one big nii file
    imgmerge = pe.Node(
        interface=fsl.Merge(dimension="t"),
        name="imgmerge"
    )

    # we get a text dof_file, but need to transform it to an nii image
    gendofimage = pe.MapNode(
        interface=fsl.ImageMaths(),
        iterfield=["in_file", "op_string"],
        name="gendofimage"
    )

    # merge all input nii image files to one big nii file
    varcopemerge = pe.Node(
        interface=fsl.Merge(dimension="t"),
        name="varcopemerge"
    )

    # merge all generated nii image files to one big nii file
    dofmerge = pe.Node(
        interface=fsl.Merge(dimension="t"),
        name="dofmerge"
    )

    # specify statistical analysis

    # Read qcresults.json and exclude bad subjects from statistics
    excluded_overview = get_qualitycheck_exclude(workdir)
    excluded_subjects = []
    if excluded_overview:
        df_exclude = pd.DataFrame(excluded_overview).transpose()
        excluded_subjects = df_exclude.loc[df_exclude[task] == True].index
        trimmed_subjects = list(subjects)
        for excluded_subject in excluded_subjects:
            trimmed_subjects.remove(excluded_subject)

        # save a JSON file in workdir listing the included subjects, in case
        # any were excluded by the quality check
        # (sets are used here for easy subtraction of the subject lists)
        included_subjects = list(set(subjects) - set(excluded_subjects))
        df_included_subjects = pd.DataFrame(included_subjects, columns=['Subjects'])
        df_included_subjects = df_included_subjects.sort_values(by=['Subjects'])  # sort by name
        df_included_subjects = df_included_subjects.reset_index(drop=True)  # reindex for ascending numbers
        json_path = workdir + '/included_subjects.json'
        df_included_subjects.to_json(json_path)
        with open(json_path, 'w') as json_file:
            # json is loaded from pandas to json and then dumped to get indent in file
            json.dump(json.loads(df_included_subjects.to_json()), json_file, indent=4)
    else:
        trimmed_subjects = subjects # in case there are no excluded subjects

    # option 1: one-sample t-test
    contrasts = [["mean", "T", ["intercept"], [1]]]
    level2model = pe.Node(
        interface=fsl.MultipleRegressDesign(
            regressors={"intercept": [1.0 for s in trimmed_subjects]},
            contrasts=contrasts
        ),
        name="l2model"
    )

    if covariates is not None:

        # Transform covariates dict to pandas dataframe
        df_covariates = pd.DataFrame(covariates)
        if list(excluded_subjects):
            # Read qcresults.json and exclude bad subjects from covariates and subject_groups
            df_covariates = df_covariates.drop(excluded_subjects)

            for excluded_subject in excluded_subjects:
                subject_groups.pop(excluded_subject, None)

        for covariate in df_covariates:
            # Demean covariates for flameo
            df_covariates[covariate] = df_covariates[covariate] - df_covariates[covariate].mean()
        # transform reduced covariates back to dict for later purposes
        covariates = df_covariates.to_dict()

        # add SubjectGroups and ID to header
        df_subject_group = pd.DataFrame.from_dict(subject_groups, orient='index', columns=['SubjectGroup'])
        df_covariates = pd.concat([df_subject_group, df_covariates], axis=1, sort=True)
        df_covariates = df_covariates.reset_index()  # add id column
        df_covariates = df_covariates.rename(columns={'index': 'Subject_ID'})  # rename subject column

        # save demeaned covariates to csv
        df_covariates.to_csv(workdir + '/demeaned_covariates.csv')

        # transform to dictionary of lists
        regressors = {k: [float(v[s]) for s in trimmed_subjects] for k, v in covariates.items()}
        if (subject_groups is None) or (bool(subject_groups) is False):
            # one-sample t-test with covariates
            regressors["intercept"] = [1.0 for s in trimmed_subjects]
            level2model = pe.Node(
                interface=fsl.MultipleRegressDesign(
                    regressors=regressors,
                    contrasts=contrasts
                ),
                name="l2model"
            )
        else:
            # two-sample t-tests with covariates

            # dummy coding of variables: group names --> numbers in the matrix
            # see fsl feat documentation
            # https://fsl.fmrib.ox.ac.uk/fsl/fslwiki/FEAT/UserGuide#Tripled_Two-Group_Difference_.28.22Tripled.22_T-Test.29
            dummies = pd.Series(subject_groups).str.get_dummies().to_dict()
            # transform to dictionary of lists
            dummies = {k: [float(v[s]) for s in trimmed_subjects] for k, v in dummies.items()}
            regressors.update(dummies)

            # transform to dictionary of lists
            contrasts = [[k, "T"] + list(map(list, zip(*v.items()))) for k, v in group_contrasts.items()]

            level2model = pe.Node(
                interface=fsl.MultipleRegressDesign(
                    regressors=regressors,
                    contrasts=contrasts
                ),
                name="l2model"
            )

    contrast_names = [c[0] for c in contrasts]

    # actually run FSL FLAME

    if outname not in ["reho", "alff"]:
        flameo = pe.MapNode(
            interface=fsl.FLAMEO(
                run_mode=run_mode
            ),
            name="flameo",
            iterfield=["cope_file", "var_cope_file"]
        )
    else:
        flameo = pe.MapNode(
            interface=fsl.FLAMEO(
                run_mode=run_mode
            ),
            name="flameo",
            iterfield=["cope_file"]
        )

    workflow.connect([
        (inputnode, imgmerge, [
            ("imgs", "in_files")
        ]),

        (inputnode, maskmerge, [
            ("mask_files", "in_files")
        ]),
        (maskmerge, maskagg, [
            ("merged_file", "in_file")
        ]),
    ])
    if outname not in ["reho", "alff"]:
        workflow.connect([
            (inputnode, gendofimage, [
                ("imgs", "in_file"),
                (("dof_files", gen_merge_op_str), "op_string")
            ]),

            (inputnode, varcopemerge, [
                ("varcopes", "in_files")
            ]),

            (gendofimage, dofmerge, [
                ("out_file", "in_files")
            ])])
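    # NOTE: `gen_merge_op_str` is not defined in this snippet. A plausible
    # sketch of its role (an assumption, not the verbatim helper): it reads
    # each dof text file and returns an ImageMaths op_string that turns the
    # matching cope image into a constant dof volume, e.g.
    #
    #     def gen_merge_op_str(dof_files):
    #         return ["-mul 0 -add %d" % int(float(open(f).read()))
    #                 for f in dof_files]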

    workflow.connect([
        (imgmerge, flameo, [
            ("merged_file", "cope_file")
        ])])

    if outname not in ["reho", "alff"]:
        workflow.connect([
            (varcopemerge, flameo, [
                ("merged_file", "var_cope_file")
            ]),
            (dofmerge, flameo, [
                ("merged_file", "dof_var_cope_file")
            ])])

    workflow.connect([
        (level2model, flameo, [
            ("design_mat", "design_file"),
            ("design_con", "t_con_file"),
            ("design_grp", "cov_split_file")
        ]),

        (flameo, outputnode, [
            (("copes", flatten), "imgs"),
            (("var_copes", flatten), "varcopes"),
            (("zstats", flatten), "zstats"),
            (("tdof", flatten), "dof_files")
        ]),
        (maskagg, flameo, [
            ("out_file", "mask_file")
        ]),
        (maskagg, outputnode, [
            ("out_file", "mask_file")
        ]),
    ])

    return workflow, contrast_names
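# A small worked example of the dummy coding and contrast reshaping above
# (hypothetical names, for illustration only): with
#     subject_groups = {"s1": "patient", "s2": "control"}
#     group_contrasts = {"patient>control": {"patient": 1, "control": -1}}
# pd.Series(...).str.get_dummies() adds the group regressors
#     {"control": [0.0, 1.0], "patient": [1.0, 0.0]}
# and the zip(*v.items()) reshaping yields MultipleRegressDesign contrasts
#     [["patient>control", "T", ["patient", "control"], [1, -1]]]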
Example #18
def l2_anova(
    l1_dir,
    keep_work=False,
    l2_dir="",
    loud=False,
    tr=1,
    nprocs=6,
    workflow_name="generic",
    mask="/usr/share/mouse-brain-atlases/dsurqec_200micron_mask.nii",
    exclude={},
    include={},
    match_regex='.+/sub-(?P<sub>[a-zA-Z0-9]+)/ses-(?P<ses>[a-zA-Z0-9]+)/.*?_acq-(?P<acq>[a-zA-Z0-9]+)_task-(?P<task>[a-zA-Z0-9]+)_(?P<mod>[a-zA-Z0-9]+)_(?P<stat>(cope|varcb)+)\.(?:nii|nii\.gz)'
):

    l1_dir = path.expanduser(l1_dir)
    if not l2_dir:
        l2_dir = path.abspath(path.join(l1_dir, "..", "..", "l2"))

    mask = path.abspath(path.expanduser(mask))

    datafind = nio.DataFinder()
    datafind.inputs.root_paths = l1_dir
    datafind.inputs.match_regex = match_regex
    datafind_res = datafind.run()

    data_selection = zip(*[
        datafind_res.outputs.sub, datafind_res.outputs.ses,
        datafind_res.outputs.acq, datafind_res.outputs.task,
        datafind_res.outputs.mod, datafind_res.outputs.stat,
        datafind_res.outputs.out_paths
    ])
    data_selection = [list(i) for i in data_selection]
    data_selection = pd.DataFrame(data_selection,
                                  columns=('subject', 'session', 'acquisition',
                                           'task', 'modality', 'statistic',
                                           'path'))

    data_selection = data_selection.sort_values(['session', 'subject'],
                                                ascending=[1, 1])
    if exclude:
        for key in exclude:
            data_selection = data_selection[~data_selection[key].isin(exclude[key])]
    if include:
        for key in include:
            data_selection = data_selection[data_selection[key].isin(include[key])]

    copes = data_selection[data_selection['statistic'] ==
                           'cope']['path'].tolist()
    varcopes = data_selection[data_selection['statistic'] ==
                              'varcb']['path'].tolist()

    copemerge = pe.Node(interface=fsl.Merge(dimension='t'), name="copemerge")
    copemerge.inputs.in_files = copes
    copemerge.inputs.merged_file = 'copes.nii.gz'

    varcopemerge = pe.Node(interface=fsl.Merge(dimension='t'),
                           name="varcopemerge")
    varcopemerge.inputs.in_files = varcopes
    varcopemerge.inputs.merged_file = 'varcopes.nii.gz'

    copeonly = data_selection[data_selection['statistic'] == 'cope']
    regressors = {}
    for sub in copeonly['subject'].unique():
        regressor = (copeonly['subject'] == sub).astype(int).tolist()
        key = "sub-" + str(sub)
        regressors[key] = regressor
    reference = str(copeonly['session'].unique()[0])
    for ses in copeonly['session'].unique()[1:]:
        regressor = (copeonly['session'] == ses).astype(int).tolist()
        key = "ses-(" + str(ses) + '-' + reference + ')'
        regressors[key] = regressor

    sessions = [[i, 'T', [i], [1]] for i in regressors.keys() if "ses-" in i]
    contrasts = deepcopy(sessions)
    contrasts.append(['anova', 'F', sessions])
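    # Worked example (hypothetical data): for subjects ["a", "b"] each scanned
    # in sessions ["1", "2"] (reference session "1"), the loops above yield
    #     regressors = {"sub-a": [1, 0, 1, 0], "sub-b": [0, 1, 0, 1],
    #                   "ses-(2-1)": [0, 0, 1, 1]}
    # and one t-contrast per non-reference session plus an F-test over all:
    #     [["ses-(2-1)", "T", ["ses-(2-1)"], [1]],
    #      ["anova", "F", [["ses-(2-1)", "T", ["ses-(2-1)"], [1]]]]]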

    level2model = pe.Node(interface=fsl.MultipleRegressDesign(),
                          name='level2model')
    level2model.inputs.regressors = regressors
    level2model.inputs.contrasts = contrasts

    flameo = pe.Node(interface=fsl.FLAMEO(), name="flameo")
    flameo.inputs.mask_file = mask
    # Mixed-effects estimation rather than 'ols' is recommended
    # (https://dpaniukov.github.io/2016/07/14/three-level-analysis-with-fsl-and-ants-2.html);
    # 'flame12' has also been tested in SAMRI and shown to give better estimates.
    flameo.inputs.run_mode = "flame12"

    substitutions = []
    t_counter = 1
    f_counter = 1
    for contrast in contrasts:
        if contrast[1] == 'T':
            for i in ['cope', 'tstat', 'zstat']:
                substitutions.append(
                    (i + str(t_counter), contrast[0] + "_" + i))
            t_counter += 1
        if contrast[1] == 'F':
            for i in ['zfstat', 'fstat']:
                substitutions.append(
                    (i + str(f_counter), contrast[0] + "_" + i))
            f_counter += 1

    datasink = pe.Node(nio.DataSink(), name='datasink')
    datasink.inputs.base_directory = path.join(l2_dir, workflow_name)
    datasink.inputs.substitutions = substitutions

    workflow_connections = [
        (copemerge, flameo, [('merged_file', 'cope_file')]),
        (varcopemerge, flameo, [('merged_file', 'var_cope_file')]),
        (level2model, flameo, [('design_mat', 'design_file')]),
        (level2model, flameo, [('design_grp', 'cov_split_file')]),
        (level2model, flameo, [('design_fts', 'f_con_file')]),
        (level2model, flameo, [('design_con', 't_con_file')]),
        (flameo, datasink, [('copes', '@copes')]),
        (flameo, datasink, [('tstats', '@tstats')]),
        (flameo, datasink, [('zstats', '@zstats')]),
        (flameo, datasink, [('fstats', '@fstats')]),
        (flameo, datasink, [('zfstats', '@zfstats')]),
    ]

    workdir_name = workflow_name + "_work"
    workflow = pe.Workflow(name=workdir_name)
    workflow.connect(workflow_connections)
    workflow.config = {
        "execution": {
            "crashdump_dir": path.join(l2_dir, "crashdump")
        }
    }
    workflow.base_dir = l2_dir
    workflow.write_graph(dotfilename=path.join(workflow.base_dir, workdir_name,
                                               "graph.dot"),
                         graph2use="hierarchical",
                         format="png")

    if not loud:
        try:
            workflow.run(plugin="MultiProc", plugin_args={'n_procs': nprocs})
        except RuntimeError:
            print(
                "WARNING: Some expected tasks have not been found (or another RuntimeError has occured)."
            )
        for f in listdir(getcwd()):
            if re.search("crash.*?-varcopemerge|-copemerge.*", f):
                remove(path.join(getcwd(), f))
    else:
        workflow.run(plugin="MultiProc", plugin_args={'n_procs': nprocs})

    if not keep_work:
        shutil.rmtree(path.join(l2_dir, workdir_name))
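# The DataFinder regexes in these SAMRI examples parse BIDS-style paths into
# named groups that become the data_selection columns. A quick illustration
# with a hypothetical path:
#
#     import re
#     m = re.match(match_regex,
#                  '/l1/sub-10/ses-1/sub-10_ses-1_acq-epi_task-rest_bold_cope.nii.gz')
#     m.groupdict()
#     # {'sub': '10', 'ses': '1', 'acq': 'epi', 'task': 'rest',
#     #  'mod': 'bold', 'stat': 'cope'}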
Example #19
def create_volume_mixedfx_workflow(name="volume_group",
                                   subject_list=None,
                                   regressors=None,
                                   contrasts=None,
                                   exp_info=None):

    # Handle default arguments
    if subject_list is None:
        subject_list = []
    if regressors is None:
        regressors = dict(group_mean=[])
    if contrasts is None:
        contrasts = [["group_mean", "T", ["group_mean"], [1]]]
    if exp_info is None:
        exp_info = lyman.default_experiment_parameters()

    # Define workflow inputs
    inputnode = Node(
        IdentityInterface(["l1_contrast", "copes", "varcopes", "dofs"]),
        "inputnode")

    # Merge the fixed effect summary images into one 4D image
    merge = Node(MergeAcrossSubjects(regressors=regressors), "merge")

    # Make a simple design
    design = Node(fsl.MultipleRegressDesign(contrasts=contrasts), "design")

    # Fit the mixed effects model
    flameo = Node(fsl.FLAMEO(run_mode=exp_info["flame_mode"]), "flameo")

    # Estimate the smoothness of the data
    smoothest = Node(fsl.SmoothEstimate(), "smoothest")

    # Correct for multiple comparisons
    cluster = Node(
        fsl.Cluster(threshold=exp_info["cluster_zthresh"],
                    pthreshold=exp_info["grf_pthresh"],
                    out_threshold_file=True,
                    out_index_file=True,
                    out_localmax_txt_file=True,
                    peak_distance=exp_info["peak_distance"],
                    use_mm=True), "cluster")

    # Project the mask and thresholded zstat onto the surface
    surfproj = create_surface_projection_workflow(exp_info=exp_info)

    # Segment the z stat image with a watershed algorithm
    watershed = Node(Watershed(), "watershed")

    # Make static report images in the volume
    report = Node(MFXReport(), "report")
    report.inputs.subjects = subject_list

    # Save the experiment info
    saveparams = Node(SaveParameters(exp_info=exp_info), "saveparams")

    # Define the workflow outputs
    outputnode = Node(
        IdentityInterface([
            "copes", "varcopes", "mask_file", "flameo_stats", "thresh_zstat",
            "surf_zstat", "surf_mask", "cluster_image", "seg_file",
            "peak_file", "lut_file", "report", "json_file"
        ]), "outputnode")

    # Define and connect up the workflow
    group = Workflow(name)
    group.connect([
        (inputnode, merge, [("copes", "cope_files"),
                            ("varcopes", "varcope_files"),
                            ("dofs", "dof_files")]),
        (inputnode, saveparams, [("copes", "in_file")]),
        (merge, flameo, [("cope_file", "cope_file"),
                         ("varcope_file", "var_cope_file"),
                         ("dof_file", "dof_var_cope_file"),
                         ("mask_file", "mask_file")]),
        (merge, design, [("regressors", "regressors")]),
        (design, flameo, [("design_con", "t_con_file"),
                          ("design_grp", "cov_split_file"),
                          ("design_mat", "design_file")]),
        (flameo, smoothest, [("zstats", "zstat_file")]),
        (merge, smoothest, [("mask_file", "mask_file")]),
        (smoothest, cluster, [("dlh", "dlh"), ("volume", "volume")]),
        (flameo, cluster, [("zstats", "in_file")]),
        (cluster, watershed, [("threshold_file", "zstat_file"),
                              ("localmax_txt_file", "localmax_file")]),
        (merge, report, [("mask_file", "mask_file"),
                         ("cope_file", "cope_file")]),
        (flameo, report, [("zstats", "zstat_file")]),
        (cluster, report, [("threshold_file", "zstat_thresh_file"),
                           ("localmax_txt_file", "localmax_file")]),
        (watershed, report, [("seg_file", "seg_file")]),
        (merge, surfproj, [("mask_file", "inputs.mask_file")]),
        (cluster, surfproj, [("threshold_file", "inputs.zstat_file")]),
        (merge, outputnode, [("cope_file", "copes"),
                             ("varcope_file", "varcopes"),
                             ("mask_file", "mask_file")]),
        (flameo, outputnode, [("stats_dir", "flameo_stats")]),
        (cluster, outputnode, [("threshold_file", "thresh_zstat"),
                               ("index_file", "cluster_image")]),
        (watershed, outputnode, [("seg_file", "seg_file"),
                                 ("peak_file", "peak_file"),
                                 ("lut_file", "lut_file")]),
        (surfproj, outputnode, [("outputs.surf_zstat", "surf_zstat"),
                                ("outputs.surf_mask", "surf_mask")]),
        (report, outputnode, [("out_files", "report")]),
        (saveparams, outputnode, [("json_file", "json_file")]),
    ])

    return group, inputnode, outputnode
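# Hypothetical usage sketch (names are illustrative; when exp_info is omitted
# the function falls back to lyman.default_experiment_parameters()):
#
#     subject_list = ["subj01", "subj02", "subj03"]
#     regressors = dict(group_mean=[1, 1, 1])
#     contrasts = [["group_mean", "T", ["group_mean"], [1]]]
#     group, inputnode, outputnode = create_volume_mixedfx_workflow(
#         subject_list=subject_list, regressors=regressors,
#         contrasts=contrasts)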
Example #20
def second_level_wf(name):
    """second level analysis"""
    workflow = pe.Workflow(name=name)

    inputnode = pe.Node(niu.IdentityInterface(fields=[
        'copes', 'varcopes', 'group_mask', 'design_mat', 'design_con',
        'design_grp'
    ]),
                        name='inputnode')

    outputnode = pe.Node(niu.IdentityInterface(
        fields=['zstat', 'tstat', 'pstat', 'fwe_thres', 'fdr_thres']),
                         name='outputnode')

    copemerge = pe.Node(fsl.Merge(dimension='t'), name='copemerge', mem_gb=40)
    varcopemerge = pe.Node(fsl.Merge(dimension='t'),
                           name='varcopemerge',
                           mem_gb=40)
    flameo = pe.Node(fsl.FLAMEO(run_mode='ols'), name='flameo')
    ztopval = pe.Node(fsl.ImageMaths(op_string='-ztop', suffix='_pval'),
                      name='ztop')

    # FDR
    fdr = pe.Node(FDR(), name='calc_fdr')
    fdr_apply = pe.Node(fsl.ImageMaths(suffix='_thresh_vox_fdr_pstat1'),
                        name='fdr_apply')

    # FWE
    def _reselcount(voxels, resels):
        return float(voxels / resels)

    smoothness = pe.Node(fsl.SmoothEstimate(), name='smoothness')
    rescount = pe.Node(niu.Function(function=_reselcount), name='reselcount')
    ptoz = pe.Node(PtoZ(), name='ptoz')
    fwethres = pe.Node(fsl.Threshold(), name='fwethres')

    # Cluster
    cluster = pe.Node(fsl.Cluster(threshold=3.2,
                                  pthreshold=0.05,
                                  connectivity=26,
                                  use_mm=True),
                      name='cluster')

    def _len(inlist):
        return len(inlist)

    def _lastidx(inlist):
        return len(inlist) - 1

    def _first(inlist):
        if isinstance(inlist, (list, tuple)):
            return inlist[0]
        return inlist

    def _fdr_thres_operator(fdr_th):
        return '-mul -1 -add 1 -thr %f' % (1 - fdr_th)
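    # E.g. _fdr_thres_operator(0.05) -> '-mul -1 -add 1 -thr 0.950000':
    # applied to a p-image, the values become 1-p before thresholding, so
    # exactly the voxels whose p falls below the FDR cutoff survive.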

    # create workflow
    workflow.connect([
        (inputnode, flameo, [('design_mat', 'design_file'),
                             ('design_con', 't_con_file'),
                             ('design_grp', 'cov_split_file')]),
        (inputnode, copemerge, [('copes', 'in_files')]),
        (inputnode, varcopemerge, [('varcopes', 'in_files')]),
        (inputnode, flameo, [('group_mask', 'mask_file')]),
        (copemerge, flameo, [('merged_file', 'cope_file')]),
        (varcopemerge, flameo, [('merged_file', 'var_cope_file')]),
        (flameo, ztopval, [(('zstats', _first), 'in_file')]),
        (ztopval, fdr, [('out_file', 'in_file')]),
        (inputnode, fdr, [('group_mask', 'in_mask')]),
        (inputnode, fdr_apply, [('group_mask', 'mask_file')]),
        (flameo, fdr_apply, [(('zstats', _first), 'in_file')]),
        (fdr, fdr_apply, [(('fdr_val', _fdr_thres_operator), 'op_string')]),
        (inputnode, smoothness, [('group_mask', 'mask_file')]),
        (flameo, smoothness, [(('res4d', _first), 'residual_fit_file')]),
        (inputnode, smoothness, [(('copes', _lastidx), 'dof')]),
        (smoothness, rescount, [('resels', 'resels'), ('volume', 'voxels')]),
        (rescount, ptoz, [('out', 'resels')]),
        (flameo, fwethres, [(('zstats', _first), 'in_file')]),
        (ptoz, fwethres, [('z_val', 'thresh')]),
        (flameo, cluster, [(('zstats', _first), 'in_file'),
                           (('copes', _first), 'cope_file')]),
        (smoothness, cluster, [('dlh', 'dlh'), ('volume', 'volume')]),
        (flameo, outputnode, [
            (('zstats', _first), 'zstat'),
            (('tstats', _first), 'tstat'),
        ]),
        (ztopval, outputnode, [('out_file', 'pstat')]),
        (fdr_apply, outputnode, [('out_file', 'fdr_thres')]),
        (fwethres, outputnode, [('out_file', 'fwe_thres')]),
    ])
    return workflow
Example #21
def modelfit_2ndlevel(
        wf_name='2nd_level_modelfit',
        method='flameo',
        standardize=True):  # TODO: standardization in separate workflow!

    # method is one of 'flameo', 'palm', 'randomise' or 'randomise_parallel'

    model = pe.Workflow(name=wf_name)
    """
        Set up a node to define all inputs required for the 2nd-level model workflow

    """

    inputnode = pe.Node(
        interface=util.IdentityInterface(
            fields=[
                'copes', 'varcopes', 'func2anat_mat', 'std_brain',
                'anat_to_std_warp', 'std_brain_mask', 'regressors',
                'contrasts', 'groups'
            ],  # TODO: groups!!
            mandatory_inputs=True),
        name='inputspec')
    """
        Set up a node to define outputs for the 2nd-level model workflow

    """

    outputnode = pe.Node(interface=util.IdentityInterface(
        fields=['zstats'], mandatory_inputs=True),
                         name='outputspec')

    ###################################################################################################
    # merge copes
    copemerge = pe.Node(interface=fsl.Merge(dimension='t'), name="copemerge")

    # standardize copes and varcopes
    if (standardize):

        applyWarpCope = pe.MapNode(
            interface=fsl.ApplyWarp(interp='sinc'),
            name="warp_cope",
            iterfield=['in_file', 'field_file', 'premat'])

        model.connect(inputnode, 'func2anat_mat', applyWarpCope, 'premat')
        model.connect(inputnode, 'copes', applyWarpCope, 'in_file')
        model.connect(inputnode, 'std_brain', applyWarpCope, 'ref_file')
        model.connect(inputnode, 'anat_to_std_warp', applyWarpCope,
                      'field_file')
        model.connect(applyWarpCope, 'out_file', copemerge, 'in_files')
    else:
        model.connect(inputnode, 'copes', copemerge, 'in_files')

    if (method == 'flameo'):  # same for varcopes if flameo

        varcopemerge = pe.Node(interface=fsl.Merge(dimension='t'),
                               name="varcopemerge")

        if (standardize):
            applyWarpVarcope = pe.MapNode(
                interface=fsl.ApplyWarp(interp='sinc'),
                name="warp_varcope",
                iterfield=['in_file', 'field_file', 'premat'])

            model.connect(inputnode, 'func2anat_mat', applyWarpVarcope,
                          'premat')
            model.connect(inputnode, 'varcopes', applyWarpVarcope, 'in_file')
            model.connect(inputnode, 'std_brain', applyWarpVarcope, 'ref_file')
            model.connect(inputnode, 'anat_to_std_warp', applyWarpVarcope,
                          'field_file')

            model.connect(applyWarpVarcope, 'out_file', varcopemerge,
                          'in_files')
        else:
            model.connect(inputnode, 'varcopes', varcopemerge, 'in_files')

    #level2model = pe.Node(interface=fsl.L2Model(num_copes=35),
    #                     name='l2model')

    level2model = pe.Node(interface=fsl.MultipleRegressDesign(), name='design')

    model.connect(inputnode, 'regressors', level2model, 'regressors')
    model.connect(inputnode, 'contrasts', level2model, 'contrasts')
    model.connect(inputnode, 'groups', level2model, 'groups')

    if (method == 'flameo'):
        flameo = pe.Node(interface=fsl.FLAMEO(run_mode='fe'), name="flameo")

        model.connect([
            (inputnode, flameo, [('std_brain_mask', 'mask_file')]),
            (copemerge, flameo, [('merged_file', 'cope_file')]),
            (varcopemerge, flameo, [('merged_file', 'var_cope_file')]),
            (level2model, flameo, [('design_mat', 'design_file'),
                                   ('design_con', 't_con_file'),
                                   ('design_grp', 'cov_split_file')]),
            (flameo, outputnode, [('zstats', 'zstats')])
        ])
    elif (method == 'palm'):
        palm = pe.Node(util.Function(input_names=[
            'cope_file', 'design_file', 'contrast_file', 'group_file',
            'mask_file', 'cluster_threshold'
        ],
                                     output_names=['palm_outputs'],
                                     function=run_palm),
                       name='palm')

        model.connect([(inputnode, palm, [('std_brain_mask', 'mask_file')]),
                       (copemerge, palm, [('merged_file', 'cope_file')]),
                       (level2model, palm, [('design_mat', 'design_file'),
                                            ('design_con', 'contrast_file'),
                                            ('design_grp', 'group_file')]),
                       (palm, outputnode, [('palm_outputs', 'zstats')])])
        palm.inputs.cluster_threshold = 2.3  #TODO: make parametrizable
        palm.plugin_args = {
            'sbatch_args': '-p om_all_nodes -N1 -c2 --mem=10G',
            'overwrite': True
        }
    elif (method == 'randomise'):
        rand = pe.Node(util.Function(input_names=[
            'cope_file', 'design_file', 'contrast_file', 'group_file',
            'mask_file', 'cluster_threshold', 'n'
        ],
                                     output_names=['palm_outputs'],
                                     function=run_rand),
                       name='randomise')

        model.connect([(inputnode, rand, [('std_brain_mask', 'mask_file')]),
                       (copemerge, rand, [('merged_file', 'cope_file')]),
                       (level2model, rand, [('design_mat', 'design_file'),
                                            ('design_con', 'contrast_file'),
                                            ('design_grp', 'group_file')]),
                       (rand, outputnode, [('palm_outputs', 'zstats')])])
        rand.inputs.cluster_threshold = 2.3  #TODO: make parametrizable
        rand.inputs.n = 1000
        #rand.plugin_args = {'sbatch_args': '-p om_all_nodes -N1 -c2 --mem=10G', 'overwrite': True}
    elif (method == 'randomise_parallel'):
        rand = pe.Node(util.Function(input_names=[
            'cope_file', 'design_file', 'contrast_file', 'group_file',
            'mask_file', 'cluster_threshold', 'n'
        ],
                                     output_names=['palm_outputs'],
                                     function=run_rand_par),
                       name='randomise')

        model.connect([(inputnode, rand, [('std_brain_mask', 'mask_file')]),
                       (copemerge, rand, [('merged_file', 'cope_file')]),
                       (level2model, rand, [('design_mat', 'design_file'),
                                            ('design_con', 'contrast_file'),
                                            ('design_grp', 'group_file')]),
                       (rand, outputnode, [('palm_outputs', 'zstats')])])
        rand.inputs.cluster_threshold = 2.3  # TODO: make parametrizable
        rand.inputs.n = 1000
        #rand.plugin_args = {'sbatch_args': '-p om_all_nodes -N1 -c2 --mem=10G', 'overwrite': True}

    else:
        raise ValueError('No such 2nd-level statistical model method: ' + method)

    return model
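# NOTE: `run_palm`, `run_rand` and `run_rand_par` are project-specific
# wrappers that are not shown in this snippet. A minimal sketch of what
# `run_rand` plausibly does, built on FSL's actual randomise flags (an
# assumption, not the verbatim helper; `group_file` handling is omitted):

def run_rand(cope_file, design_file, contrast_file, group_file,
             mask_file, cluster_threshold, n):
    import os
    import glob
    import subprocess
    out_root = os.path.join(os.getcwd(), 'randomise_out')
    # -n: number of permutations, -c: cluster-forming z threshold
    subprocess.check_call([
        'randomise', '-i', cope_file, '-o', out_root,
        '-d', design_file, '-t', contrast_file, '-m', mask_file,
        '-n', str(n), '-c', str(cluster_threshold)])
    return sorted(glob.glob(out_root + '*'))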
NodeHash_28f35280.inputs.warp_resolution = (10, 10, 10)

#Wraps command **applywarp**
NodeHash_29d3f040 = pe.MapNode(interface=fsl.ApplyWarp(),
                               name='NodeName_29d3f040',
                               iterfield=['field_file', 'in_file', 'premat'])
NodeHash_29d3f040.inputs.interp = 'trilinear'

#Wraps command **applywarp**
NodeHash_29ecf020 = pe.MapNode(interface=fsl.ApplyWarp(),
                               name='NodeName_29ecf020',
                               iterfield=['field_file', 'in_file', 'premat'])
NodeHash_29ecf020.inputs.interp = 'trilinear'

#Wraps command **fslmerge**
NodeHash_2ceb9d10 = pe.Node(interface=fsl.Merge(), name='NodeName_2ceb9d10')
NodeHash_2ceb9d10.inputs.dimension = 't'

#Wraps command **flameo**
NodeHash_2f149160 = pe.Node(interface=fsl.FLAMEO(), name='NodeName_2f149160')
NodeHash_2f149160.inputs.run_mode = 'flame1'

#Wraps command **smoothest**
NodeHash_2fbc52b0 = pe.Node(interface=fsl.SmoothEstimate(), name='NodeName_2fbc52b0')

#Wraps command **cluster**
NodeHash_318a61d0 = pe.Node(interface=fsl.Cluster(), name='NodeName_318a61d0')
NodeHash_318a61d0.inputs.pthreshold = 0.05
NodeHash_318a61d0.inputs.threshold = 2.3

#Wraps command **fslmerge**
NodeHash_33749690 = pe.Node(interface=fsl.Merge(), name='NodeName_33749690')
NodeHash_33749690.inputs.dimension = 't'

#Create a workflow to connect all those nodes
Example #23
File: glm.py Project: Doeme/SAMRI
def l2_common_effect(
    l1_dir,
    exclude={},
    groupby="session",
    keep_work=False,
    l2_dir="",
    loud=False,
    tr=1,
    nprocs=6,
    workflow_name="generic",
    mask="/home/chymera/ni_data/templates/ds_QBI_chr_bin.nii.gz",
):

    l1_dir = path.expanduser(l1_dir)
    if not l2_dir:
        l2_dir = path.abspath(path.join(l1_dir, "..", "..", "l2"))

    datafind = nio.DataFinder()
    datafind.inputs.root_paths = l1_dir
    datafind.inputs.match_regex = '.+/sub-(?P<sub>.+)/ses-(?P<ses>.+)/.*?_trial-(?P<scan>.+)_cope\.nii\.gz'
    datafind_res = datafind.run()
    subjects = set(datafind_res.outputs.sub)
    sessions = set(datafind_res.outputs.ses)
    scans = set(datafind_res.outputs.scan)

    copemerge = pe.Node(interface=fsl.Merge(dimension='t'), name="copemerge")
    varcopemerge = pe.Node(interface=fsl.Merge(dimension='t'),
                           name="varcopemerge")

    level2model = pe.Node(interface=fsl.L2Model(), name='level2model')

    flameo = pe.Node(interface=fsl.FLAMEO(), name="flameo")
    flameo.inputs.mask_file = mask
    flameo.inputs.run_mode = "ols"

    datasink = pe.Node(nio.DataSink(), name='datasink')
    datasink.inputs.base_directory = path.join(l2_dir, workflow_name)
    datasink.inputs.substitutions = [
        ('_iterable_', ''),
    ]

    if groupby == "subject":
        infosource = pe.Node(
            interface=util.IdentityInterface(fields=['iterable']),
            name="infosource")
        infosource.iterables = [('iterable', subjects)]
        datasource = pe.Node(interface=nio.DataGrabber(
            infields=[
                "group",
            ], outfields=["copes", "varcbs"]),
                             name="datasource")
        datasource.inputs.template_args = dict(copes=[['group', 'group']],
                                               varcbs=[['group', 'group']])
        datasource.inputs.field_template = dict(
            copes="sub-%s/ses-*/sub-%s_ses-*_trial-*_cope.nii.gz",
            varcbs="sub-%s/ses-*/sub-%s_ses-*_trial-*_varcb.nii.gz",
        )
        workflow_connections = [
            (infosource, datasource, [('iterable', 'group')]),
            (infosource, copemerge, [(('iterable', add_suffix, "_cope.nii.gz"),
                                      'merged_file')]),
            (infosource, varcopemerge, [(('iterable', add_suffix,
                                          "_varcb.nii.gz"), 'merged_file')]),
        ]
    elif groupby == "subject_scan":
        #does not currently work, due to missing iterator combinations (same issue as preprocessing)
        merge = pe.Node(interface=util.Merge(2), name="merge")
        infosource = pe.Node(
            interface=util.IdentityInterface(fields=['subject', 'scan']),
            name="infosource")
        infosource.iterables = [('subject', subjects), ('scan', scans)]
        datasource = pe.Node(interface=nio.DataGrabber(
            infields=[
                "subject",
                "scan",
            ], outfields=["copes", "varcbs"]),
                             name="datasource")
        datasource.inputs.template_args = dict(
            copes=[["subject", "subject", "scan"]],
            varcbs=[["subject", "subject", "scan"]])
        datasource.inputs.field_template = dict(
            copes="sub-%s/ses-*/sub-%s_ses-*_trial-%s_cope.nii.gz",
            varcbs="sub-%s/ses-*/sub-%s_ses-*_trial-%s_varcb.nii.gz",
        )
        workflow_connections = [
            (infosource, datasource, [('subject', 'subject'),
                                      ('scan', 'scan')]),
            (infosource, merge, [('subject', 'in1'), ('scan', 'in2')]),
            (merge, copemerge, [(('out', add_suffix, "_cope.nii.gz"),
                                 'merged_file')]),
            (merge, varcopemerge, [(('out', add_suffix, "_varcb.nii.gz"),
                                    'merged_file')]),
        ]
    elif groupby == "session":
        infosource = pe.Node(
            interface=util.IdentityInterface(fields=['iterable']),
            name="infosource")
        infosource.iterables = [('iterable', sessions)]
        datasource = pe.Node(interface=nio.DataGrabber(
            infields=[
                "group",
            ], outfields=["copes", "varcbs"]),
                             name="datasource")
        datasource.inputs.template_args = dict(copes=[['group', 'group']],
                                               varcbs=[['group', 'group']])
        datasource.inputs.field_template = dict(
            copes="sub-*/ses-%s/sub-*_ses-%s_trial-*_cope.nii.gz",
            varcbs="sub-*/ses-%s/sub-*_ses-%s_trial-*_varcb.nii.gz",
        )
        workflow_connections = [
            (infosource, datasource, [('iterable', 'group')]),
            (infosource, copemerge, [(('iterable', add_suffix, "_cope.nii.gz"),
                                      'merged_file')]),
            (infosource, varcopemerge, [(('iterable', add_suffix,
                                          "_varcb.nii.gz"), 'merged_file')]),
        ]
    elif groupby == "scan":
        infosource = pe.Node(
            interface=util.IdentityInterface(fields=['iterable']),
            name="infosource")
        infosource.iterables = [('iterable', scans)]
        datasource = pe.Node(interface=nio.DataGrabber(
            infields=[
                "group",
            ], outfields=["copes", "varcbs"]),
                             name="datasource")
        datasource.inputs.template_args = dict(copes=[['group']],
                                               varcbs=[['group']])
        datasource.inputs.field_template = dict(
            copes="sub-*/ses-*/sub-*_ses-*_trial-%s_cope.nii.gz ",
            varcbs="sub-*/ses-*/sub-*_ses-*_trial-%s_varcb.nii.gz ",
        )
        workflow_connections = [
            (infosource, datasource, [('iterable', 'group')]),
            (infosource, copemerge, [(('iterable', add_suffix, "_cope.nii.gz"),
                                      'merged_file')]),
            (infosource, varcopemerge, [(('iterable', add_suffix,
                                          "_varcb.nii.gz"), 'merged_file')]),
        ]
    datasource.inputs.base_directory = l1_dir
    datasource.inputs.sort_filelist = True
    datasource.inputs.template = "*"

    workflow_connections.extend([
        (datasource, copemerge, [(('copes', datasource_exclude, exclude),
                                  'in_files')]),
        (datasource, varcopemerge, [(('varcbs', datasource_exclude, exclude),
                                     'in_files')]),
        (datasource, level2model, [(('copes', datasource_exclude, exclude,
                                     "len"), 'num_copes')]),
        (copemerge, flameo, [('merged_file', 'cope_file')]),
        (varcopemerge, flameo, [('merged_file', 'var_cope_file')]),
        (level2model, flameo, [('design_mat', 'design_file')]),
        (level2model, flameo, [('design_grp', 'cov_split_file')]),
        (level2model, flameo, [('design_con', 't_con_file')]),
        (flameo, datasink, [('copes', '@copes')]),
        (flameo, datasink, [('fstats', '@fstats')]),
        (flameo, datasink, [('tstats', '@tstats')]),
        (flameo, datasink, [('zstats', '@zstats')]),
    ])

    workdir_name = workflow_name + "_work"
    workflow = pe.Workflow(name=workdir_name)
    workflow.connect(workflow_connections)
    workflow.config = {
        "execution": {
            "crashdump_dir": path.join(l2_dir, "crashdump")
        }
    }
    workflow.base_dir = l2_dir
    workflow.write_graph(dotfilename=path.join(workflow.base_dir, workdir_name,
                                               "graph.dot"),
                         graph2use="hierarchical",
                         format="png")

    if not loud:
        try:
            workflow.run(plugin="MultiProc", plugin_args={'n_procs': nprocs})
        except RuntimeError:
            print(
                "WARNING: Some expected scans have not been found (or another RuntimeError has occured)."
            )
        for f in listdir(getcwd()):
            if re.search("crash.*?-varcopemerge|-copemerge.*", f):
                remove(path.join(getcwd(), f))
    else:
        workflow.run(plugin="MultiProc", plugin_args={'n_procs': nprocs})

    if not keep_work:
        shutil.rmtree(path.join(l2_dir, workdir_name))
#Wraps command **applywarp**
NodeHash_2b7ae5e0 = pe.MapNode(interface=fsl.ApplyWarp(),
                               name='NodeName_2b7ae5e0',
                               iterfield=['field_file', 'in_file', 'premat'])
NodeHash_2b7ae5e0.inputs.interp = 'trilinear'

#Wraps command **applywarp**
NodeHash_2f7b4860 = pe.MapNode(interface=fsl.ApplyWarp(),
                               name='NodeName_2f7b4860',
                               iterfield=['field_file', 'in_file', 'premat'])
NodeHash_2f7b4860.inputs.interp = 'trilinear'

#Wraps command **fslmerge**
NodeHash_2e8e9e00 = pe.Node(interface=fsl.Merge(), name='NodeName_2e8e9e00')
NodeHash_2e8e9e00.inputs.dimension = 't'

#Wraps command **flameo**
NodeHash_313ca880 = pe.Node(interface=fsl.FLAMEO(), name='NodeName_313ca880')
NodeHash_313ca880.inputs.run_mode = 'flame1'

#Wraps command **smoothest**
NodeHash_314ce330 = pe.Node(interface=fsl.SmoothEstimate(),
                            name='NodeName_314ce330')

#Wraps command **cluster**
NodeHash_332d21c0 = pe.Node(interface=fsl.Cluster(), name='NodeName_332d21c0')
NodeHash_332d21c0.inputs.pthreshold = 0.05
NodeHash_332d21c0.inputs.threshold = 2.3

#Wraps command **fslmerge**
NodeHash_33d80690 = pe.Node(interface=fsl.Merge(), name='NodeName_33d80690')
NodeHash_33d80690.inputs.dimension = 't'
Example #25
                          name="copemerge")

varcopemerge = pe.Node(interface=fsl.Merge(dimension='t'),
                       name="varcopemerge")

maskemerge = pe.Node(interface=fsl.Merge(dimension='t'),
                     name="maskemerge")
#copeImages = glob.glob('/media/Data/work/firstLevelKPE/_subject_id_*/feat_fit/run0.feat/stats/cope1.nii.gz')
#copemerge.inputs.in_files = copeImages



# Configure FSL 2nd level analysis
l2_model = pe.Node(fsl.L2Model(), name='l2_model')

flameo_ols = pe.Node(fsl.FLAMEO(run_mode='ols'), name='flameo_ols')
def _len(inlist):
    print(len(inlist))
    return len(inlist)

### use randomize
rand = pe.Node(fsl.Randomise(), name="randomize")

rand.inputs.mask = '/media/Data/work/KPE_SPM/fslRandomize/group_mask.nii.gz'  # group mask file (was created earlier)
rand.inputs.one_sample_group_mean = True
rand.inputs.tfce = True
rand.inputs.vox_p_values = True
rand.inputs.num_perm = 200
# Thresholding - FDR ################################################
# Calculate pvalues with ztop
Example #26
NodeHash_1e7cc750.inputs.warp_resolution = (10, 10, 10)

#Wraps command **applywarp**
NodeHash_347043c0 = pe.MapNode(interface=fsl.ApplyWarp(),
                               name='NodeName_347043c0',
                               iterfield=['field_file', 'in_file', 'premat'])
NodeHash_347043c0.inputs.interp = 'trilinear'

#Wraps command **applywarp**
NodeHash_d73fcb0 = pe.MapNode(interface=fsl.ApplyWarp(),
                              name='NodeName_d73fcb0',
                              iterfield=['field_file', 'in_file', 'premat'])
NodeHash_d73fcb0.inputs.interp = 'trilinear'

#Wraps command **fslmerge**
NodeHash_264457d0 = pe.Node(interface=fsl.Merge(), name='NodeName_264457d0')
NodeHash_264457d0.inputs.dimension = 't'

#Wraps command **flameo**
NodeHash_882ac40 = pe.Node(interface=fsl.FLAMEO(), name='NodeName_882ac40')
NodeHash_882ac40.inputs.run_mode = 'flame1'

#Wraps command **smoothest**
NodeHash_33f1eba0 = pe.Node(interface=fsl.SmoothEstimate(), name='NodeName_33f1eba0')

#Wraps command **cluster**
NodeHash_1978f9c0 = pe.Node(interface=fsl.Cluster(), name='NodeName_1978f9c0')
NodeHash_1978f9c0.inputs.pthreshold = 0.05
NodeHash_1978f9c0.inputs.threshold = 2.3

#Wraps command **fslmerge**
NodeHash_3c0ae30 = pe.Node(interface=fsl.Merge(), name='NodeName_3c0ae30')
NodeHash_3c0ae30.inputs.dimension = 't'

#Create a workflow to connect all those nodes
Example #27
                       name="copemerge")

varcopemerge = pe.MapNode(interface=fsl.Merge(dimension='t'),
                          iterfield=['in_files'],
                          name="varcopemerge")
"""
Use :class:`nipype.interfaces.fsl.L2Model` to generate subject and condition
specific level 2 model design files
"""

level2model = pe.Node(interface=fsl.L2Model(), name='l2model')
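# For instance, fsl.L2Model(num_copes=3) writes the one-sample group files
# design.mat (a 3x1 column of ones), design.con (a single [1] contrast) and
# design.grp (all three inputs in one variance group).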
"""
Use :class:`nipype.interfaces.fsl.FLAMEO` to estimate a second level model
"""

flameo = pe.MapNode(interface=fsl.FLAMEO(run_mode='fe'),
                    name="flameo",
                    iterfield=['cope_file', 'var_cope_file'])

fixed_fx.connect([
    (copemerge, flameo, [('merged_file', 'cope_file')]),
    (varcopemerge, flameo, [('merged_file', 'var_cope_file')]),
    (level2model, flameo, [('design_mat', 'design_file'),
                           ('design_con', 't_con_file'),
                           ('design_grp', 'cov_split_file')]),
])
"""
Set up first-level workflow
---------------------------

"""
def create_run_flow(name='run_flow'):
    """custom made fixed_effects_workflow for investigating run effects
    
    Inputs:

         inputspec.copes : list of list of cope files (one list per contrast)
         inputspec.varcopes : list of list of varcope files (one list per
                              contrast)
         inputspec.dof_files : degrees of freedom files for each run

    Outputs:

         outputspec.res4d : 4d residual time series
         outputspec.copes : contrast parameter estimates
         outputspec.varcopes : variance of contrast parameter estimates
         outputspec.zstats : z statistics of contrasts
         outputspec.tstats : t statistics of contrasts
    """
    from nipype.interfaces.utility import Function
    from nipype.interfaces import fsl
    from nipype import Node, MapNode, Workflow
    from nipype.interfaces.utility import IdentityInterface

    """
    Instantiate Workflow
    """
    runmodel_dir = '/home/data_oli/run-groups/'
    run_flow = Workflow(name=name)
    inputspec = Node(IdentityInterface(fields=['copes',
                                               'varcopes',
                                               'dof_files'
                                               ]),
                     name='inputspec')

    """
    Merge the copes and varcopes for each condition
    """

    copemerge = MapNode(interface=fsl.Merge(dimension='t'),
                           iterfield=['in_files'],
                           name="copemerge")

    varcopemerge = MapNode(interface=fsl.Merge(dimension='t'),
                              iterfield=['in_files'],
                              name="varcopemerge")

    """
    Oli wrote this function to read the EVs / contrasts for the run model from
    a text file and bring them into shape for level2model
    """

    def get_run_contrast(con_file, ev_file):
        """
        Read the files containing regressor values and contrasts for
        2nd level analysis. Returns them in a shape that is accepted by
        'fsl.MultipleRegressDesign()'.
        
        Parameters
        ----------
        con_file : file
            Text file containing the 2nd-level contrasts. Each row in the
            file is one contrast.
        ev_file : file
            Text file containing regressor values. The header row supplies
            the regressor names (its first field is skipped). The first
            column holds the input name (here: run number); further columns
            hold the regressor values, separated by tabs.

        Returns
        -------
        evdict : dict
            2nd-level regressors, keyed by regressor name.
        runtrast : list
            2nd-level contrasts in a shape accepted by
            fsl.MultipleRegressDesign().
        nl2 : int
            Number of 2nd-level contrasts.
        """

        # create regressor dict
        with open(ev_file, 'rt') as f:
            evlines = [line.split() for line in f.readlines()]
        evnames = evlines[0][1:]
        evweights = [list(map(float, i[1:])) for i in evlines[1:]]
        evdict = dict()
        for idx, name in enumerate(evnames):
            evdict[name] = [i[idx] for i in evweights]

        # create contrast list
        # TODO: this works with simple main effects. Should be made more flexible later on.
        with open(con_file, 'rt') as f:
            conlines = [i.split() for i in f.readlines()]

        runtrast = []

        for conline in conlines:
            if conline[0] == '#':
                continue
            # if the contrast is a t-test
            elif conline[1] == 'T':
                runtrast.append((conline[0], conline[1],
                                 [conline[2]], [float(conline[3])]))
        nl2 = len(runtrast)
        return evdict, runtrast, nl2
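
    # Illustrative (hypothetical) file contents and the shapes they yield:
    #
    #   behav.txt                      runcontrast.txt
    #   run   reg1   reg2              # name      type  ev    weight
    #   1     1.0    0.0               mean_reg1   T     reg1  1
    #   2     0.0    1.0
    #
    # get_run_contrast would then return
    #   evdict   = {'reg1': [1.0, 0.0], 'reg2': [0.0, 1.0]}
    #   runtrast = [('mean_reg1', 'T', ['reg1'], [1.0])]
    #   nl2      = 1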

    run_contrast = Node(Function(input_names=['con_file', 'ev_file'],
                                 output_names=['evdict', 'runtrast', 'nl2'],
                                 function=get_run_contrast),
                        name='run_contrast')

    run_contrast.inputs.con_file = runmodel_dir + 'runcontrast.txt'
    run_contrast.inputs.ev_file = runmodel_dir + 'behav.txt'

    """
    Generate subject and condition specific level 2 model design files
    """
    level2model = Node(interface=fsl.MultipleRegressDesign(),
                       name='runmodel')

    """
    Estimate a second level model
    """

    flameo = MapNode(interface=fsl.FLAMEO(run_mode='fe'), name="flameo",
                     iterfield=['cope_file', 'var_cope_file'])

    def get_dofvolumes(dof_files, cope_files):
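        # Build a 4D "DOF volume" matching the merged copes: each time point
        # is a constant image holding that run's degrees-of-freedom value, as
        # expected by FLAMEO's dof_var_cope_file input.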
        import os
        import nibabel as nb
        import numpy as np
        img = nb.load(cope_files[0])
        if len(img.shape) > 3:
            out_data = np.zeros(img.shape)
        else:
            out_data = np.zeros(list(img.shape) + [1])
        for i in range(out_data.shape[-1]):
            dof = np.loadtxt(dof_files[i])
            out_data[:, :, :, i] = dof
        filename = os.path.join(os.getcwd(), 'dof_file.nii.gz')
        newimg = nb.Nifti1Image(out_data, None, img.header)
        newimg.to_filename(filename)
        return filename

    gendof = Node(Function(input_names=['dof_files', 'cope_files'],
                           output_names=['dof_volume'],
                           function=get_dofvolumes),
                  name='gendofvolume')

    """
    Connect all the Nodes in the workflow
    """

    outputspec = Node(IdentityInterface(fields=['res4d',
                                                'copes', 'varcopes',
                                                'zstats', 'tstats',
                                                'nl2']),
                      name='outputspec')

    run_flow.connect([(inputspec, copemerge, [('copes', 'in_files')]),
                      (inputspec, varcopemerge, [('varcopes', 'in_files')]),
                      (inputspec, gendof, [('dof_files', 'dof_files')]),
                      (copemerge, gendof, [('merged_file', 'cope_files')]),
                      (copemerge, flameo, [('merged_file', 'cope_file')]),
                      (varcopemerge, flameo, [('merged_file',
                                               'var_cope_file')]),
                      (run_contrast, level2model, [('evdict', 'regressors'),
                                                   ('runtrast', 'contrasts')]),
                      (level2model, flameo, [('design_mat', 'design_file'),
                                             ('design_con', 't_con_file'),
                                             ('design_fts', 'f_con_file'),
                                             ('design_grp', 'cov_split_file')]),
                      (gendof, flameo, [('dof_volume', 'dof_var_cope_file')]),
                      (run_contrast, outputspec, [('nl2', 'nl2')]),
                      (flameo, outputspec, [('res4d', 'res4d'),
                                            ('copes', 'copes'),
                                            ('var_copes', 'varcopes'),
                                            ('zstats', 'zstats'),
                                            ('tstats', 'tstats')
                                            ])
                      ])
    return run_flow
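# Hypothetical usage sketch: copes/varcopes are lists of per-run file lists
# (one inner list per contrast), matching the inputspec described above.
# rf = create_run_flow()
# rf.inputs.inputspec.copes = [['run1_cope1.nii.gz', 'run2_cope1.nii.gz']]
# rf.inputs.inputspec.varcopes = [['run1_varcope1.nii.gz', 'run2_varcope1.nii.gz']]
# rf.inputs.inputspec.dof_files = ['run1_dof', 'run2_dof']
# Note: FLAMEO also requires a mask_file, which this workflow does not wire;
# it would need to be set, e.g.:
# rf.get_node('flameo').inputs.mask_file = 'group_mask.nii.gz'
# rf.run()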
def create_fsl_flame_wf(ftest=False, wf_name='groupAnalysis'):
    """
    FSL `FEAT <http://fsl.fmrib.ox.ac.uk/fsl/fslwiki/FEAT>`_
    BASED Group Analysis

    Parameters
    ----------
    ftest : boolean, optional (default=False)
        An F-test investigates several contrasts at the same time, for
        example to see whether any of them (or any combination of them) is
        significantly non-zero. It also lets you compare the contribution
        of each contrast to the model and decide which ones are significant.
 
    wf_name : string 
        Workflow name
    
    Returns 
    -------
    grp_analysis : workflow object
        Group Analysis workflow object
    
    Notes
    -----
    `Source <https://github.com/openconnectome/C-PAC/blob/master/CPAC/group_analysis/group_analysis_preproc.py>`_
 
    Workflow Inputs::
        
        inputspec.mat_file : string (existing file)
           mat file containing the design matrix

        inputspec.con_file : string (existing file)
           contrast file containing the contrast vectors

        inputspec.grp_file : string (existing file)
           file containing the matrix specifying the groups the covariance
           is split into
        
        inputspec.zmap_files : list (existing nifti files)
           derivative or z-map files, one per subject, on which the group
           analysis is to be run
        
        inputspec.z_threshold : float
            Z Statistic threshold value for cluster thresholding. It is used to 
            determine what level of activation would be statistically significant. 
            Increasing this will result in higher estimates of required effect.
        
        inputspec.p_threshold : float
            Probability threshold for cluster thresholding.
            
        inputspec.fts_file : string (existing file)
           file containing matrix specifying f-contrasts
           
        inputspec.parameters : tuple
            tuple containing the FSLDIR path and the MNI template name,
            e.g. ('/usr/local/fsl/', 'MNI152')
                      
    Workflow Outputs::
    
        outputspec.merged : string (nifti file)
            4D volume file after merging all the derivative 
            files from each specified subject.
            
        outputspec.zstats : list (nifti files)
            Z statistic image for each t contrast
            
        outputspec.zfstats : list (nifti files)
            Z statistic image for each f contrast
        
        outputspec.fstats : list (nifti files)
            F statistic for each contrast  
        
        outputspec.cluster_threshold : list (nifti files)
           the thresholded Z statistic image for each t contrast
        
        outputspec.cluster_index : list (nifti files)
            image of clusters for each t contrast; the values 
            in the clusters are the index numbers as used 
            in the cluster list.
        
        outputspec.cluster_localmax_txt : list (text files)
            local maxima text file for each t contrast, 
            defines the coordinates of maximum value in the cluster
        
        outputspec.overlay_threshold : list (nifti files)
            3D color rendered stats overlay image for t contrast
            After reloading this image, use the Statistics Color 
            Rendering GUI to reload the color look-up-table
        
        outputspec.overlay_rendered_image : list (nifti files)
           2D color rendered stats overlay picture for each t contrast
            
        outputspec.cluster_threshold_zf : list (nifti files)
           the thresholded Z statistic image for each f contrast
        
        outputspec.cluster_index_zf : list (nifti files)
            image of clusters for each f contrast; the values 
            in the clusters are the index numbers as used 
            in the cluster list.
            
        outputspec.cluster_localmax_txt_zf : list (text files)
            local maxima text file for each f contrast, 
            defines the coordinates of maximum value in the cluster
        
        outputspec.overlay_threshold_zf : list (nifti files)
            3D color rendered stats overlay image for f contrast
            After reloading this image, use the Statistics Color 
            Rendering GUI to reload the color look-up-table
        
        outputspec.overlay_rendered_image_zf : list (nifti files)
           2D color rendered stats overlay picture for each f contrast
    
    Order of commands:

    - Merge all the Z-map 3D images into 4D image file.  For details see `fslmerge <http://fsl.fmrib.ox.ac.uk/fsl/fslwiki/Fslutils>`_::
    
        fslmerge -t merged.nii.gz
                    sub01/sca/seed1/sca_Z_FWHM.nii.gz
                    sub02/sca/seed1/sca_Z_FWHM.nii.gz ...
                    
        arguments 
            -t : concatenate images in time
            
    - Create mask specific for analysis. For details see `fslmaths <http://fsl.fmrib.ox.ac.uk/fsl/fslwiki/Fslutils>`_::
    
        fslmaths merged.nii.gz 
                -abs -Tmin -bin mean_mask.nii.gz
        
        arguments 
             -Tmin  : min across time
             -abs   : absolute value
             -bin   : use (current image>0) to binarise
    
    - FSL FLAMEO to perform higher level analysis.  For details see `flameo <http://fsl.fmrib.ox.ac.uk/fsl/fslwiki/FEAT>`_::
        
        flameo --copefile=merged.nii.gz --covsplitfile=anova_with_meanFD.grp --designfile=anova_with_meanFD.mat
               --fcontrastsfile=anova_with_meanFD.fts --ld=stats --maskfile=mean_mask.nii.gz --runmode=ols
               --tcontrastsfile=anova_with_meanFD.con
           
        arguments
            --copefile        : cope regressor data file
            --designfile      : design matrix file
            --maskfile        : mask file
            --tcontrastsfile  : file containing an ASCII matrix specifying the t contrasts
            --fcontrastsfile  : file containing an ASCII matrix specifying the f contrasts
            --runmode         : inference to perform (mixed effects - OLS)
            
    - Run FSL Easy thresh 
        
      Easy thresh is a simple script for carrying out cluster-based thresholding and colour activation overlaying::
        
        easythresh <raw_zstat> <brain_mask> <z_thresh> <prob_thresh> <background_image> <output_root> [--mm]
      
      A separate workflow, easy_thresh, is called to run the easythresh steps.
      
    .. exec::
        from CPAC.group_analysis import create_fsl_flame_wf
        wf = create_fsl_flame_wf()
        wf.write_graph(
            graph2use='orig',
            dotfilename='./images/generated/group_analysis.dot'
        )

    High Level Workflow Graph:
    
    .. image:: ../../images/generated/group_analysis.png
       :width: 800
    
    
    Detailed Workflow Graph:
    
    .. image:: ../../images/generated/group_analysis_detailed.png
       :width: 800

    Examples
    --------
    
    >>> from group_analysis_preproc import create_group_analysis
    >>> preproc = create_group_analysis()
    >>> preproc.inputs.inputspec.mat_file = '../group_models/anova_with_meanFD/anova_with_meanFD.mat'
    >>> preproc.inputs.inputspec.con_file = '../group_models/anova_with_meanFD/anova_with_meanFD.con'
    >>> preproc.inputs.inputspec.grp_file = '../group_models/anova_with_meanFD/anova_with_meanFD.grp'
    >>> preproc.inputs.inputspec.zmap_files = ['subjects/sub01/seeds_rest_Dickstein_DLPFC/sca_Z_FWHM.nii.gz', 
                                               'subjects/sub02/seeds_rest_Dickstein_DLPFC/sca_Z_FWHM.nii.gz']
    >>> preproc.inputs.inputspec.z_threshold = 2.3
    >>> preproc.inputs.inputspec.p_threshold = 0.05
    >>> preproc.inputs.inputspec.parameters = ('/usr/local/fsl/', 'MNI152')
    >>> preproc.run()  -- SKIP doctest
            
    """
    grp_analysis = pe.Workflow(name=wf_name)

    inputnode = pe.Node(util.IdentityInterface(fields=[
        'merged_file', 'merge_mask', 'mat_file', 'con_file', 'grp_file',
        'fts_file', 'z_threshold', 'p_threshold', 'parameters'
    ]),
                        name='inputspec')

    outputnode = pe.Node(util.IdentityInterface(fields=[
        'merged', 'zstats', 'zfstats', 'fstats', 'cluster_threshold',
        'cluster_index', 'cluster_localmax_txt', 'overlay_threshold',
        'rendered_image', 'cluster_localmax_txt_zf', 'cluster_threshold_zf',
        'cluster_index_zf', 'overlay_threshold_zf', 'rendered_image_zf'
    ]),
                         name='outputspec')
    '''
    merge_to_4d = pe.Node(interface=fsl.Merge(),
                          name='merge_to_4d')
    merge_to_4d.inputs.dimension = 't'

    ### create analysis specific mask
    #-Tmin: min across time
    # -abs: absolute value
    #-bin: use (current image>0) to binarise
    merge_mask = pe.Node(interface=fsl.ImageMaths(),
                         name='merge_mask')
    merge_mask.inputs.op_string = '-abs -Tmin -bin'
    '''

    fsl_flameo = pe.Node(interface=fsl.FLAMEO(), name='fsl_flameo')
    fsl_flameo.inputs.run_mode = 'ols'

    # rename the FLAME zstat outputs after the contrast string labels for
    # easier interpretation
    label_zstat_imports = ["import os"]
    label_zstat = pe.Node(util.Function(input_names=['zstat_list', 'con_file'],
                                        output_names=['new_zstat_list'],
                                        function=label_zstat_files,
                                        imports=label_zstat_imports),
                          name='label_zstat')

    rename_zstats = pe.MapNode(interface=util.Rename(),
                               name='rename_zstats',
                               iterfield=['in_file', 'format_string'])
    rename_zstats.inputs.keep_ext = True

    # create analysis specific mask
    # fslmaths merged.nii.gz -abs -bin -Tmean -mul volume out.nii.gz
    # -Tmean: mean across time
    # create group_reg file
    # this file can provide an idea of how well the subjects
    # in our analysis overlay with each other and the MNI brain.
    # e.g., maybe there is one subject with limited coverage.
    # not attached to sink currently
    merge_mean_mask = pe.Node(interface=fsl.ImageMaths(),
                              name='merge_mean_mask')

    # function node to get the operation string for fslmaths command
    get_opstring = pe.Node(util.Function(input_names=['in_file'],
                                         output_names=['out_file'],
                                         function=get_operation),
                           name='get_opstring')
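    # get_operation is defined elsewhere in CPAC; judging from the fslmaths
    # comment above, it presumably returns an op_string such as
    # '-abs -bin -Tmean -mul <n_volumes>' for the merged file.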

    # connections
    '''
    grp_analysis.connect(inputnode, 'zmap_files',
                         merge_to_4d, 'in_files')
    grp_analysis.connect(merge_to_4d, 'merged_file',
                         merge_mask, 'in_file')
    '''
    grp_analysis.connect(inputnode, 'merged_file', fsl_flameo, 'cope_file')
    grp_analysis.connect(inputnode, 'merge_mask', fsl_flameo, 'mask_file')
    grp_analysis.connect(inputnode, 'mat_file', fsl_flameo, 'design_file')
    grp_analysis.connect(inputnode, 'con_file', fsl_flameo, 't_con_file')
    grp_analysis.connect(inputnode, 'grp_file', fsl_flameo, 'cov_split_file')

    grp_analysis.connect(fsl_flameo, 'zstats', label_zstat, 'zstat_list')
    grp_analysis.connect(inputnode, 'con_file', label_zstat, 'con_file')

    grp_analysis.connect(fsl_flameo, 'zstats', rename_zstats, 'in_file')

    grp_analysis.connect(label_zstat, 'new_zstat_list', rename_zstats,
                         'format_string')

    if ftest:
        grp_analysis.connect(inputnode, 'fts_file', fsl_flameo, 'f_con_file')

        easy_thresh_zf = easy_thresh('easy_thresh_zf')

        grp_analysis.connect(fsl_flameo, 'zfstats', easy_thresh_zf,
                             'inputspec.z_stats')
        grp_analysis.connect(inputnode, 'merge_mask', easy_thresh_zf,
                             'inputspec.merge_mask')
        grp_analysis.connect(inputnode, 'z_threshold', easy_thresh_zf,
                             'inputspec.z_threshold')
        grp_analysis.connect(inputnode, 'p_threshold', easy_thresh_zf,
                             'inputspec.p_threshold')
        grp_analysis.connect(inputnode, 'parameters', easy_thresh_zf,
                             'inputspec.parameters')
        grp_analysis.connect(easy_thresh_zf, 'outputspec.cluster_threshold',
                             outputnode, 'cluster_threshold_zf')
        grp_analysis.connect(easy_thresh_zf, 'outputspec.cluster_index',
                             outputnode, 'cluster_index_zf')
        grp_analysis.connect(easy_thresh_zf, 'outputspec.cluster_localmax_txt',
                             outputnode, 'cluster_localmax_txt_zf')
        grp_analysis.connect(easy_thresh_zf, 'outputspec.overlay_threshold',
                             outputnode, 'overlay_threshold_zf')
        grp_analysis.connect(easy_thresh_zf, 'outputspec.rendered_image',
                             outputnode, 'rendered_image_zf')

    # calling easythresh for zstats files
    easy_thresh_z = easy_thresh('easy_thresh_z')
    grp_analysis.connect(rename_zstats, 'out_file', easy_thresh_z,
                         'inputspec.z_stats')
    grp_analysis.connect(inputnode, 'merge_mask', easy_thresh_z,
                         'inputspec.merge_mask')
    grp_analysis.connect(inputnode, 'z_threshold', easy_thresh_z,
                         'inputspec.z_threshold')
    grp_analysis.connect(inputnode, 'p_threshold', easy_thresh_z,
                         'inputspec.p_threshold')
    grp_analysis.connect(inputnode, 'parameters', easy_thresh_z,
                         'inputspec.parameters')

    grp_analysis.connect(inputnode, 'merged_file', get_opstring, 'in_file')
    grp_analysis.connect(inputnode, 'merged_file', merge_mean_mask, 'in_file')
    grp_analysis.connect(get_opstring, 'out_file', merge_mean_mask,
                         'op_string')

    grp_analysis.connect(fsl_flameo, 'zfstats', outputnode, 'zfstats')
    grp_analysis.connect(fsl_flameo, 'fstats', outputnode, 'fstats')
    grp_analysis.connect(inputnode, 'merged_file', outputnode, 'merged')

    grp_analysis.connect(rename_zstats, 'out_file', outputnode, 'zstats')

    grp_analysis.connect(easy_thresh_z, 'outputspec.cluster_threshold',
                         outputnode, 'cluster_threshold')
    grp_analysis.connect(easy_thresh_z, 'outputspec.cluster_index', outputnode,
                         'cluster_index')
    grp_analysis.connect(easy_thresh_z, 'outputspec.cluster_localmax_txt',
                         outputnode, 'cluster_localmax_txt')
    grp_analysis.connect(easy_thresh_z, 'outputspec.overlay_threshold',
                         outputnode, 'overlay_threshold')
    grp_analysis.connect(easy_thresh_z, 'outputspec.rendered_image',
                         outputnode, 'rendered_image')

    return grp_analysis
# Contrasts
cont01 = ['incong>cong', 'T', list(dictReg.keys()), [1]]
cont02 = ['cong>incong', 'T', list(dictReg.keys()), [-1]]

contrastList = [cont01, cont02]

# Setting up the second level analysis model node
level2design = Node(fsl.MultipleRegressDesign(contrasts=contrastList,
                                              regressors=dictReg),
                    name='level2design')

# Model calculation by FLAMEO
flameo = Node(
    fsl.FLAMEO(
        mask_file=fileMask,  # specifying mask image in flameo
        run_mode='ols'),
    name="flameo")

###########
#
# NODES FOR MERGING THE IMAGES
#
###########
# merging cope files
copemerge = Node(fsl.Merge(dimension='t', in_files=listCopeFiles),
                 name="copemerge")

# merging varcope files
varcopemerge = Node(fsl.Merge(dimension='t', in_files=listVarcopeFiles),
                    name="varcopemerge")