Example 1
def create_2lvl(name="group"):
    import nipype.interfaces.fsl as fsl
    import nipype.pipeline.engine as pe
    import nipype.interfaces.utility as niu
    import nipype.interfaces.io as nio
    wk = pe.Workflow(name=name)

    inputspec = pe.Node(
        niu.IdentityInterface(fields=['copes', 'varcopes', 'template']),
        name='inputspec')

    model = pe.Node(fsl.L2Model(), name='l2model')

    wk.connect(inputspec, ('copes', get_len), model, 'num_copes')

    mergecopes = pe.Node(fsl.Merge(dimension='t'), name='merge_copes')
    mergevarcopes = pe.Node(fsl.Merge(dimension='t'), name='merge_varcopes')

    flame = pe.Node(fsl.FLAMEO(run_mode='ols'), name='flameo')
    wk.connect(inputspec, 'copes', mergecopes, 'in_files')
    wk.connect(inputspec, 'varcopes', mergevarcopes, 'in_files')
    wk.connect(model, 'design_mat', flame, 'design_file')
    wk.connect(model, 'design_con', flame, 't_con_file')
    wk.connect(mergecopes, 'merged_file', flame, 'cope_file')
    wk.connect(mergevarcopes, 'merged_file', flame, 'var_cope_file')
    wk.connect(model, 'design_grp', flame, 'cov_split_file')

    bet = pe.Node(fsl.BET(mask=True, frac=0.3), name="template_brainmask")
    wk.connect(inputspec, 'template', bet, 'in_file')
    wk.connect(bet, 'mask_file', flame, 'mask_file')

    outputspec = pe.Node(niu.IdentityInterface(fields=[
        'zstat', 'tstat', 'cope', 'varcope', 'mrefvars', 'pes', 'res4d',
        'mask', 'tdof', 'weights', 'pstat'
    ]),
                         name='outputspec')

    wk.connect(flame, 'copes', outputspec, 'cope')
    wk.connect(flame, 'var_copes', outputspec, 'varcope')
    wk.connect(flame, 'mrefvars', outputspec, 'mrefvars')
    wk.connect(flame, 'pes', outputspec, 'pes')
    wk.connect(flame, 'res4d', outputspec, 'res4d')
    wk.connect(flame, 'weights', outputspec, 'weights')
    wk.connect(flame, 'zstats', outputspec, 'zstat')
    wk.connect(flame, 'tstats', outputspec, 'tstat')
    wk.connect(flame, 'tdof', outputspec, 'tdof')
    wk.connect(bet, 'mask_file', outputspec, 'mask')

    ztopval = pe.MapNode(interface=fsl.ImageMaths(op_string='-ztop',
                                                  suffix='_pval'),
                         name='z2pval',
                         iterfield=['in_file'])

    wk.connect(flame, 'zstats', ztopval, 'in_file')
    wk.connect(ztopval, 'out_file', outputspec, 'pstat')

    return wk
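Note that create_2lvl references a helper, get_len, that is not shown in the excerpt; it must exist at module level where the workflow is defined. A minimal sketch of the helper plus a hypothetical invocation (the file names are placeholders):

def get_len(x):
    # Connect-helper: nipype applies this to the 'copes' list and feeds
    # the result into L2Model's num_copes input.
    return len(x)

wk = create_2lvl(name="group")
wk.base_dir = "."
wk.inputs.inputspec.copes = ["sub-01_cope1.nii.gz", "sub-02_cope1.nii.gz"]
wk.inputs.inputspec.varcopes = ["sub-01_varcope1.nii.gz",
                                "sub-02_varcope1.nii.gz"]
wk.inputs.inputspec.template = "mni_template.nii.gz"
wk.run()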
Example 2
def create_2lvl_rand(name="group_randomize"):
    import nipype.interfaces.fsl as fsl
    import nipype.pipeline.engine as pe
    import nipype.interfaces.utility as niu
    import nipype.interfaces.io as nio
    wk = pe.Workflow(name=name)

    inputspec = pe.Node(
        niu.IdentityInterface(fields=['copes', 'varcopes', 'template']),
        name='inputspec')

    model = pe.Node(fsl.L2Model(), name='l2model')

    wk.connect(inputspec, ('copes', get_len), model, 'num_copes')

    mergecopes = pe.Node(fsl.Merge(dimension='t'), name='merge_copes')
    mergevarcopes = pe.Node(fsl.Merge(dimension='t'), name='merge_varcopes')

    rand = pe.Node(fsl.Randomise(base_name='OneSampleT',
                                 raw_stats_imgs=True,
                                 tfce=True),
                   name='randomize')

    wk.connect(inputspec, 'copes', mergecopes, 'in_files')
    wk.connect(inputspec, 'varcopes', mergevarcopes, 'in_files')
    wk.connect(model, 'design_mat', rand, 'design_mat')
    wk.connect(model, 'design_con', rand, 'tcon')
    wk.connect(mergecopes, 'merged_file', rand, 'in_file')
    #wk.connect(model,'design_grp',rand,'cov_split_file')

    bet = pe.Node(fsl.BET(mask=True, frac=0.3), name="template_brainmask")
    wk.connect(inputspec, 'template', bet, 'in_file')
    wk.connect(bet, 'mask_file', rand, 'mask')

    outputspec = pe.Node(niu.IdentityInterface(fields=[
        'f_corrected_p_files', 'f_p_files', 'fstat_files',
        't_corrected_p_files', 't_p_files', 'tstat_file', 'mask'
    ]),
                         name='outputspec')

    wk.connect(rand, 'f_corrected_p_files', outputspec, 'f_corrected_p_files')
    wk.connect(rand, 'f_p_files', outputspec, 'f_p_files')
    wk.connect(rand, 'fstat_files', outputspec, 'fstat_files')
    wk.connect(rand, 't_corrected_p_files', outputspec, 't_corrected_p_files')
    wk.connect(rand, 't_p_files', outputspec, 't_p_files')
    wk.connect(rand, 'tstat_files', outputspec, 'tstat_file')
    wk.connect(bet, 'mask_file', outputspec, 'mask')

    return wk
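This variant merges the varcopes but never consumes them: randomise runs a one-sample permutation test on the merged copes alone, so the merge_varcopes node is vestigial here. Usage mirrors the FLAMEO workflow above; a hypothetical invocation with placeholder paths:

wk = create_2lvl_rand()
wk.base_dir = "."
wk.inputs.inputspec.copes = ["sub-01_cope1.nii.gz", "sub-02_cope1.nii.gz"]
wk.inputs.inputspec.varcopes = ["sub-01_varcope1.nii.gz",
                                "sub-02_varcope1.nii.gz"]
wk.inputs.inputspec.template = "mni_template.nii.gz"
wk.run()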
Example 3
    def _run_interface(self, runtime):

        n_con = len(self.inputs.contrasts)

        # Find the basic geometry of the image
        img = nib.load(self.inputs.copes[0])
        x, y, z = img.shape
        aff, hdr = img.affine, img.header

        # Get lists of files for each contrast
        copes = self._unpack_files(self.inputs.copes, n_con)
        varcopes = self._unpack_files(self.inputs.varcopes, n_con)

        # Make an image with the DOF for each run
        dofs = np.array([np.loadtxt(f) for f in self.inputs.dofs])
        dof_data = np.ones((x, y, z, len(dofs))) * dofs

        # Find the intersection of the masks
        mask_data = [nib.load(f).get_fdata() for f in self.inputs.masks]
        common_mask = np.all(mask_data, axis=0).astype(np.uint8)
        nib.Nifti1Image(common_mask, aff, hdr).to_filename("mask.nii.gz")

        # Run the flame models
        flame_results = []
        zstat_files = []
        for i, contrast in enumerate(self.inputs.contrasts):

            # Load each run of cope and varcope files into a list
            cs = [nib.load(f).get_fdata()[..., np.newaxis] for f in copes[i]]
            vs = [nib.load(f).get_fdata()[..., np.newaxis] for f in varcopes[i]]

            # Find all of the nonzero copes
            # This handles cases where there were no events for some of
            # the runs for the contrast we're currently dealing with
            good_cs = [not np.allclose(d, 0) for d in cs]
            good_vs = [not np.allclose(d, 0) for d in vs]
            good = np.all([good_cs, good_vs], axis=0)

            # Handle the case where no events occurred for this contrast
            if not good.any():
                good = np.ones(len(cs), bool)

            # Concatenate the cope and varcope data, save only the good frames
            c_data = np.concatenate(cs, axis=-1)[:, :, :, good]
            v_data = np.concatenate(vs, axis=-1)[:, :, :, good]

            # Write out the concatenated copes and varcopes
            nib.Nifti1Image(c_data, aff, hdr).to_filename("cope_4d.nii.gz")
            nib.Nifti1Image(v_data, aff, hdr).to_filename("varcope_4d.nii.gz")

            # Write out a correctly sized design for this contrast
            fsl.L2Model(num_copes=int(good.sum())).run()

            # Mask the DOF data and write it out for this run
            contrast_dof = dof_data[:, :, :, good]
            nib.Nifti1Image(contrast_dof, aff, hdr).to_filename("dof.nii.gz")

            # Build the flameo command line and run it
            flamecmd = ["flameo",
                        "--cope=cope_4d.nii.gz",
                        "--varcope=varcope_4d.nii.gz",
                        "--mask=mask.nii.gz",
                        "--dvc=dof.nii.gz",
                        "--runmode=fe",
                        "--dm=design.mat",
                        "--tc=design.con",
                        "--cs=design.grp",
                        "--ld=" + contrast,
                        "--npo"]
            runtime = submit_cmdline(runtime, flamecmd)

            # Rename the written file and append to the outputs
            for kind in ["cope", "varcope"]:
                os.rename(kind + "_4d.nii.gz",
                          "%s/%s_4d.nii.gz" % (contrast, kind))

            # Put the zstats and mask on the surface
            for hemi in ["lh", "rh"]:
                projcmd = ["mri_vol2surf",
                           "--mov", "%s/zstat1.nii.gz" % contrast,
                           "--reg", self.inputs.reg_file,
                           "--hemi", hemi,
                           "--projfrac-avg", "0", "1", ".1",
                           "--o", "%s/%s.zstat1.mgz" % (contrast, hemi)]
                submit_cmdline(runtime, projcmd)

                # Mask image
                projcmd = ["mri_vol2surf",
                           "--mov", "%s/mask.nii.gz" % contrast,
                           "--reg", self.inputs.reg_file,
                           "--hemi", hemi,
                           "--projfrac-max", "0", "1", ".1",
                           "--o", "%s/%s.mask.mgz" % (contrast, hemi)]
                submit_cmdline(runtime, projcmd)

            flame_results.append(op.abspath(contrast))
            zstat_files.append(op.abspath("%s/zstat1.nii.gz" % contrast))

        self.flame_results = flame_results
        self.zstat_files = zstat_files

        return runtime
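Two helpers sit outside this excerpt: self._unpack_files, which regroups the flat cope/varcope lists per contrast, and submit_cmdline, which executes a command list and folds its output into the nipype runtime object. A hypothetical reconstruction of the unpacking step, assuming the inputs are ordered run-major (all contrasts of run 1, then of run 2, and so on):

    def _unpack_files(self, files, n_con):
        # Hypothetical: split a flat, run-major list so that the i-th
        # sublist holds the files for contrast i across all runs.
        return [files[i::n_con] for i in range(n_con)]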
Example 4
"""
Use :class:`nipype.interfaces.fsl.Merge` to merge the copes and
varcopes for each condition
"""

copemerge = pe.MapNode(interface=fsl.Merge(dimension='t'),
                       iterfield=['in_files'],
                       name="copemerge")

varcopemerge = pe.MapNode(interface=fsl.Merge(dimension='t'),
                          iterfield=['in_files'],
                          name="varcopemerge")
"""
Use :class:`nipype.interfaces.fsl.L2Model` to generate subject and condition
specific level 2 model design files
"""

level2model = pe.Node(interface=fsl.L2Model(), name='l2model')
"""
Use :class:`nipype.interfaces.fsl.FLAMEO` to estimate a second level model
"""

flameo = pe.MapNode(interface=fsl.FLAMEO(run_mode='fe'),
                    name="flameo",
                    iterfield=['cope_file', 'var_cope_file'])

fixed_fx.connect([
    (copemerge, flameo, [('merged_file', 'cope_file')]),
    (varcopemerge, flameo, [('merged_file', 'var_cope_file')]),
    (level2model, flameo, [('design_mat', 'design_file'),
                           ('design_con', 't_con_file'),
                           ('design_grp', 'cov_split_file')]),
])
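L2Model itself runs no external command: given num_copes it writes a one-sample group-mean design into the working directory. Running it standalone shows the three files that the connections above hand to FLAMEO:

import nipype.interfaces.fsl as fsl

res = fsl.L2Model(num_copes=3).run()
print(res.outputs.design_mat)  # design.mat -> FLAMEO's design_file
print(res.outputs.design_con)  # design.con -> FLAMEO's t_con_file
print(res.outputs.design_grp)  # design.grp -> FLAMEO's cov_split_file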
Example 6
copemerge = pe.Node(interface=fsl.Merge(dimension='t'),
                    name="copemerge")

varcopemerge = pe.Node(interface=fsl.Merge(dimension='t'),
                       name="varcopemerge")

maskemerge = pe.Node(interface=fsl.Merge(dimension='t'),
                     name="maskemerge")
#copeImages = glob.glob('/media/Data/work/firstLevelKPE/_subject_id_*/feat_fit/run0.feat/stats/cope1.nii.gz')
#copemerge.inputs.in_files = copeImages



# Configure FSL 2nd level analysis
l2_model = pe.Node(fsl.L2Model(), name='l2_model')

flameo_ols = pe.Node(fsl.FLAMEO(run_mode='ols'), name='flameo_ols')
def _len(inlist):
    print(len(inlist))
    return len(inlist)

### use randomise
rand = pe.Node(fsl.Randomise(), name="randomize")


rand.inputs.mask = '/media/Data/work/KPE_SPM/fslRandomize/group_mask.nii.gz' # group mask file (was created earlier)
rand.inputs.one_sample_group_mean = True
rand.inputs.tfce = True
rand.inputs.vox_p_values = True
rand.inputs.num_perm = 200
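The excerpt stops before any of these nodes are wired together. A sketch of plausible connections, assuming a workflow wf and a hypothetical datasource node exposing per-subject copes and varcopes lists:

wf = pe.Workflow(name='secondLevel')
wf.connect([
    (datasource, copemerge, [('copes', 'in_files')]),
    (datasource, varcopemerge, [('varcopes', 'in_files')]),
    (datasource, l2_model, [(('copes', _len), 'num_copes')]),
    (copemerge, flameo_ols, [('merged_file', 'cope_file')]),
    (varcopemerge, flameo_ols, [('merged_file', 'var_cope_file')]),
    (l2_model, flameo_ols, [('design_mat', 'design_file'),
                            ('design_con', 't_con_file'),
                            ('design_grp', 'cov_split_file')]),
    # randomise already carries its mask and one_sample_group_mean inputs,
    # so it only needs the merged 4D cope image:
    (copemerge, rand, [('merged_file', 'in_file')]),
])

flameo_ols would additionally need a mask_file, e.g. the same group mask given to rand.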
Example 7
File: glm.py Project: Doeme/SAMRI
def l2_common_effect(
    l1_dir,
    exclude={},
    groupby="session",
    keep_work=False,
    l2_dir="",
    loud=False,
    tr=1,
    nprocs=6,
    workflow_name="generic",
    mask="/home/chymera/ni_data/templates/ds_QBI_chr_bin.nii.gz",
):

    l1_dir = path.expanduser(l1_dir)
    if not l2_dir:
        l2_dir = path.abspath(path.join(l1_dir, "..", "..", "l2"))

    datafind = nio.DataFinder()
    datafind.inputs.root_paths = l1_dir
    datafind.inputs.match_regex = r'.+/sub-(?P<sub>.+)/ses-(?P<ses>.+)/.*?_trial-(?P<scan>.+)_cope\.nii.gz'
    datafind_res = datafind.run()
    subjects = set(datafind_res.outputs.sub)
    sessions = set(datafind_res.outputs.ses)
    scans = set(datafind_res.outputs.scan)

    copemerge = pe.Node(interface=fsl.Merge(dimension='t'), name="copemerge")
    varcopemerge = pe.Node(interface=fsl.Merge(dimension='t'),
                           name="varcopemerge")

    level2model = pe.Node(interface=fsl.L2Model(), name='level2model')

    flameo = pe.Node(interface=fsl.FLAMEO(), name="flameo")
    flameo.inputs.mask_file = mask
    flameo.inputs.run_mode = "ols"

    datasink = pe.Node(nio.DataSink(), name='datasink')
    datasink.inputs.base_directory = path.join(l2_dir, workflow_name)
    datasink.inputs.substitutions = [
        ('_iterable_', ''),
    ]

    if groupby == "subject":
        infosource = pe.Node(
            interface=util.IdentityInterface(fields=['iterable']),
            name="infosource")
        infosource.iterables = [('iterable', subjects)]
        datasource = pe.Node(interface=nio.DataGrabber(
            infields=[
                "group",
            ], outfields=["copes", "varcbs"]),
                             name="datasource")
        datasource.inputs.template_args = dict(copes=[['group', 'group']],
                                               varcbs=[['group', 'group']])
        datasource.inputs.field_template = dict(
            copes="sub-%s/ses-*/sub-%s_ses-*_trial-*_cope.nii.gz",
            varcbs="sub-%s/ses-*/sub-%s_ses-*_trial-*_varcb.nii.gz",
        )
        workflow_connections = [
            (infosource, datasource, [('iterable', 'group')]),
            (infosource, copemerge, [(('iterable', add_suffix, "_cope.nii.gz"),
                                      'merged_file')]),
            (infosource, varcopemerge, [(('iterable', add_suffix,
                                          "_varcb.nii.gz"), 'merged_file')]),
        ]
    elif groupby == "subject_scan":
        #does not currently work, due to missing iterator combinations (same issue as preprocessing)
        merge = pe.Node(interface=util.Merge(2), name="merge")
        infosource = pe.Node(
            interface=util.IdentityInterface(fields=['subject', 'scan']),
            name="infosource")
        infosource.iterables = [('subject', subjects), ('scan', scans)]
        datasource = pe.Node(interface=nio.DataGrabber(
            infields=[
                "subject",
                "scan",
            ], outfields=["copes", "varcbs"]),
                             name="datasource")
        datasource.inputs.template_args = dict(copes=[[
            "subject",
            "subject",
            "scan",
        ]],
                                               varcbs=[[
                                                   "subject",
                                                   "subject",
                                                   "scan",
                                               ]])
        datasource.inputs.field_template = dict(
            copes="sub-%s/ses-*/sub-%s_ses-*_trial-%s_cope.nii.gz",
            varcbs="sub-%s/ses-*/sub-%s_ses-*_trial-%s_varcb.nii.gz",
        )
        workflow_connections = [
            (infosource, datasource, [('subject', 'subject'),
                                      ('scan', 'scan')]),
            (infosource, merge, [('subject', 'in1'), ('scan', 'in2')]),
            (merge, copemerge, [(('out', add_suffix, "_cope.nii.gz"),
                                 'merged_file')]),
            (merge, varcopemerge, [(('out', add_suffix, "_varcb.nii.gz"),
                                    'merged_file')]),
        ]
    elif groupby == "session":
        infosource = pe.Node(
            interface=util.IdentityInterface(fields=['iterable']),
            name="infosource")
        infosource.iterables = [('iterable', sessions)]
        datasource = pe.Node(interface=nio.DataGrabber(
            infields=[
                "group",
            ], outfields=["copes", "varcbs"]),
                             name="datasource")
        datasource.inputs.template_args = dict(copes=[['group', 'group']],
                                               varcbs=[['group', 'group']])
        datasource.inputs.field_template = dict(
            copes="sub-*/ses-%s/sub-*_ses-%s_trial-*_cope.nii.gz",
            varcbs="sub-*/ses-%s/sub-*_ses-%s_trial-*_varcb.nii.gz",
        )
        workflow_connections = [
            (infosource, datasource, [('iterable', 'group')]),
            (infosource, copemerge, [(('iterable', add_suffix, "_cope.nii.gz"),
                                      'merged_file')]),
            (infosource, varcopemerge, [(('iterable', add_suffix,
                                          "_varcb.nii.gz"), 'merged_file')]),
        ]
    elif groupby == "scan":
        infosource = pe.Node(
            interface=util.IdentityInterface(fields=['iterable']),
            name="infosource")
        infosource.iterables = [('iterable', scans)]
        datasource = pe.Node(interface=nio.DataGrabber(
            infields=[
                "group",
            ], outfields=["copes", "varcbs"]),
                             name="datasource")
        datasource.inputs.template_args = dict(copes=[['group']],
                                               varcbs=[['group']])
        datasource.inputs.field_template = dict(
            copes="sub-*/ses-*/sub-*_ses-*_trial-%s_cope.nii.gz",
            varcbs="sub-*/ses-*/sub-*_ses-*_trial-%s_varcb.nii.gz",
        )
        workflow_connections = [
            (infosource, datasource, [('iterable', 'group')]),
            (infosource, copemerge, [(('iterable', add_suffix, "_cope.nii.gz"),
                                      'merged_file')]),
            (infosource, varcopemerge, [(('iterable', add_suffix,
                                          "_varcb.nii.gz"), 'merged_file')]),
        ]
    datasource.inputs.base_directory = l1_dir
    datasource.inputs.sort_filelist = True
    datasource.inputs.template = "*"

    workflow_connections.extend([
        (datasource, copemerge, [(('copes', datasource_exclude, exclude),
                                  'in_files')]),
        (datasource, varcopemerge, [(('varcbs', datasource_exclude, exclude),
                                     'in_files')]),
        (datasource, level2model, [(('copes', datasource_exclude, exclude,
                                     "len"), 'num_copes')]),
        (copemerge, flameo, [('merged_file', 'cope_file')]),
        (varcopemerge, flameo, [('merged_file', 'var_cope_file')]),
        (level2model, flameo, [('design_mat', 'design_file')]),
        (level2model, flameo, [('design_grp', 'cov_split_file')]),
        (level2model, flameo, [('design_con', 't_con_file')]),
        (flameo, datasink, [('copes', '@copes')]),
        (flameo, datasink, [('fstats', '@fstats')]),
        (flameo, datasink, [('tstats', '@tstats')]),
        (flameo, datasink, [('zstats', '@zstats')]),
    ])

    workdir_name = workflow_name + "_work"
    workflow = pe.Workflow(name=workdir_name)
    workflow.connect(workflow_connections)
    workflow.config = {
        "execution": {
            "crashdump_dir": path.join(l2_dir, "crashdump")
        }
    }
    workflow.base_dir = l2_dir
    workflow.write_graph(dotfilename=path.join(workflow.base_dir, workdir_name,
                                               "graph.dot"),
                         graph2use="hierarchical",
                         format="png")

    if not loud:
        try:
            workflow.run(plugin="MultiProc", plugin_args={'n_procs': nprocs})
        except RuntimeError:
            print(
                "WARNING: Some expected scans have not been found (or another RuntimeError has occurred)."
            )
        for f in listdir(getcwd()):
            if re.search("crash.*?-varcopemerge|-copemerge.*", f):
                remove(path.join(getcwd(), f))
    else:
        workflow.run(plugin="MultiProc", plugin_args={'n_procs': nprocs})

    if not keep_work:
        shutil.rmtree(path.join(l2_dir, workdir_name))
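add_suffix and datasource_exclude are SAMRI utility functions not shown here. Hedged reconstructions of what the connect tuples require of them:

def add_suffix(name, suffix):
    # Hypothetical: build a merged-file name from the iterated value,
    # e.g. 'ofM' + '_cope.nii.gz'; (subject, scan) tuples get joined first.
    if isinstance(name, (list, tuple)):
        name = "_".join(str(i) for i in name)
    return str(name) + suffix

def datasource_exclude(in_files, excludes, output="files"):
    # Hypothetical: drop paths matching any excluded value; with
    # output="len", return the surviving count for L2Model.num_copes.
    out_files = in_files
    for values in excludes.values():
        out_files = [f for f in out_files
                     if not any(str(v) in f for v in values)]
    return len(out_files) if output == "len" else out_files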
Example 8
def create_fixed_effects_flow(name='fixedfx'):
    """Create a fixed-effects workflow

    This workflow is used to combine registered copes and varcopes across runs
    for an individual subject

    Example
    -------

    >>> fixedfx = create_fixed_effects_flow()
    >>> fixedfx.base_dir = '.'
    >>> fixedfx.inputs.inputspec.copes = [['cope1run1.nii.gz', 'cope1run2.nii.gz'], ['cope2run1.nii.gz', 'cope2run2.nii.gz']] # per contrast
    >>> fixedfx.inputs.inputspec.varcopes = [['varcope1run1.nii.gz', 'varcope1run2.nii.gz'], ['varcope2run1.nii.gz', 'varcope2run2.nii.gz']] # per contrast
    >>> fixedfx.inputs.inputspec.dof_files = ['dofrun1', 'dofrun2'] # per run
    >>> fixedfx.run() #doctest: +SKIP

    Inputs::

         inputspec.copes : list of list of cope files (one list per contrast)
         inputspec.varcopes : list of list of varcope files (one list per
                              contrast)
         inputspec.dof_files : degrees of freedom files for each run

    Outputs::

         outputspec.res4d : 4d residual time series
         outputspec.copes : contrast parameter estimates
         outputspec.varcopes : variance of contrast parameter estimates
         outputspec.zstats : z statistics of contrasts
         outputspec.tstats : t statistics of contrasts
    """

    fixed_fx = pe.Workflow(name=name)

    inputspec = pe.Node(util.IdentityInterface(fields=['copes',
                                                       'varcopes',
                                                       'dof_files'
                                                       ]),
                        name='inputspec')

    """
    Use :class:`nipype.interfaces.fsl.Merge` to merge the copes and
    varcopes for each condition
    """

    copemerge = pe.MapNode(interface=fsl.Merge(dimension='t'),
                           iterfield=['in_files'],
                           name="copemerge")

    varcopemerge = pe.MapNode(interface=fsl.Merge(dimension='t'),
                              iterfield=['in_files'],
                              name="varcopemerge")

    """
    Use :class:`nipype.interfaces.fsl.L2Model` to generate subject and condition
    specific level 2 model design files
    """

    level2model = pe.Node(interface=fsl.L2Model(),
                          name='l2model')

    """
    Use :class:`nipype.interfaces.fsl.FLAMEO` to estimate a second level model
    """

    flameo = pe.MapNode(interface=fsl.FLAMEO(run_mode='fe'), name="flameo",
                        iterfield=['cope_file', 'var_cope_file'])

    def get_dofvolumes(dof_files, cope_files):
        import os
        import nibabel as nb
        import numpy as np
        img = nb.load(cope_files[0])
        out_data = np.zeros(img.shape)
        for i in range(out_data.shape[-1]):
            dof = np.loadtxt(dof_files[i])
            out_data[:, :, :, i] = dof
        filename = os.path.join(os.getcwd(), 'dof_file.nii.gz')
        newimg = nb.Nifti1Image(out_data, None, img.header)
        newimg.to_filename(filename)
        return filename

    gendof = pe.Node(util.Function(input_names=['dof_files', 'cope_files'],
                                   output_names=['dof_volume'],
                                   function=get_dofvolumes),
                     name='gendofvolume')

    outputspec = pe.Node(util.IdentityInterface(fields=['res4d',
                                                        'copes', 'varcopes',
                                                        'zstats', 'tstats']),
                         name='outputspec')

    fixed_fx.connect([(inputspec, copemerge, [('copes', 'in_files')]),
                      (inputspec, varcopemerge, [('varcopes', 'in_files')]),
                      (inputspec, gendof, [('dof_files', 'dof_files')]),
                      (copemerge, gendof, [('merged_file', 'cope_files')]),
                      (copemerge, flameo, [('merged_file', 'cope_file')]),
                      (varcopemerge, flameo, [('merged_file',
                                               'var_cope_file')]),
                      (level2model, flameo, [('design_mat', 'design_file'),
                                            ('design_con', 't_con_file'),
                                            ('design_grp', 'cov_split_file')]),
                      (gendof, flameo, [('dof_volume', 'dof_var_cope_file')]),
                      (flameo, outputspec, [('res4d', 'res4d'),
                                            ('copes', 'copes'),
                                            ('var_copes', 'varcopes'),
                                            ('zstats', 'zstats'),
                                            ('tstats', 'tstats')
                                            ])
                      ])
    return fixed_fx
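One caveat: nothing in this workflow sets or connects level2model.inputs.num_copes, so a caller has to supply it before running; for example, with two runs per contrast:

fixedfx = create_fixed_effects_flow()
fixedfx.inputs.l2model.num_copes = 2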
Example 9
def second_level_wf(output_dir, bids_ref, name='wf_2nd_level'):
    workflow = pe.Workflow(name=name)

    inputnode = pe.Node(niu.IdentityInterface(
        fields=['group_mask', 'in_copes', 'in_varcopes']),
                        name='inputnode')

    outputnode = pe.Node(niu.IdentityInterface(fields=[
        'zstats_raw', 'zstats_fwe', 'zstats_clust', 'clust_index_file',
        'clust_localmax_txt_file'
    ]),
                         name='outputnode')

    # Configure FSL 2nd level analysis
    l2_model = pe.Node(fsl.L2Model(), name='l2_model')
    flameo_ols = pe.Node(fsl.FLAMEO(run_mode='ols'), name='flameo_ols')

    merge_copes = pe.Node(fsl.Merge(dimension='t'), name='merge_copes')
    merge_varcopes = pe.Node(fsl.Merge(dimension='t'), name='merge_varcopes')

    # Thresholding - FDR ################################################
    # Calculate pvalues with ztop
    fdr_ztop = pe.Node(fsl.ImageMaths(op_string='-ztop', suffix='_pval'),
                       name='fdr_ztop')
    # Find FDR threshold: fdr -i zstat1_pval -m <group_mask> -q 0.05
    # fdr_th = <write Nipype interface for fdr>
    # Apply threshold:
    # fslmaths zstat1_pval -mul -1 -add 1 -thr <fdr_th> -mas <group_mask> \
    #     zstat1_thresh_vox_fdr_pstat1

    # Thresholding - FWE ################################################
    # smoothest -r %s -d %i -m %s
    smoothness = pe.Node(fsl.SmoothEstimate(), name='smoothness')
    # ptoz 0.025 -g %f
    # p = 0.05 / 2 for 2-tailed test
    fwe_ptoz = pe.Node(PtoZ(pvalue=0.025), name='fwe_ptoz')
    # fslmaths %s -uthr %s -thr %s nonsignificant
    # fslmaths %s -sub nonsignificant zstat1_thresh
    fwe_nonsig0 = pe.Node(fsl.Threshold(direction='above'), name='fwe_nonsig0')
    fwe_nonsig1 = pe.Node(fsl.Threshold(direction='below'), name='fwe_nonsig1')
    fwe_thresh = pe.Node(fsl.BinaryMaths(operation='sub'), name='fwe_thresh')

    # Thresholding - Cluster ############################################
    # cluster -i %s -c %s -t 3.2 -p 0.025 -d %s --volume=%s  \
    #     --othresh=thresh_cluster_fwe_zstat1 --connectivity=26 --mm
    cluster_kwargs = {
        'connectivity': 26,
        'threshold': 3.2,
        'pthreshold': 0.025,
        'out_threshold_file': True,
        'out_index_file': True,
        'out_localmax_txt_file': True
    }
    cluster_pos = pe.Node(fsl.Cluster(**cluster_kwargs), name='cluster_pos')
    cluster_neg = pe.Node(fsl.Cluster(**cluster_kwargs), name='cluster_neg')
    zstat_inv = pe.Node(fsl.BinaryMaths(operation='mul', operand_value=-1),
                        name='zstat_inv')
    cluster_inv = pe.Node(fsl.BinaryMaths(operation='mul', operand_value=-1),
                          name='cluster_inv')
    cluster_all = pe.Node(fsl.BinaryMaths(operation='add'), name='cluster_all')

    ds_zraw = pe.Node(GroupDerivativesDataSink(base_directory=str(output_dir),
                                               keep_dtype=False,
                                               suffix='zstat',
                                               sub='all'),
                      name='ds_zraw',
                      run_without_submitting=True)
    ds_zraw.inputs.source_file = bids_ref

    ds_zfwe = pe.Node(GroupDerivativesDataSink(base_directory=str(output_dir),
                                               keep_dtype=False,
                                               suffix='zstat',
                                               desc='fwe',
                                               sub='all'),
                      name='ds_zfwe',
                      run_without_submitting=True)
    ds_zfwe.inputs.source_file = bids_ref

    ds_zclust = pe.Node(GroupDerivativesDataSink(
        base_directory=str(output_dir),
        keep_dtype=False,
        suffix='zstat',
        desc='clust',
        sub='all'),
                        name='ds_zclust',
                        run_without_submitting=True)
    ds_zclust.inputs.source_file = bids_ref

    ds_clustidx_pos = pe.Node(GroupDerivativesDataSink(
        base_directory=str(output_dir),
        keep_dtype=False,
        suffix='pclusterindex',
        sub='all'),
                              name='ds_clustidx_pos',
                              run_without_submitting=True)
    ds_clustidx_pos.inputs.source_file = bids_ref

    ds_clustlmax_pos = pe.Node(GroupDerivativesDataSink(
        base_directory=str(output_dir),
        keep_dtype=False,
        suffix='plocalmax',
        desc='intask',
        sub='all'),
                               name='ds_clustlmax_pos',
                               run_without_submitting=True)
    ds_clustlmax_pos.inputs.source_file = bids_ref

    ds_clustidx_neg = pe.Node(GroupDerivativesDataSink(
        base_directory=str(output_dir),
        keep_dtype=False,
        suffix='nclusterindex',
        sub='all'),
                              name='ds_clustidx_neg',
                              run_without_submitting=True)
    ds_clustidx_neg.inputs.source_file = bids_ref

    ds_clustlmax_neg = pe.Node(GroupDerivativesDataSink(
        base_directory=str(output_dir),
        keep_dtype=False,
        suffix='nlocalmax',
        desc='intask',
        sub='all'),
                               name='ds_clustlmax_neg',
                               run_without_submitting=True)
    ds_clustlmax_neg.inputs.source_file = bids_ref

    workflow.connect([
        (inputnode, l2_model, [(('in_copes', _len), 'num_copes')]),
        (inputnode, flameo_ols, [('group_mask', 'mask_file')]),
        (inputnode, smoothness, [('group_mask', 'mask_file'),
                                 (('in_copes', _dof), 'dof')]),
        (inputnode, merge_copes, [('in_copes', 'in_files')]),
        (inputnode, merge_varcopes, [('in_varcopes', 'in_files')]),
        (l2_model, flameo_ols, [('design_mat', 'design_file'),
                                ('design_con', 't_con_file'),
                                ('design_grp', 'cov_split_file')]),
        (merge_copes, flameo_ols, [('merged_file', 'cope_file')]),
        (merge_varcopes, flameo_ols, [('merged_file', 'var_cope_file')]),
        (flameo_ols, smoothness, [('res4d', 'residual_fit_file')]),
        (flameo_ols, fwe_nonsig0, [('zstats', 'in_file')]),
        (fwe_nonsig0, fwe_nonsig1, [('out_file', 'in_file')]),
        (smoothness, fwe_ptoz, [('resels', 'resels')]),
        (fwe_ptoz, fwe_nonsig0, [('zstat', 'thresh')]),
        (fwe_ptoz, fwe_nonsig1, [(('zstat', _neg), 'thresh')]),
        (flameo_ols, fwe_thresh, [('zstats', 'in_file')]),
        (fwe_nonsig1, fwe_thresh, [('out_file', 'operand_file')]),
        (flameo_ols, cluster_pos, [('zstats', 'in_file')]),
        (merge_copes, cluster_pos, [('merged_file', 'cope_file')]),
        (smoothness, cluster_pos, [('volume', 'volume'), ('dlh', 'dlh')]),
        (flameo_ols, zstat_inv, [('zstats', 'in_file')]),
        (zstat_inv, cluster_neg, [('out_file', 'in_file')]),
        (cluster_neg, cluster_inv, [('threshold_file', 'in_file')]),
        (merge_copes, cluster_neg, [('merged_file', 'cope_file')]),
        (smoothness, cluster_neg, [('volume', 'volume'), ('dlh', 'dlh')]),
        (cluster_pos, cluster_all, [('threshold_file', 'in_file')]),
        (cluster_inv, cluster_all, [('out_file', 'operand_file')]),
        (flameo_ols, ds_zraw, [('zstats', 'in_file')]),
        (fwe_thresh, ds_zfwe, [('out_file', 'in_file')]),
        (cluster_all, ds_zclust, [('out_file', 'in_file')]),
        (cluster_pos, ds_clustidx_pos, [('index_file', 'in_file')]),
        (cluster_pos, ds_clustlmax_pos, [('localmax_txt_file', 'in_file')]),
        (cluster_neg, ds_clustidx_neg, [('index_file', 'in_file')]),
        (cluster_neg, ds_clustlmax_neg, [('localmax_txt_file', 'in_file')]),
    ])
    return workflow
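The connections rely on three small helpers (_len, _dof, _neg) defined elsewhere, and the FDR branch is left as comments in the source. Sketches under those assumptions: _dof uses the usual n - 1 degrees of freedom for a one-sample OLS fit, and the FDR call shells out through nipype's generic CommandLine, since nipype ships no dedicated fdr interface:

def _len(inlist):
    return len(inlist)

def _dof(in_copes):
    # residual degrees of freedom: n inputs minus the single group-mean regressor
    return len(in_copes) - 1

def _neg(val):
    return -val

# Hypothetical FDR step following the commented recipe; fdr prints the
# probability threshold to stdout, which would then drive the fslmaths call:
from nipype.interfaces.base import CommandLine
fdr = CommandLine(command='fdr',
                  args='-i zstat1_pval -m group_mask.nii.gz -q 0.05')
result = fdr.run()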
Example 10
NodeHash_2df82970.inputs.dimension = 'T'

#Generic datasink module to store structured outputs
NodeHash_33a4bec0 = pe.Node(interface = io.DataSink(), name = 'NodeName_33a4bec0')
NodeHash_33a4bec0.inputs.base_directory = '/tmp/FIRSTLEVEL'

#Basic interface class to select specific elements from a list
NodeHash_7caa820 = pe.MapNode(interface = utility.Select(), name = 'NodeName_7caa820', iterfield = ['inlist'])
NodeHash_7caa820.inputs.index = 0

#Basic interface class to select specific elements from a list
NodeHash_b8ed090 = pe.MapNode(interface = utility.Select(), name = 'NodeName_b8ed090', iterfield = ['inlist'])
NodeHash_b8ed090.inputs.index = 0

#Generate subject specific second level model
NodeHash_3e74ed0 = pe.Node(interface = fsl.L2Model(), name = 'NodeName_3e74ed0')
NodeHash_3e74ed0.inputs.num_copes = 20

#Custom interface wrapping function Find_fsl_mni_files
NodeHash_2e292140 = pe.Node(interface = firstlevelhelpers.Find_fsl_mni_files, name = 'NodeName_2e292140')

#Wraps command **flirt**
NodeHash_2c370b0 = pe.MapNode(interface = fsl.FLIRT(), name = 'NodeName_2c370b0', iterfield = ['in_file'])
NodeHash_2c370b0.inputs.cost = 'corratio'
NodeHash_2c370b0.inputs.dof = 12
NodeHash_2c370b0.inputs.no_resample = True
NodeHash_2c370b0.inputs.searchr_x = [-90, 90]
NodeHash_2c370b0.inputs.searchr_y = [-90, 90]
NodeHash_2c370b0.inputs.searchr_z = [-90, 90]

#Wraps command **convert_xfm**
Example 13
def l2_common_effect(
    l1_dir,
    groupby="session",
    keep_work=False,
    l2_dir="",
    loud=False,
    tr=1,
    nprocs=6,
    workflow_name="generic",
    mask="~/ni_data/templates/ds_QBI_chr_bin.nii.gz",
    subjects=[],
    sessions=[],
    tasks=[],
    exclude={},
    include={},
):

    l1_dir = path.expanduser(l1_dir)
    if not l2_dir:
        l2_dir = path.abspath(path.join(l1_dir, "..", "..", "l2"))

    mask = path.abspath(path.expanduser(mask))

    datafind = nio.DataFinder()
    datafind.inputs.root_paths = l1_dir
    datafind.inputs.match_regex = r'.+/sub-(?P<sub>[a-zA-Z0-9]+)/ses-(?P<ses>[a-zA-Z0-9]+)/.*?_acq-(?P<acq>[a-zA-Z0-9]+)_task-(?P<task>[a-zA-Z0-9]+)_(?P<mod>[a-zA-Z0-9]+)_(?P<stat>[a-zA-Z0-9]+)\.(?:tsv|nii|nii\.gz)'
    datafind_res = datafind.run()
    data_selection = zip(*[
        datafind_res.outputs.sub, datafind_res.outputs.ses,
        datafind_res.outputs.acq, datafind_res.outputs.task,
        datafind_res.outputs.mod, datafind_res.outputs.stat,
        datafind_res.outputs.out_paths
    ])
    data_selection = [list(i) for i in data_selection]
    data_selection = pd.DataFrame(data_selection,
                                  columns=('subject', 'session', 'acquisition',
                                           'task', 'modality', 'statistic',
                                           'path'))
    data_selection = data_selection.sort_values(['session', 'subject'],
                                                ascending=[1, 1])
    if exclude:
        for key in exclude:
            data_selection = data_selection[~data_selection[key].
                                            isin(exclude[key])]
    if include:
        for key in include:
            data_selection = data_selection[data_selection[key].isin(
                include[key])]

    copemerge = pe.Node(interface=fsl.Merge(dimension='t'), name="copemerge")
    varcopemerge = pe.Node(interface=fsl.Merge(dimension='t'),
                           name="varcopemerge")

    level2model = pe.Node(interface=fsl.L2Model(), name='level2model')

    flameo = pe.Node(interface=fsl.FLAMEO(), name="flameo")
    flameo.inputs.mask_file = mask
    flameo.inputs.run_mode = "ols"

    datasink = pe.Node(nio.DataSink(), name='datasink')
    datasink.inputs.base_directory = path.join(l2_dir, workflow_name)
    datasink.inputs.substitutions = [
        ('_iterable_', ''),
    ]

    if groupby == "subject":
        subjects = data_selection[['subject'
                                   ]].drop_duplicates().values.tolist()

        infosource = pe.Node(
            interface=util.IdentityInterface(fields=['iterable']),
            name="infosource")
        infosource.iterables = [('iterable', subjects)]
        datasource = pe.Node(interface=nio.DataGrabber(
            infields=[
                "group",
            ], outfields=["copes", "varcbs"]),
                             name="datasource")
        datasource.inputs.template_args = dict(copes=[['group', 'group']],
                                               varcbs=[['group', 'group']])
        datasource.inputs.field_template = dict(
            copes="sub-%s/ses-*/sub-%s_ses-*_task-*_cope.nii.gz",
            varcbs="sub-%s/ses-*/sub-%s_ses-*_task-*_varcb.nii.gz",
        )
        workflow_connections = [
            (infosource, datasource, [('iterable', 'group')]),
            (infosource, copemerge, [(('iterable', add_suffix, "_cope.nii.gz"),
                                      'merged_file')]),
            (infosource, varcopemerge, [(('iterable', add_suffix,
                                          "_varcb.nii.gz"), 'merged_file')]),
        ]
    elif groupby == "subject_task":
        #does not currently work, due to missing iterator combinations (same issue as preprocessing)
        merge = pe.Node(interface=util.Merge(2), name="merge")
        infosource = pe.Node(
            interface=util.IdentityInterface(fields=['subject', 'task']),
            name="infosource")
        infosource.iterables = [('subject', subjects), ('task', tasks)]
        datasource = pe.Node(interface=nio.DataGrabber(
            infields=[
                "subject",
                "task",
            ], outfields=["copes", "varcbs"]),
                             name="datasource")
        datasource.inputs.template_args = dict(copes=[[
            "subject",
            "subject",
            "task",
        ]],
                                               varcbs=[[
                                                   "subject",
                                                   "subject",
                                                   "task",
                                               ]])
        datasource.inputs.field_template = dict(
            copes="sub-%s/ses-*/sub-%s_ses-*_task-%s_cope.nii.gz",
            varcbs="sub-%s/ses-*/sub-%s_ses-*_task-%s_varcb.nii.gz",
        )
        workflow_connections = [
            (infosource, datasource, [('subject', 'subject'),
                                      ('task', 'task')]),
            (infosource, merge, [('subject', 'in1'), ('task', 'in2')]),
            (merge, copemerge, [(('out', add_suffix, "_cope.nii.gz"),
                                 'merged_file')]),
            (merge, varcopemerge, [(('out', add_suffix, "_varcb.nii.gz"),
                                    'merged_file')]),
        ]
    elif groupby == "session":
        sessions = data_selection[['session']].drop_duplicates()
        sessions = sessions.T.to_dict().values()

        infosource = pe.Node(
            interface=util.IdentityInterface(fields=['iterable']),
            name="infosource")
        infosource.iterables = [('iterable', sessions)]

        copes = pe.Node(
            name='copes',
            interface=util.Function(
                function=select_from_datafind_df,
                input_names=inspect.getfullargspec(select_from_datafind_df)[0],
                output_names=['selection']))
        copes.inputs.bids_dictionary_override = {'statistic': 'cope'}
        copes.inputs.df = data_selection
        copes.inputs.list_output = True

        varcopes = pe.Node(
            name='varcopes',
            interface=util.Function(
                function=select_from_datafind_df,
                input_names=inspect.getfullargspec(select_from_datafind_df)[0],
                output_names=['selection']))
        varcopes.inputs.bids_dictionary_override = {'statistic': 'varcb'}
        varcopes.inputs.df = data_selection
        varcopes.inputs.list_output = True

        workflow_connections = [
            (infosource, copemerge, [(('iterable', dict_and_suffix, "session",
                                       "_cope.nii.gz"), 'merged_file')]),
            (infosource, varcopemerge, [(('iterable', dict_and_suffix,
                                          "session", "_varcb.nii.gz"),
                                         'merged_file')]),
        ]
    elif groupby == "task":
        infosource = pe.Node(
            interface=util.IdentityInterface(fields=['iterable']),
            name="infosource")
        infosource.iterables = [('iterable', tasks)]
        datasource = pe.Node(interface=nio.DataGrabber(
            infields=[
                "group",
            ], outfields=["copes", "varcbs"]),
                             name="datasource")
        datasource.inputs.template_args = dict(copes=[['group']],
                                               varcbs=[['group']])
        datasource.inputs.field_template = dict(
            copes="sub-*/ses-*/sub-*_ses-*_task-%s_cope.nii.gz",
            varcbs="sub-*/ses-*/sub-*_ses-*_task-%s_varcb.nii.gz",
        )
        workflow_connections = [
            (infosource, datasource, [('iterable', 'group')]),
            (infosource, copemerge, [(('iterable', add_suffix, "_cope.nii.gz"),
                                      'merged_file')]),
            (infosource, varcopemerge, [(('iterable', add_suffix,
                                          "_varcb.nii.gz"), 'merged_file')]),
        ]

    workflow_connections.extend([
        (copes, copemerge, [('selection', 'in_files')]),
        (varcopes, varcopemerge, [('selection', 'in_files')]),
        (varcopes, level2model, [(('selection', mylen), 'num_copes')]),
        (copemerge, flameo, [('merged_file', 'cope_file')]),
        (varcopemerge, flameo, [('merged_file', 'var_cope_file')]),
        (level2model, flameo, [('design_mat', 'design_file')]),
        (level2model, flameo, [('design_grp', 'cov_split_file')]),
        (level2model, flameo, [('design_con', 't_con_file')]),
        (flameo, datasink, [('copes', '@copes')]),
        (flameo, datasink, [('fstats', '@fstats')]),
        (flameo, datasink, [('tstats', '@tstats')]),
        (flameo, datasink, [('zstats', '@zstats')]),
    ])

    workdir_name = workflow_name + "_work"
    workflow = pe.Workflow(name=workdir_name)
    workflow.connect(workflow_connections)
    workflow.config = {
        "execution": {
            "crashdump_dir": path.join(l2_dir, "crashdump")
        }
    }
    workflow.base_dir = l2_dir
    workflow.write_graph(dotfilename=path.join(workflow.base_dir, workdir_name,
                                               "graph.dot"),
                         graph2use="hierarchical",
                         format="png")

    if not loud:
        try:
            workflow.run(plugin="MultiProc", plugin_args={'n_procs': nprocs})
        except RuntimeError:
            print(
                "WARNING: Some expected tasks have not been found (or another RuntimeError has occurred)."
            )
        for f in listdir(getcwd()):
            if re.search("crash.*?-varcopemerge|-copemerge.*", f):
                remove(path.join(getcwd(), f))
    else:
        workflow.run(plugin="MultiProc", plugin_args={'n_procs': nprocs})

    if not keep_work:
        shutil.rmtree(path.join(l2_dir, workdir_name))
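mylen and dict_and_suffix are again SAMRI helpers outside the excerpt. What the connect tuples require of them, sketched hypothetically:

def mylen(x):
    # feeds the number of selected varcopes into L2Model.num_copes
    return len(x)

def dict_and_suffix(d, key, suffix):
    # Hypothetical: pull one field from the iterated BIDS dictionary and
    # append a suffix, e.g. {'session': 'ofM'} -> 'ofM_cope.nii.gz'.
    return str(d[key]) + suffix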
Example 14
            copes = [
                os.path.join(x, 'stats', '%s%i.nii.gz' % ('cope', contrast))
                for x in featdirs
            ]
            varcopes = [
                os.path.join(x, 'stats', '%s%i.nii.gz' % ('varcope', contrast))
                for x in featdirs
            ]

            # define nodes

            copemerge = Node(interface=fsl.Merge(dimension='t',
                                                 in_files=copes),
                             name='copemerge')
            varcopemerge = Node(interface=fsl.Merge(dimension='t',
                                                    in_files=varcopes),
                                name='varcopemerge')
            level2model = Node(interface=fsl.L2Model(num_copes=len(copes)),
                               name='l2model')
            OLS = Node(interface=fsl.FLAMEO(run_mode='ols',
                                            mask_file=groupmaskfile),
                       name='OLS')

            # create workflow

            CNPgroup = Workflow(name='cnp_group')
            CNPgroup.base_dir = outcopedir
            CNPgroup.connect([
                (copemerge, OLS, [('merged_file', 'cope_file')]),
                (varcopemerge, OLS, [('merged_file', 'var_cope_file')]),
                (level2model, OLS, [('design_mat', 'design_file'),
                                    ('design_con', 't_con_file'),
                                    ('design_grp', 'cov_split_file')]),
            ])
Example 15
def l2_common_effect(l1_dir,
	groupby="session",
	keep_work=False,
	loud=False,
	tr=1,
	nprocs=6,
	mask="/usr/share/mouse-brain-atlases/dsurqec_200micron_mask.nii",
	match={},
	n_jobs_percentage=1,
	out_base="",
	subjects=[],
	sessions=[],
	tasks=[],
	exclude={},
	include={},
	workflow_name="generic",
	debug=False,
	target_set=[],
	):
	"""Determine the common effect in a sample of 3D feature maps.

	Parameters
	----------

	n_jobs_percentage : float, optional
		Percentage of the cores present on the machine which to maximally use for deploying jobs in parallel.
	"""

	from samri.pipelines.utils import bids_data_selection

	l1_dir = path.abspath(path.expanduser(l1_dir))
	out_base = path.abspath(path.expanduser(out_base))
	mask=path.abspath(path.expanduser(mask))

	data_selection = bids_data_selection(l1_dir,
		structural_match=False,
		functional_match=match,
		subjects=False,
		sessions=False,
		verbose=True,
		)
	ind = data_selection.index.tolist()

	out_dir = path.join(out_base,workflow_name)
	workdir_name = workflow_name+'_work'
	workdir = path.join(out_base,workdir_name)
	if not os.path.exists(workdir):
		os.makedirs(workdir)

	data_selection = data_selection.sort_values(['session', 'subject'], ascending=[1, 1])
	if exclude:
		for key in exclude:
			data_selection = data_selection[~data_selection[key].isin(exclude[key])]
	if include:
		for key in include:
			data_selection = data_selection[data_selection[key].isin(include[key])]
	data_selection.to_csv(path.join(workdir,'data_selection.csv'))

	copemerge = pe.Node(interface=fsl.Merge(dimension='t'),name="copemerge")
	varcopemerge = pe.Node(interface=fsl.Merge(dimension='t'),name="varcopemerge")

	level2model = pe.Node(interface=fsl.L2Model(),name='level2model')

	flameo = pe.Node(interface=fsl.FLAMEO(), name="flameo")
	flameo.inputs.mask_file = mask
	flameo.inputs.run_mode = "ols"

	datasink = pe.Node(nio.DataSink(), name='datasink')
	datasink.inputs.base_directory = out_dir
	datasink_substitutions = [('_iterable_', '')]

	if groupby == "subject_set":
		datasink_substitutions.extend([('subject', 'sub-')])
		common_fields = ''
		common_fields += 'acq-'+data_selection.acq.drop_duplicates().item()
		try:
			common_fields += '_run-'+data_selection.run.drop_duplicates().item()
		except ValueError:
			pass

		infosource = pe.Node(interface=util.IdentityInterface(fields=['iterable']), name="infosource")
		infosource.iterables = [('iterable', target_set)]

		copes = pe.Node(name='copes', interface=util.Function(function=select_from_datafind_df, input_names=inspect.getfullargspec(select_from_datafind_df)[0], output_names=['selection']))
		copes.inputs.bids_dictionary_override = {'modality':'cope'}
		copes.inputs.df = data_selection
		copes.inputs.list_output = True

		varcopes = pe.Node(name='varcopes', interface=util.Function(function=select_from_datafind_df, input_names=inspect.getfullargspec(select_from_datafind_df)[0], output_names=['selection']))
		varcopes.inputs.bids_dictionary_override = {'modality':'varcb'}
		varcopes.inputs.df = data_selection
		varcopes.inputs.list_output = True

		workflow_connections = [
			(infosource, copes, [('iterable', 'bids_dictionary')]),
			(infosource, varcopes, [('iterable', 'bids_dictionary')]),
			(infosource, copemerge, [(('iterable',dict_and_suffix,"subject","_cope.nii.gz"), 'merged_file')]),
			(infosource, varcopemerge, [(('iterable',dict_and_suffix,"subject","_varcb.nii.gz"), 'merged_file')]),
			]
	if groupby == "subject":
		datasink_substitutions.extend([('subject', 'sub-')])
		common_fields = ''
		common_fields += 'acq-'+data_selection.acq.drop_duplicates().item()
		try:
			common_fields += '_run-'+data_selection.run.drop_duplicates().item()
		except ValueError:
			pass

		subjects = data_selection[['subject']].drop_duplicates()
		# TODO: could not find a better way to convert pandas df column into list of dicts
		subjects_ = subjects.T.to_dict()
		subjects = [subjects_[i] for i in subjects_.keys()]

		infosource = pe.Node(interface=util.IdentityInterface(fields=['iterable']), name="infosource")
		infosource.iterables = [('iterable', subjects)]

		copes = pe.Node(name='copes', interface=util.Function(function=select_from_datafind_df, input_names=inspect.getfullargspec(select_from_datafind_df)[0], output_names=['selection']))
		copes.inputs.bids_dictionary_override = {'modality':'cope'}
		copes.inputs.df = data_selection
		copes.inputs.list_output = True

		varcopes = pe.Node(name='varcopes', interface=util.Function(function=select_from_datafind_df, input_names=inspect.getfullargspec(select_from_datafind_df)[0], output_names=['selection']))
		varcopes.inputs.bids_dictionary_override = {'modality':'varcb'}
		varcopes.inputs.df = data_selection
		varcopes.inputs.list_output = True

		workflow_connections = [
			(infosource, copes, [('iterable', 'bids_dictionary')]),
			(infosource, varcopes, [('iterable', 'bids_dictionary')]),
			(infosource, copemerge, [(('iterable',dict_and_suffix,"subject","_cope.nii.gz"), 'merged_file')]),
			(infosource, varcopemerge, [(('iterable',dict_and_suffix,"subject","_varcb.nii.gz"), 'merged_file')]),
			]
	elif groupby == "subject_task":
		#does not currently work, due to missing iterator combinations (same issue as preprocessing)
		merge = pe.Node(interface=util.Merge(2), name="merge")
		infosource = pe.Node(interface=util.IdentityInterface(fields=['subject','task']), name="infosource")
		infosource.iterables = [('subject', subjects),('task', tasks)]
		datasource = pe.Node(interface=nio.DataGrabber(infields=["subject","task",], outfields=["copes", "varcbs"]), name="datasource")
		datasource.inputs.template_args = dict(
			copes=[["subject","subject","task",]],
			varcbs=[["subject","subject","task",]]
			)
		datasource.inputs.field_template = dict(
			copes="sub-%s/ses-*/sub-%s_ses-*_task-%s_cope.nii.gz",
			varcbs="sub-%s/ses-*/sub-%s_ses-*_task-%s_varcb.nii.gz",
			)
		workflow_connections = [
			(infosource, datasource, [('subject', 'subject'),('task','task')]),
			(infosource, merge, [('subject', 'in1'),('task','in2')]),
			(merge, copemerge, [(('out',add_suffix,"_cope.nii.gz"), 'merged_file')]),
			(merge, varcopemerge, [(('out',add_suffix,"_varcb.nii.gz"), 'merged_file')]),
			]
	elif groupby == "session":
		datasink_substitutions.extend([('session', 'ses-')])
		common_fields = ''
		common_fields += 'acq-'+data_selection.acq.drop_duplicates().item()
		try:
			common_fields += '_run-'+data_selection.run.drop_duplicates().item()
		except ValueError:
			pass

		sessions = data_selection[['session']].drop_duplicates()
		# TODO: could not find a better way to convert pandas df column into list of dicts
		sessions_ = sessions.T.to_dict()
		sessions = [sessions_[i] for i in sessions_.keys()]

		infosource = pe.Node(interface=util.IdentityInterface(fields=['iterable']), name="infosource")
		infosource.iterables = [('iterable', sessions)]

		copes = pe.Node(name='copes', interface=util.Function(function=select_from_datafind_df, input_names=inspect.getfullargspec(select_from_datafind_df)[0], output_names=['selection']))
		copes.inputs.bids_dictionary_override = {'modality':'cope'}
		copes.inputs.df = data_selection
		copes.inputs.list_output = True

		varcopes = pe.Node(name='varcopes', interface=util.Function(function=select_from_datafind_df, input_names=inspect.getfullargspec(select_from_datafind_df)[0], output_names=['selection']))
		varcopes.inputs.bids_dictionary_override = {'modality':'varcb'}
		varcopes.inputs.df = data_selection
		varcopes.inputs.list_output = True

		workflow_connections = [
			(infosource, copes, [('iterable', 'bids_dictionary')]),
			(infosource, varcopes, [('iterable', 'bids_dictionary')]),
			(infosource, copemerge, [(('iterable',dict_and_suffix,"session","_cope.nii.gz"), 'merged_file')]),
			(infosource, varcopemerge, [(('iterable',dict_and_suffix,"session","_varcb.nii.gz"), 'merged_file')]),
			]
	elif groupby == "task":
		datasink_substitutions.extend([('task', 'task-')])
		common_fields = ''
		common_fields += 'acq-'+data_selection.acq.drop_duplicates().item()
		try:
			common_fields += '_run-'+data_selection.run.drop_duplicates().item()
		except ValueError:
			pass
		try:
			common_fields += '_ses-'+data_selection.session.drop_duplicates().item()
		except ValueError:
			pass

		iters = data_selection[['task']].drop_duplicates()
		# TODO: could not find a better way to convert pandas df column into list of dicts
		iters_ = iters.T.to_dict()
		iters = [iters_[i] for i in iters_.keys()]

		infosource = pe.Node(interface=util.IdentityInterface(fields=['iterable']), name="infosource")
		infosource.iterables = [('iterable', iters)]

		copes = pe.Node(name='copes', interface=util.Function(function=select_from_datafind_df, input_names=inspect.getfullargspec(select_from_datafind_df)[0], output_names=['selection']))
		copes.inputs.bids_dictionary_override = {'modality':'cope'}
		copes.inputs.df = data_selection
		copes.inputs.list_output = True

		varcopes = pe.Node(name='varcopes', interface=util.Function(function=select_from_datafind_df, input_names=inspect.getfullargspec(select_from_datafind_df)[0], output_names=['selection']))
		varcopes.inputs.bids_dictionary_override = {'modality':'varcb'}
		varcopes.inputs.df = data_selection
		varcopes.inputs.list_output = True

		workflow_connections = [
			(infosource, copes, [('iterable', 'bids_dictionary')]),
			(infosource, varcopes, [('iterable', 'bids_dictionary')]),
			(infosource, copemerge, [(('iterable',dict_and_suffix,"task","_cope.nii.gz"), 'merged_file')]),
			(infosource, varcopemerge, [(('iterable',dict_and_suffix,"task","_varcb.nii.gz"), 'merged_file')]),
			]
	elif groupby == "none":
		common_fields = ''
		common_fields += 'acq-'+data_selection.acq.drop_duplicates().item()
		common_fields += '_run-'+data_selection.run.drop_duplicates().item()

		datasink_substitutions.extend([('cope1.nii.gz', common_fields+'_'+'cope.nii.gz')])
		datasink_substitutions.extend([('tstat1.nii.gz', common_fields+'_'+'tstat.nii.gz')])
		datasink_substitutions.extend([('zstat1.nii.gz', common_fields+'_'+'zstat.nii.gz')])
		datasink.inputs.substitutions = datasink_substitutions

		copes = pe.Node(name='copes', interface=util.Function(function=select_from_datafind_df, input_names=inspect.getfullargspec(select_from_datafind_df)[0], output_names=['selection']))
		copes.inputs.bids_dictionary_override = {'modality':'cope'}
		copes.inputs.df = data_selection
		copes.inputs.list_output = True

		varcopes = pe.Node(name='varcopes', interface=util.Function(function=select_from_datafind_df, input_names=inspect.getfullargspec(select_from_datafind_df)[0], output_names=['selection']))
		varcopes.inputs.bids_dictionary_override = {'modality':'varcb'}
		varcopes.inputs.df = data_selection
		varcopes.inputs.list_output = True

		copemerge.inputs.merged_file = 'cope.nii.gz'
		varcopemerge.inputs.merged_file = 'varcb.nii.gz'

		workflow_connections = []

	elif groupby == "mtask":
		infosource = pe.Node(interface=util.IdentityInterface(fields=['iterable']), name="infosource")
		infosource.iterables = [('iterable', tasks)]
		datasource = pe.Node(interface=nio.DataGrabber(infields=["group",], outfields=["copes", "varcbs"]), name="datasource")
		datasource.inputs.template_args = dict(
			copes=[['group']],
			varcbs=[['group']]
			)
		datasource.inputs.field_template = dict(
			copes="sub-*/ses-*/sub-*_ses-*_task-%s_cope.nii.gz ",
			varcbs="sub-*/ses-*/sub-*_ses-*_task-%s_varcb.nii.gz ",
			)
		workflow_connections = [
			(infosource, datasource, [('iterable', 'group')]),
			(infosource, copemerge, [(('iterable',add_suffix,"_cope.nii.gz"), 'merged_file')]),
			(infosource, varcopemerge, [(('iterable',add_suffix,"_varcb.nii.gz"), 'merged_file')]),
			]

	datasink_substitutions.extend([('cope1.nii.gz', common_fields+'_'+'cope.nii.gz')])
	datasink_substitutions.extend([('tstat1.nii.gz', common_fields+'_'+'tstat.nii.gz')])
	datasink_substitutions.extend([('zstat1.nii.gz', common_fields+'_'+'zstat.nii.gz')])
	datasink.inputs.substitutions = datasink_substitutions

	workflow_connections.extend([
		(copes, copemerge, [('selection', 'in_files')]),
		(varcopes, varcopemerge, [('selection', 'in_files')]),
		(varcopes, level2model, [(('selection',mylen), 'num_copes')]),
		(copemerge,flameo,[('merged_file','cope_file')]),
		(varcopemerge,flameo,[('merged_file','var_cope_file')]),
		(level2model,flameo, [('design_mat','design_file')]),
		(level2model,flameo, [('design_grp','cov_split_file')]),
		(level2model,flameo, [('design_con','t_con_file')]),
		(flameo, datasink, [('copes', '@copes')]),
		(flameo, datasink, [('fstats', '@fstats')]),
		(flameo, datasink, [('tstats', '@tstats')]),
		(flameo, datasink, [('zstats', '@zstats')]),
		])

	workflow_config = {'execution': {'crashdump_dir': path.join(out_base,'crashdump'),}}
	if debug:
		workflow_config['logging'] = {
			'workflow_level':'DEBUG',
			'utils_level':'DEBUG',
			'interface_level':'DEBUG',
			'filemanip_level':'DEBUG',
			'log_to_file':'true',
			}

	workflow = pe.Workflow(name=workdir_name)
	workflow.connect(workflow_connections)
	workflow.base_dir = out_base
	workflow.config = workflow_config
	workflow.write_graph(dotfilename=path.join(workflow.base_dir,workdir_name,"graph.dot"), graph2use="hierarchical", format="png")

	n_jobs = max(int(round(mp.cpu_count()*n_jobs_percentage)),2)
	if not loud:
		try:
			workflow.run(plugin="MultiProc", plugin_args={'n_procs' : nprocs})
		except RuntimeError:
			print("WARNING: Some expected tasks have not been found (or another RuntimeError has occured).")
		for f in listdir(getcwd()):
			if re.search("crash.*?-varcopemerge|-copemerge.*", f):
				remove(path.join(getcwd(), f))
	else:
		workflow.run(plugin="MultiProc", plugin_args={'n_procs' : n_jobs})
	if not keep_work:
		shutil.rmtree(path.join(out_base,workdir_name))
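A hypothetical invocation, assuming a SAMRI first-level output tree and placeholder paths:

l2_common_effect(
	'~/ni_data/project/l1',
	groupby='session',
	out_base='~/ni_data/project/l2',
	workflow_name='session_effects',
)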