Code example #1
from os import path

import numpy as np
from nilearn.input_data import NiftiMasker
from nipype.interfaces import fsl


def get_signal(substitutions_a, substitutions_b,
	functional_file_template="~/ni_data/ofM.dr/preprocessing/{preprocessing_dir}/sub-{subject}/ses-{session}/func/sub-{subject}_ses-{session}_trial-{scan}.nii.gz",
	mask="~/ni_data/templates/DSURQEc_200micron_bin.nii.gz",
	):

	mask = path.abspath(path.expanduser(mask))

	out_t_names = []
	out_cope_names = []
	out_varcb_names = []
	for substitution in substitutions_a+substitutions_b:
		ts_name = path.abspath(path.expanduser("{subject}_{session}.mat".format(**substitution)))
		out_t_name = path.abspath(path.expanduser("{subject}_{session}_tstat.nii.gz".format(**substitution)))
		out_cope_name = path.abspath(path.expanduser("{subject}_{session}_cope.nii.gz".format(**substitution)))
		out_varcb_name = path.abspath(path.expanduser("{subject}_{session}_varcb.nii.gz".format(**substitution)))
		out_t_names.append(out_t_name)
		out_cope_names.append(out_cope_name)
		out_varcb_names.append(out_varcb_name)
		functional_file = path.abspath(path.expanduser(functional_file_template.format(**substitution)))
		if not path.isfile(ts_name):
			masker = NiftiMasker(mask_img=mask)
			ts = masker.fit_transform(functional_file).T
			ts = np.mean(ts, axis=0)
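			# Note: NumPoints and the PPheights values below are hard-coded for
			# the original dataset rather than derived from the extracted time series.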
			header = "/NumWaves 1\n/NumPoints 1490\n/PPheights 1.308540e+01 4.579890e+00\n\n/Matrix"
			np.savetxt(ts_name, ts, delimiter="\n", header=header, comments="")
		glm = fsl.GLM(in_file=functional_file, design=ts_name, output_type='NIFTI_GZ')
		glm.inputs.contrasts = path.abspath(path.expanduser("run0.con"))
		glm.inputs.out_t_name = out_t_name
		glm.inputs.out_cope = out_cope_name
		glm.inputs.out_varcb_name = out_varcb_name
		print(glm.cmdline)
		glm.run()

	copemerge = fsl.Merge(dimension='t')
	varcopemerge = fsl.Merge(dimension='t')
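
A hedged usage sketch for get_signal: each substitution dict supplies the preprocessing_dir/subject/session/scan fields consumed by the path templates above (all values hypothetical).

substitutions_a = [{'preprocessing_dir': 'generic', 'subject': '4007', 'session': 'ofM', 'scan': 'EPI_CBV'}]
substitutions_b = [{'preprocessing_dir': 'generic', 'subject': '4007', 'session': 'ofM_aF', 'scan': 'EPI_CBV'}]
get_signal(substitutions_a, substitutions_b)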
Code example #2
import os
import sys

from nipype.interfaces import fsl

# Assumes module-level configuration (INPUT, OUTPUT, feat_pfix, feat_sfix,
# ff_data_name) and the write_cmd_out helper defined elsewhere in the project.
def dr_stage1(i, **kwargs):
	subj, mask = i
	ics = os.path.join(OUTPUT, 'melodic_IC.nii.gz')
	desnorm = False
	demean=True
	for i in kwargs.keys():
		if i == 'desnorm':
			if kwargs[i]:
				desnorm = True
			else:
				desnorm = False
		if i == 'ics':
			ics = kwargs[i]
	if not os.path.exists(ics):
		print "cannot locate melodic component maps for dual regression...must exit.."
		print "looked in: ", ics
		sys.exit(0)
	iFile = os.path.join(os.path.dirname(INPUT), subj, feat_pfix + subj + feat_sfix, ff_data_name)
	oFile = os.path.join(OUTPUT, 'stage1', 'dr_stage1_idc_' + subj + '.txt')
	if os.path.exists(oFile):
		return
	mask = os.path.join(OUTPUT, 'stage1', 'mask.nii.gz')
	fsl_glm = fsl.GLM(in_file=iFile, design=ics, terminal_output='stream', out_file=oFile, des_norm=desnorm, demean=demean, mask=mask, output_type='NIFTI_GZ')
	cmd_out = os.path.join(OUTPUT, 'stage1', 'stage1_fslglm_idc_' + subj + '.out')
	write_cmd_out(fsl_glm.cmdline, cmd_out)
	fsl_glm.run()
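
A hedged dispatch sketch for dr_stage1: each work item is a (subject, mask) tuple, so the function can be mapped over subjects, e.g. with multiprocessing (SUBJECTS and MASKS are hypothetical lists).

from functools import partial
from multiprocessing import Pool

work_items = list(zip(SUBJECTS, MASKS))
with Pool(processes=4) as pool:
	pool.map(partial(dr_stage1, desnorm=True), work_items)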
Code example #3
def dr_stage1(self, **kwargs):
    desnorm = True
    for i in kwargs.keys():
        if i == 'desnorm':
            if kwargs[i]:
                desnorm = True
            else:
                desnorm = False
    if desnorm:
        opts_str = '--demean --des_norm'
    else:
        opts_str = '--demean'
    for subj in self.subjects:
        iFile = os.path.join(os.path.dirname(self.indir), subj,
                             'idc_' + subj + self.featdir_sfix,
                             self.ff_data_name)
        oFile = os.path.join(self.outdir, 'stage1',
                             'dr_stage1_idc_' + subj + '.txt')
        mask = os.path.join(self.outdir, 'stage1', 'mask.nii.gz')
        fsl_glm = fsl.GLM(in_file=iFile,
                          design=self.ics,
                          terminal_output='stream',
                          out_file=oFile,
                          mask=mask,
                          options=opts_str,
                          output_type='NIFTI_GZ')
        fsl_glm.run()
Code example #4
def dr_stage2(i, **kwargs):
	subj, mask = i
	regress_moco = True
	desnorm = True
	demean = True
	for i in kwargs.keys():
		if i == 'regress_moco':
			if kwargs[i]:
				regress_moco = True
			else:
				regress_moco = False
		if i == 'desnorm':
			if kwargs[i]:
				desnorm = True
			else:
				desnorm = False
	dr_s1_txt = os.path.join(OUTPUT, 'stage1', 'dr_stage1_idc_' + subj + '.txt')
	moco_txt = os.path.join(os.path.dirname(INPUT), subj, feat_pfix + subj + feat_sfix, 'mc', 'prefiltered_func_data_mcf.par')
	dr_s1 = read_txt_file(dr_s1_txt)
	if regress_moco:
		moco_pars = read_txt_file(moco_txt)
		for i in range(0, len(dr_s1)):
			for j in range(0,6):
				dr_s1[i].append(moco_pars[i][j])
		dr_s1_moco_txt = os.path.join(OUTPUT, 'stage1', 'dr_stage1_moco_idc_' + subj + '.txt')
		write_txt_file(dr_s1, dr_s1_moco_txt)
		designFile = dr_s1_moco_txt
	else:
		designFile = dr_s1_txt
	iFile = os.path.join(os.path.dirname(INPUT), subj, feat_pfix + subj + feat_sfix, ff_data_name)
	oFile = os.path.join(OUTPUT, 'stage2', 'dr_stage2_idc_' + subj + '.nii.gz')
	ozFile = os.path.join(OUTPUT, 'stage2', 'dr_stage2_Z_idc_' + subj + '.nii.gz')
	if os.path.exists(oFile) and os.path.exists(ozFile):
		return
	mask = os.path.join(OUTPUT, 'stage1', 'mask.nii.gz')
	fsl_glm = fsl.GLM(in_file=iFile, design=designFile, terminal_output='stream', out_file=oFile, out_z_name=ozFile, mask=mask, des_norm=desnorm, demean=demean, output_type='NIFTI_GZ')
	cmd_out = os.path.join(OUTPUT, 'stage2', 'stage2_fslglm_idc_' + subj + '.out')
	write_cmd_out(fsl_glm.cmdline, cmd_out)
	fsl_glm.run()
	obname = os.path.join(OUTPUT, 'stage2', 'dr_stage2_idc_' + subj + '_ic')
	fslsplit = fsl.Split(dimension='t', in_file=oFile, out_base_name=obname, terminal_output='stream', output_type='NIFTI_GZ')
	fslsplit.run()
	obname = os.path.join(OUTPUT, 'stage2', 'dr_stage2_Z_idc_' + subj + '_ic')
	fslsplit = fsl.Split(dimension='t', in_file=ozFile, out_base_name=obname, terminal_output='stream', output_type='NIFTI_GZ')
	fslsplit.run()
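
The same calling convention applies to dr_stage2; a hedged sketch (SUBJECTS and MASKS are hypothetical lists):

for item in zip(SUBJECTS, MASKS):
	dr_stage2(item, regress_moco=True, desnorm=True)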
Code example #5
File: rest.py Project: ebohorqu/pipeline
def init_seedconnectivity_wf(seeds, use_mov_pars, name="firstlevel"):
    """
    create workflow to calculate seed connectivity maps
    for resting state functional scans

    :param seeds: dictionary of filenames by user-defined names 
        of binary masks that define the seed regions
    :param use_mov_pars: if true, regress out movement parameters when 
        calculating seed connectivity
    :param name: workflow name (Default value = "firstlevel")

    """
    workflow = pe.Workflow(name=name)

    # inputs are the bold file, the mask file and the confounds file
    # that contains the movement parameters
    inputnode = pe.Node(niu.IdentityInterface(
        fields=["bold_file", "mask_file", "confounds_file"]),
                        name="inputnode")

    # make two (ordered) lists from (unordered) dictionary of seeds
    seednames = list(seeds.keys())  # contains the keys (seed names)
    seed_paths = [seeds[k]
                  for k in seednames]  # contains the values (filenames)

    # calculate the mean time series of the region defined by each mask
    meants = pe.MapNode(interface=fsl.ImageMeants(),
                        name="meants",
                        iterfield=["mask"])
    meants.inputs.mask = seed_paths

    # calculate the regression of the mean time series onto the functional image
    # the result is the seed connectivity map
    glm = pe.MapNode(interface=fsl.GLM(out_file="beta.nii.gz",
                                       out_cope="cope.nii.gz",
                                       out_varcb_name="varcope.nii.gz",
                                       out_z_name="zstat.nii.gz",
                                       demean=True),
                     name="glm",
                     iterfield=["design"])

    # generate dof text file
    gendoffile = pe.Node(interface=Dof(num_regressors=1), name="gendoffile")

    # split regression outputs by name
    splitimgs = pe.Node(
        interface=niu.Split(splits=[1 for seedname in seednames]),
        name="splitimgs")
    splitvarcopes = pe.Node(
        interface=niu.Split(splits=[1 for seedname in seednames]),
        name="splitvarcopes")
    splitzstats = pe.Node(
        interface=niu.Split(splits=[1 for seedname in seednames]),
        name="splitzstats")

    # outputs are cope, varcope and zstat for each seed region and a dof_file
    outputnode = pe.Node(niu.IdentityInterface(fields=sum(
        [["%s_img" % seedname,
          "%s_varcope" % seedname,
          "%s_zstat" % seedname]
         for seedname in seednames], []) + ["dof_file"]),
                         name="outputnode")

    workflow.connect([
        (inputnode, meants, [("bold_file", "in_file")]),
        (inputnode, glm, [("bold_file", "in_file"), ("mask_file", "mask")]),
        (meants, glm, [("out_file", "design")]),
        (glm, splitimgs, [
            ("out_cope", "inlist"),
        ]),
        (glm, splitvarcopes, [
            ("out_varcb", "inlist"),
        ]),
        (glm, splitzstats, [
            ("out_z", "inlist"),
        ]),
        (inputnode, gendoffile, [
            ("bold_file", "in_file"),
        ]),
        (gendoffile, outputnode, [
            ("out_file", "dof_file"),
        ]),
    ])

    # connect outputs named for the seeds
    for i, seedname in enumerate(seednames):
        workflow.connect(splitimgs, "out%i" % (i + 1), outputnode,
                         "%s_img" % seedname)
        workflow.connect(splitvarcopes, "out%i" % (i + 1), outputnode,
                         "%s_varcope" % seedname)
        workflow.connect(splitzstats, "out%i" % (i + 1), outputnode,
                         "%s_zstat" % seedname)

    return workflow, seednames
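
A hedged usage sketch, assuming two hypothetical binary seed-mask files on disk:

seeds = {
    "pcc": "/data/masks/pcc.nii.gz",
    "insula": "/data/masks/insula.nii.gz",
}
wf, seednames = init_seedconnectivity_wf(seeds, use_mov_pars=False)
wf.base_dir = "/tmp/seedconn_work"
# wf.run()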
Code example #6
File: rest.py Project: ebohorqu/pipeline
def init_dualregression_wf(componentsfile, use_mov_pars, name="firstlevel"):
    """
    create a workflow to calculate dual regression for ICA seeds

    :param componentsfile: 4d image file with ica components
    :param use_mov_pars: if true, regress out movement parameters when 
        calculating dual regression
    :param name: workflow name (Default value = "firstlevel")

    """
    workflow = pe.Workflow(name=name)

    # inputs are the bold file, the mask file and the confounds file
    # that contains the movement parameters
    inputnode = pe.Node(niu.IdentityInterface(
        fields=["bold_file", "mask_file", "confounds_file"]),
                        name="inputnode")

    # extract number of ICA components from 4d image and name them
    ncomponents = nib.load(componentsfile).shape[3]
    fname, _ = _splitext(os.path.basename(componentsfile))
    componentnames = ["%s_%d" % (fname, i) for i in range(ncomponents)]

    # first step, calculate spatial regression of ICA components on to the
    # bold file
    glm0 = pe.Node(interface=fsl.GLM(out_file="beta", demean=True),
                   name="glm0")
    glm0.inputs.design = componentsfile

    # second step, calculate the temporal regression of the time series
    # from the first step on to the bold file
    glm1 = pe.Node(interface=fsl.GLM(out_file="beta.nii.gz",
                                     out_cope="cope.nii.gz",
                                     out_varcb_name="varcope.nii.gz",
                                     out_z_name="zstat.nii.gz",
                                     demean=True),
                   name="glm1")

    # split regression outputs into individual images
    splitimgsimage = pe.Node(interface=fsl.Split(dimension="t"),
                             name="splitimgsimage")
    splitvarcopesimage = pe.Node(interface=fsl.Split(dimension="t"),
                                 name="splitvarcopesimage")
    splitzstatsimage = pe.Node(interface=fsl.Split(dimension="t"),
                               name="splitzstatsimage")

    # generate dof text file
    gendoffile = pe.Node(interface=Dof(num_regressors=1), name="gendoffile")

    # outputs are cope, varcope and zstat for each ICA component and a dof_file
    outputnode = pe.Node(niu.IdentityInterface(fields=sum([[
        "%s_img" % componentname,
        "%s_varcope" % componentname,
        "%s_zstat" % componentname
    ] for componentname in componentnames], []) + ["dof_file"]),
                         name="outputnode")

    # split regression outputs by name
    splitimgs = pe.Node(
        interface=niu.Split(splits=[1 for componentname in componentnames]),
        name="splitimgs")
    splitvarcopes = pe.Node(
        interface=niu.Split(splits=[1 for componentname in componentnames]),
        name="splitvarcopes")
    splitzstats = pe.Node(
        interface=niu.Split(splits=[1 for componentname in componentnames]),
        name="splitzstats")

    workflow.connect([
        (inputnode, glm0, [("bold_file", "in_file"), ("mask_file", "mask")]),
        (inputnode, glm1, [("bold_file", "in_file"), ("mask_file", "mask")]),
        (glm0, glm1, [("out_file", "design")]),
        (glm1, splitimgsimage, [
            ("out_cope", "in_file"),
        ]),
        (glm1, splitvarcopesimage, [
            ("out_varcb", "in_file"),
        ]),
        (glm1, splitzstatsimage, [
            ("out_z", "in_file"),
        ]),
        (splitimgsimage, splitimgs, [
            ("out_files", "inlist"),
        ]),
        (splitvarcopesimage, splitvarcopes, [
            ("out_files", "inlist"),
        ]),
        (splitzstatsimage, splitzstats, [
            ("out_files", "inlist"),
        ]),
        (inputnode, gendoffile, [
            ("bold_file", "in_file"),
        ]),
        (gendoffile, outputnode, [
            ("out_file", "dof_file"),
        ]),
    ])

    # connect outputs named for the ICA components
    for i, componentname in enumerate(componentnames):
        workflow.connect(splitimgs, "out%i" % (i + 1), outputnode,
                         "%s_img" % componentname)
        workflow.connect(splitvarcopes, "out%i" % (i + 1), outputnode,
                         "%s_varcope" % componentname)
        workflow.connect(splitzstats, "out%i" % (i + 1), outputnode,
                         "%s_zstat" % componentname)

    return workflow, componentnames
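
A hedged usage sketch; the components file path is hypothetical and must exist when the workflow is built, since the function reads its shape with nibabel:

wf, componentnames = init_dualregression_wf("/data/ica/melodic_IC.nii.gz",
                                            use_mov_pars=False)
wf.base_dir = "/tmp/dualreg_work"
# wf.run()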
Code example #7
File: glm.py Project: setina42/SAMRI
def l1(preprocessing_dir,
	bf_path = '~/ni_data/irfs/chr_beta1.txt',
	debug=False,
	exclude={},
	habituation='confound',
	highpass_sigma=225,
	lowpass_sigma=False,
	include={},
	keep_work=False,
	out_base="",
	mask="",
	match={},
	tr=1,
	workflow_name="generic",
	modality="cbv",
	n_jobs_percentage=1,
	invert=False,
	):
	"""Calculate subject level GLM statistic scores.

	Parameters
	----------

	bf_path : str, optional
		Basis set path. It should point to a text file in the so-called FEAT/FSL "#2" format (1 entry per volume).
	exclude : dict
		A dictionary with any combination of "sessions", "subjects", "tasks" as keys and corresponding identifiers as values.
		If this is specified matching entries will be excluded in the analysis.
	debug : bool, optional
		Whether to enable nipype debug mode.
		This increases logging.
	habituation : {"", "confound", "separate_contrast", "in_main_contrast"}, optional
		How the habituation regressor should be handled.
		Anything which evaluates as False (though we recommend "") means no habituation regressor will be introduced.
	highpass_sigma : int, optional
		Highpass threshold (in seconds).
	include : dict
		A dictionary with any combination of "sessions", "subjects", "tasks" as keys and corresponding identifiers as values.
		If this is specified only matching entries will be included in the analysis.
	invert : bool
		If true the values will be inverted with respect to zero.
		This is commonly used for iron nano-particle Cerebral Blood Volume (CBV) measurements.
	keep_work : bool, optional
		Whether to keep the work directory (containing all the intermediary workflow steps, as managed by nipype).
		This is useful for debugging and quality control.
	out_base : str, optional
		Path to the directory inside which both the working directory and the output directory will be created.
	mask : str, optional
		Path to the brain mask which shall be used to define the brain volume in the analysis.
		This has to point to an existing NIfTI file containing zero and one values only.
	n_jobs_percentage : float, optional
		Percentage of the cores present on the machine which to maximally use for deploying jobs in parallel.
	tr : int, optional
		Repetition time, in seconds.
	workflow_name : str, optional
		Name of the workflow; this will also be the name of the final output directory produced under `out_dir`.
	"""

	from samri.pipelines.utils import bids_data_selection

	preprocessing_dir = path.abspath(path.expanduser(preprocessing_dir))
	out_base = path.abspath(path.expanduser(out_base))

	data_selection = bids_data_selection(preprocessing_dir, structural_match=False, functional_match=match, subjects=False, sessions=False)
	ind = data_selection.index.tolist()

	out_dir = path.join(out_base,workflow_name)
	workdir_name = workflow_name+'_work'
	workdir = path.join(out_base,workdir_name)
	if not os.path.exists(workdir):
		os.makedirs(workdir)
	data_selection.to_csv(path.join(workdir,'data_selection.csv'))

	get_scan = pe.Node(name='get_scan', interface=util.Function(function=get_bids_scan,input_names=inspect.getargspec(get_bids_scan)[0], output_names=['scan_path','scan_type','task', 'nii_path', 'nii_name', 'events_name', 'subject_session', 'metadata_filename', 'dict_slice']))
	get_scan.inputs.ignore_exception = True
	get_scan.inputs.data_selection = data_selection
	get_scan.inputs.bids_base = preprocessing_dir
	get_scan.iterables = ("ind_type", ind)

	eventfile = pe.Node(name='eventfile', interface=util.Function(function=corresponding_eventfile,input_names=inspect.getargspec(corresponding_eventfile)[0], output_names=['eventfile']))

	if invert:
		invert = pe.Node(interface=fsl.ImageMaths(), name="invert")
		invert.inputs.op_string = '-mul -1'

	specify_model = pe.Node(interface=SpecifyModel(), name="specify_model")
	specify_model.inputs.input_units = 'secs'
	specify_model.inputs.time_repetition = tr
	specify_model.inputs.high_pass_filter_cutoff = highpass_sigma

	level1design = pe.Node(interface=Level1Design(), name="level1design")
	level1design.inputs.interscan_interval = tr
	if bf_path:
		bf_path = path.abspath(path.expanduser(bf_path))
		level1design.inputs.bases = {"custom": {"bfcustompath":bf_path}}
	else:
		# We are not adding derivatives here, as these conflict with the habituation option.
		# !!! This is not difficult to solve, and would only require the addition of an elif condition to the habituator definition, which would add multiple column copies for each of the derivs.
		level1design.inputs.bases = {'gamma': {'derivs':True, 'gammasigma':30, 'gammadelay':10}}
	level1design.inputs.model_serial_correlations = True

	modelgen = pe.Node(interface=fsl.FEATModel(), name='modelgen')
	#modelgen.inputs.ignore_exception = True

	glm = pe.Node(interface=fsl.GLM(), name='glm', iterfield='design')
#	glm.inputs.out_cope = "cope.nii.gz"
#	glm.inputs.out_varcb_name = "varcb.nii.gz"
#	#not setting a betas output file might lead to beta export in lieu of COPEs
#	glm.inputs.out_file = "betas.nii.gz"
#	glm.inputs.out_t_name = "t_stat.nii.gz"
#	glm.inputs.out_p_name = "p_stat.nii.gz"
	if mask == 'mouse':
		mask = '/usr/share/mouse-brain-atlases/dsurqec_200micron_mask.nii'
		glm.inputs.mask = mask
	else:
		glm.inputs.mask = path.abspath(path.expanduser(mask))
	glm.interface.mem_gb = 6
	#glm.inputs.ignore_exception = True

	out_file_name_base = 'sub-{{subject}}_ses-{{session}}_task-{{task}}_acq-{{acquisition}}_run-{{run}}_{{modality}}_{}.{}'

	betas_filename = pe.Node(name='betas_filename', interface=util.Function(function=bids_dict_to_source,input_names=inspect.getargspec(bids_dict_to_source)[0], output_names=['filename']))
	betas_filename.inputs.source_format = out_file_name_base.format('betas','nii.gz')
	cope_filename = pe.Node(name='cope_filename', interface=util.Function(function=bids_dict_to_source,input_names=inspect.getargspec(bids_dict_to_source)[0], output_names=['filename']))
	cope_filename.inputs.source_format = out_file_name_base.format('cope','nii.gz')
	varcb_filename = pe.Node(name='varcb_filename', interface=util.Function(function=bids_dict_to_source,input_names=inspect.getargspec(bids_dict_to_source)[0], output_names=['filename']))
	varcb_filename.inputs.source_format = out_file_name_base.format('varcb','nii.gz')
	tstat_filename = pe.Node(name='tstat_filename', interface=util.Function(function=bids_dict_to_source,input_names=inspect.getargspec(bids_dict_to_source)[0], output_names=['filename']))
	tstat_filename.inputs.source_format = out_file_name_base.format('tstat','nii.gz')
	zstat_filename = pe.Node(name='zstat_filename', interface=util.Function(function=bids_dict_to_source,input_names=inspect.getargspec(bids_dict_to_source)[0], output_names=['filename']))
	zstat_filename.inputs.source_format = out_file_name_base.format('zstat','nii.gz')
	pstat_filename = pe.Node(name='pstat_filename', interface=util.Function(function=bids_dict_to_source,input_names=inspect.getargspec(bids_dict_to_source)[0], output_names=['filename']))
	pstat_filename.inputs.source_format = out_file_name_base.format('pstat','nii.gz')
	pfstat_filename = pe.Node(name='pfstat_filename', interface=util.Function(function=bids_dict_to_source,input_names=inspect.getargspec(bids_dict_to_source)[0], output_names=['filename']))
	pfstat_filename.inputs.source_format = out_file_name_base.format('pfstat','nii.gz')
	design_filename = pe.Node(name='design', interface=util.Function(function=bids_dict_to_source,input_names=inspect.getargspec(bids_dict_to_source)[0], output_names=['filename']))
	design_filename.inputs.source_format = out_file_name_base.format('design','mat')

	design_rename = pe.Node(interface=util.Rename(), name='design_rename')

	datasink = pe.Node(nio.DataSink(), name='datasink')
	datasink.inputs.base_directory = path.join(out_base,workflow_name)
	datasink.inputs.parameterization = False

	workflow_connections = [
		(get_scan, eventfile, [('nii_path', 'timecourse_file')]),
		(specify_model, level1design, [('session_info', 'session_info')]),
		(level1design, modelgen, [('ev_files', 'ev_files')]),
		(level1design, modelgen, [('fsf_files', 'fsf_file')]),
		(modelgen, glm, [('design_file', 'design')]),
		(modelgen, glm, [('con_file', 'contrasts')]),
		(get_scan, datasink, [(('dict_slice',bids_dict_to_dir), 'container')]),
		(get_scan, betas_filename, [('dict_slice', 'bids_dictionary')]),
		(get_scan, cope_filename, [('dict_slice', 'bids_dictionary')]),
		(get_scan, varcb_filename, [('dict_slice', 'bids_dictionary')]),
		(get_scan, tstat_filename, [('dict_slice', 'bids_dictionary')]),
		(get_scan, zstat_filename, [('dict_slice', 'bids_dictionary')]),
		(get_scan, pstat_filename, [('dict_slice', 'bids_dictionary')]),
		(get_scan, pfstat_filename, [('dict_slice', 'bids_dictionary')]),
		(get_scan, design_filename, [('dict_slice', 'bids_dictionary')]),
		(betas_filename, glm, [('filename', 'out_file')]),
		(cope_filename, glm, [('filename', 'out_cope')]),
		(varcb_filename, glm, [('filename', 'out_varcb_name')]),
		(tstat_filename, glm, [('filename', 'out_t_name')]),
		(zstat_filename, glm, [('filename', 'out_z_name')]),
		(pstat_filename, glm, [('filename', 'out_p_name')]),
		(pfstat_filename, glm, [('filename', 'out_pf_name')]),
		(modelgen, design_rename, [('design_file', 'in_file')]),
		(design_filename, design_rename, [('filename', 'format_string')]),
		(glm, datasink, [('out_pf', '@pfstat')]),
		(glm, datasink, [('out_p', '@pstat')]),
		(glm, datasink, [('out_z', '@zstat')]),
		(glm, datasink, [('out_t', '@tstat')]),
		(glm, datasink, [('out_cope', '@cope')]),
		(glm, datasink, [('out_varcb', '@varcb')]),
		(glm, datasink, [('out_file', '@betas')]),
		(design_rename, datasink, [('out_file', '@design')]),
		]

	if habituation:
		level1design.inputs.orthogonalization = {1: {0:0,1:0,2:0}, 2: {0:1,1:1,2:0}}
		specify_model.inputs.bids_condition_column = 'samri_l1_regressors'
		specify_model.inputs.bids_amplitude_column = 'samri_l1_amplitude'
		add_habituation = pe.Node(name='add_habituation', interface=util.Function(function=eventfile_add_habituation,input_names=inspect.getargspec(eventfile_add_habituation)[0], output_names=['out_file']))
		# Regressor names need to be prefixed with "e" plus a numerator so that Level1Design will be certain to conserve the order.
		add_habituation.inputs.original_stimulation_value='1stim'
		add_habituation.inputs.habituation_value='2habituation'
		workflow_connections.extend([
			(eventfile, add_habituation, [('eventfile', 'in_file')]),
			(add_habituation, specify_model, [('out_file', 'bids_event_file')]),
			])
	if not habituation:
		specify_model.inputs.bids_condition_column = ''
		level1design.inputs.contrasts = [('allStim','T', ['ev0'],[1])]
		workflow_connections.extend([
			(eventfile, specify_model, [('eventfile', 'bids_event_file')]),
			])
	#condition names as defined in eventfile_add_habituation:
	elif habituation=="separate_contrast":
		level1design.inputs.contrasts = [('stim','T', ['1stim','2habituation'],[1,0]),('hab','T', ['1stim','2habituation'],[0,1])]
	elif habituation=="in_main_contrast":
		level1design.inputs.contrasts = [('all','T', ['1stim','2habituation'],[1,1])]
	elif habituation=="confound":
		level1design.inputs.contrasts = [('stim','T', ["1stim", "2habituation"],[1,0])]
	else:
		print(habituation)
		raise ValueError('The value you have provided for the `habituation` parameter, namely "{}", is invalid. Please choose one of: {{None, False,"","confound","in_main_contrast","separate_contrast"}}'.format(habituation))

	if highpass_sigma or lowpass_sigma:
		bandpass = pe.Node(interface=fsl.maths.TemporalFilter(), name="bandpass")
		bandpass.inputs.highpass_sigma = highpass_sigma
		bandpass.interface.mem_gb = 16
		if lowpass_sigma:
			bandpass.inputs.lowpass_sigma = lowpass_sigma
		else:
			bandpass.inputs.lowpass_sigma = tr
		if invert:
			workflow_connections.extend([
				(get_scan, invert, [('nii_path', 'in_file')]),
				(invert, bandpass, [('out_file', 'in_file')]),
				(bandpass, specify_model, [('out_file', 'functional_runs')]),
				(bandpass, glm, [('out_file', 'in_file')]),
				(bandpass, datasink, [('out_file', '@ts_file')]),
				(get_scan, bandpass, [('nii_name', 'out_file')]),
				])
		else:
			workflow_connections.extend([
				(get_scan, bandpass, [('nii_path', 'in_file')]),
				(bandpass, specify_model, [('out_file', 'functional_runs')]),
				(bandpass, glm, [('out_file', 'in_file')]),
				(bandpass, datasink, [('out_file', '@ts_file')]),
				(get_scan, bandpass, [('nii_name', 'out_file')]),
				])
	else:
		if invert:
			workflow_connections.extend([
				(get_scan, invert, [('nii_path', 'in_file')]),
				(invert, specify_model, [('out_file', 'functional_runs')]),
				(invert, glm, [('out_file', 'in_file')]),
				(invert, datasink, [('out_file', '@ts_file')]),
				(get_scan, invert, [('nii_name', 'out_file')]),
				])
		else:
			workflow_connections.extend([
				(get_scan, specify_model, [('nii_path', 'functional_runs')]),
				(get_scan, glm, [('nii_path', 'in_file')]),
				(get_scan, datasink, [('nii_path', '@ts_file')]),
				])


	workflow_config = {'execution': {'crashdump_dir': path.join(out_base,'crashdump'),}}
	if debug:
		workflow_config['logging'] = {
			'workflow_level':'DEBUG',
			'utils_level':'DEBUG',
			'interface_level':'DEBUG',
			'filemanip_level':'DEBUG',
			'log_to_file':'true',
			}

	workflow = pe.Workflow(name=workdir_name)
	workflow.connect(workflow_connections)
	workflow.base_dir = out_base
	workflow.config = workflow_config
	workflow.write_graph(dotfilename=path.join(workflow.base_dir,workdir_name,"graph.dot"), graph2use="hierarchical", format="png")

	n_jobs = max(int(round(mp.cpu_count()*n_jobs_percentage)),2)
	workflow.run(plugin="MultiProc", plugin_args={'n_procs' : n_jobs})
	if not keep_work:
		shutil.rmtree(path.join(out_base,workdir_name))
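
A hedged invocation sketch; the paths are hypothetical and assume a BIDS-like preprocessing directory produced upstream:

l1('~/ni_data/ofM.dr/preprocessing/generic',
	out_base='~/ni_data/ofM.dr/l1',
	mask='mouse',
	habituation='confound',
	workflow_name='generic',
	)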
Code example #8
def create_workflow(files,
                    target_file,
                    subject_id,
                    TR,
                    slice_times,
                    norm_threshold=1,
                    num_components=5,
                    vol_fwhm=None,
                    surf_fwhm=None,
                    lowpass_freq=-1,
                    highpass_freq=-1,
                    subjects_dir=None,
                    sink_directory=os.getcwd(),
                    target_subject=['fsaverage3', 'fsaverage4'],
                    name='resting'):

    wf = Workflow(name=name)

    # Rename files in case they are named identically
    name_unique = MapNode(Rename(format_string='rest_%(run)02d'),
                          iterfield=['in_file', 'run'],
                          name='rename')
    name_unique.inputs.keep_ext = True
    name_unique.inputs.run = list(range(1, len(files) + 1))
    name_unique.inputs.in_file = files

    realign = Node(interface=spm.Realign(), name="realign")
    realign.inputs.jobtype = 'estwrite'

    num_slices = len(slice_times)
    slice_timing = Node(interface=spm.SliceTiming(), name="slice_timing")
    slice_timing.inputs.num_slices = num_slices
    slice_timing.inputs.time_repetition = TR
    slice_timing.inputs.time_acquisition = TR - TR / float(num_slices)
    slice_timing.inputs.slice_order = (np.argsort(slice_times) + 1).tolist()
    slice_timing.inputs.ref_slice = int(num_slices / 2)

    # Compute TSNR on realigned data, regressing out polynomials up to order 2
    tsnr = MapNode(TSNR(regress_poly=2), iterfield=['in_file'], name='tsnr')
    wf.connect(slice_timing, 'timecorrected_files', tsnr, 'in_file')

    # Compute the median image across runs
    calc_median = Node(Function(input_names=['in_files'],
                                output_names=['median_file'],
                                function=median,
                                imports=imports),
                       name='median')
    wf.connect(tsnr, 'detrended_file', calc_median, 'in_files')
    """Segment and Register
    """

    registration = create_reg_workflow(name='registration')
    wf.connect(calc_median, 'median_file', registration,
               'inputspec.mean_image')
    registration.inputs.inputspec.subject_id = subject_id
    registration.inputs.inputspec.subjects_dir = subjects_dir
    registration.inputs.inputspec.target_image = target_file
    """Use :class:`nipype.algorithms.rapidart` to determine which of the
    images in the functional series are outliers based on deviations in
    intensity or movement.
    """

    art = Node(interface=ArtifactDetect(), name="art")
    art.inputs.use_differences = [True, True]
    art.inputs.use_norm = True
    art.inputs.norm_threshold = norm_threshold
    art.inputs.zintensity_threshold = 9
    art.inputs.mask_type = 'spm_global'
    art.inputs.parameter_source = 'SPM'
    """Here we are connecting all the nodes together. Notice that we add the merge node only if you choose
    to use 4D. Also `get_vox_dims` function is passed along the input volume of normalise to set the optimal
    voxel sizes.
    """

    wf.connect([
        (name_unique, realign, [('out_file', 'in_files')]),
        (realign, slice_timing, [('realigned_files', 'in_files')]),
        (slice_timing, art, [('timecorrected_files', 'realigned_files')]),
        (realign, art, [('realignment_parameters', 'realignment_parameters')]),
    ])

    def selectindex(files, idx):
        import numpy as np
        from nipype.utils.filemanip import filename_to_list, list_to_filename
        return list_to_filename(
            np.array(filename_to_list(files))[idx].tolist())

    mask = Node(fsl.BET(), name='getmask')
    mask.inputs.mask = True
    wf.connect(calc_median, 'median_file', mask, 'in_file')

    # get segmentation in normalized functional space

    def merge_files(in1, in2):
        out_files = filename_to_list(in1)
        out_files.extend(filename_to_list(in2))
        return out_files

    # filter some noise

    # Compute motion regressors
    motreg = Node(Function(
        input_names=['motion_params', 'order', 'derivatives'],
        output_names=['out_files'],
        function=motion_regressors,
        imports=imports),
                  name='getmotionregress')
    wf.connect(realign, 'realignment_parameters', motreg, 'motion_params')

    # Create a filter to remove motion and art confounds
    createfilter1 = Node(Function(
        input_names=['motion_params', 'comp_norm', 'outliers', 'detrend_poly'],
        output_names=['out_files'],
        function=build_filter1,
        imports=imports),
                         name='makemotionbasedfilter')
    createfilter1.inputs.detrend_poly = 2
    wf.connect(motreg, 'out_files', createfilter1, 'motion_params')
    wf.connect(art, 'norm_files', createfilter1, 'comp_norm')
    wf.connect(art, 'outlier_files', createfilter1, 'outliers')

    filter1 = MapNode(fsl.GLM(out_f_name='F_mcart.nii',
                              out_pf_name='pF_mcart.nii',
                              demean=True),
                      iterfield=['in_file', 'design', 'out_res_name'],
                      name='filtermotion')

    wf.connect(slice_timing, 'timecorrected_files', filter1, 'in_file')
    wf.connect(slice_timing, ('timecorrected_files', rename, '_filtermotart'),
               filter1, 'out_res_name')
    wf.connect(createfilter1, 'out_files', filter1, 'design')

    createfilter2 = MapNode(Function(input_names=[
        'realigned_file', 'mask_file', 'num_components', 'extra_regressors'
    ],
                                     output_names=['out_files'],
                                     function=extract_noise_components,
                                     imports=imports),
                            iterfield=['realigned_file', 'extra_regressors'],
                            name='makecompcorrfilter')
    createfilter2.inputs.num_components = num_components

    wf.connect(createfilter1, 'out_files', createfilter2, 'extra_regressors')
    wf.connect(filter1, 'out_res', createfilter2, 'realigned_file')
    wf.connect(registration,
               ('outputspec.segmentation_files', selectindex, [0, 2]),
               createfilter2, 'mask_file')

    filter2 = MapNode(fsl.GLM(out_f_name='F.nii',
                              out_pf_name='pF.nii',
                              demean=True),
                      iterfield=['in_file', 'design', 'out_res_name'],
                      name='filter_noise_nosmooth')
    wf.connect(filter1, 'out_res', filter2, 'in_file')
    wf.connect(filter1, ('out_res', rename, '_cleaned'), filter2,
               'out_res_name')
    wf.connect(createfilter2, 'out_files', filter2, 'design')
    wf.connect(mask, 'mask_file', filter2, 'mask')

    bandpass = Node(Function(
        input_names=['files', 'lowpass_freq', 'highpass_freq', 'fs'],
        output_names=['out_files'],
        function=bandpass_filter,
        imports=imports),
                    name='bandpass_unsmooth')
    bandpass.inputs.fs = 1. / TR
    bandpass.inputs.highpass_freq = highpass_freq
    bandpass.inputs.lowpass_freq = lowpass_freq
    wf.connect(filter2, 'out_res', bandpass, 'files')
    """Smooth the functional data using
    :class:`nipype.interfaces.spm.Smooth`.
    """

    smooth = Node(interface=spm.Smooth(), name="smooth")
    smooth.inputs.fwhm = vol_fwhm

    wf.connect(bandpass, 'out_files', smooth, 'in_files')

    collector = Node(Merge(2), name='collect_streams')
    wf.connect(smooth, 'smoothed_files', collector, 'in1')
    wf.connect(bandpass, 'out_files', collector, 'in2')
    """
    Transform the remaining images. First to anatomical and then to target
    """

    warpall = MapNode(ants.ApplyTransforms(),
                      iterfield=['input_image'],
                      name='warpall')
    warpall.inputs.input_image_type = 3
    warpall.inputs.interpolation = 'Linear'
    warpall.inputs.invert_transform_flags = [False, False]
    warpall.inputs.terminal_output = 'file'
    warpall.inputs.reference_image = target_file
    warpall.inputs.args = '--float'
    warpall.inputs.num_threads = 1

    # transform to target
    wf.connect(collector, 'out', warpall, 'input_image')
    wf.connect(registration, 'outputspec.transforms', warpall, 'transforms')

    mask_target = Node(fsl.ImageMaths(op_string='-bin'), name='target_mask')

    wf.connect(registration, 'outputspec.anat2target', mask_target, 'in_file')

    maskts = MapNode(fsl.ApplyMask(), iterfield=['in_file'], name='ts_masker')
    wf.connect(warpall, 'output_image', maskts, 'in_file')
    wf.connect(mask_target, 'out_file', maskts, 'mask_file')

    # map to surface
    # extract aparc+aseg ROIs
    # extract subcortical ROIs
    # extract target space ROIs
    # combine subcortical and cortical rois into a single cifti file

    #######
    # Convert aparc to subject functional space

    # Sample the average time series in aparc ROIs
    sampleaparc = MapNode(
        freesurfer.SegStats(default_color_table=True),
        iterfield=['in_file', 'summary_file', 'avgwf_txt_file'],
        name='aparc_ts')
    sampleaparc.inputs.segment_id = ([8] + list(range(10, 14)) +
                                     [17, 18, 26, 47] + list(range(49, 55)) +
                                     [58] + list(range(1001, 1036)) +
                                     list(range(2001, 2036)))

    wf.connect(registration, 'outputspec.aparc', sampleaparc,
               'segmentation_file')
    wf.connect(collector, 'out', sampleaparc, 'in_file')

    def get_names(files, suffix):
        """Generate appropriate names for output files
        """
        from nipype.utils.filemanip import (split_filename, filename_to_list,
                                            list_to_filename)
        out_names = []
        for filename in files:
            _, name, _ = split_filename(filename)
            out_names.append(name + suffix)
        return list_to_filename(out_names)

    wf.connect(collector, ('out', get_names, '_avgwf.txt'), sampleaparc,
               'avgwf_txt_file')
    wf.connect(collector, ('out', get_names, '_summary.stats'), sampleaparc,
               'summary_file')

    # Sample the time series onto the surface of the target surface. Performs
    # sampling into left and right hemisphere
    target = Node(IdentityInterface(fields=['target_subject']), name='target')
    target.iterables = ('target_subject', filename_to_list(target_subject))

    samplerlh = MapNode(freesurfer.SampleToSurface(),
                        iterfield=['source_file'],
                        name='sampler_lh')
    samplerlh.inputs.sampling_method = "average"
    samplerlh.inputs.sampling_range = (0.1, 0.9, 0.1)
    samplerlh.inputs.sampling_units = "frac"
    samplerlh.inputs.interp_method = "trilinear"
    samplerlh.inputs.smooth_surf = surf_fwhm
    # samplerlh.inputs.cortex_mask = True
    samplerlh.inputs.out_type = 'niigz'
    samplerlh.inputs.subjects_dir = subjects_dir

    samplerrh = samplerlh.clone('sampler_rh')

    samplerlh.inputs.hemi = 'lh'
    wf.connect(collector, 'out', samplerlh, 'source_file')
    wf.connect(registration, 'outputspec.out_reg_file', samplerlh, 'reg_file')
    wf.connect(target, 'target_subject', samplerlh, 'target_subject')

    samplerrh.set_input('hemi', 'rh')
    wf.connect(collector, 'out', samplerrh, 'source_file')
    wf.connect(registration, 'outputspec.out_reg_file', samplerrh, 'reg_file')
    wf.connect(target, 'target_subject', samplerrh, 'target_subject')

    # Combine left and right hemisphere to text file
    combiner = MapNode(Function(input_names=['left', 'right'],
                                output_names=['out_file'],
                                function=combine_hemi,
                                imports=imports),
                       iterfield=['left', 'right'],
                       name="combiner")
    wf.connect(samplerlh, 'out_file', combiner, 'left')
    wf.connect(samplerrh, 'out_file', combiner, 'right')

    # Sample the time series file for each subcortical roi
    ts2txt = MapNode(Function(
        input_names=['timeseries_file', 'label_file', 'indices'],
        output_names=['out_file'],
        function=extract_subrois,
        imports=imports),
                     iterfield=['timeseries_file'],
                     name='getsubcortts')
    ts2txt.inputs.indices = [8] + list(range(10, 14)) + [17, 18, 26, 47] +\
        list(range(49, 55)) + [58]
    ts2txt.inputs.label_file = \
        os.path.abspath(('OASIS-TRT-20_jointfusion_DKT31_CMA_labels_in_MNI152_'
                         '2mm_v2.nii.gz'))
    wf.connect(maskts, 'out_file', ts2txt, 'timeseries_file')

    ######

    substitutions = [('_target_subject_', ''),
                     ('_filtermotart_cleaned_bp_trans_masked', ''),
                     ('_filtermotart_cleaned_bp', '')]
    regex_subs = [
        ('_ts_masker.*/sar', '/smooth/'),
        ('_ts_masker.*/ar', '/unsmooth/'),
        ('_combiner.*/sar', '/smooth/'),
        ('_combiner.*/ar', '/unsmooth/'),
        ('_aparc_ts.*/sar', '/smooth/'),
        ('_aparc_ts.*/ar', '/unsmooth/'),
        ('_getsubcortts.*/sar', '/smooth/'),
        ('_getsubcortts.*/ar', '/unsmooth/'),
        ('series/sar', 'series/smooth/'),
        ('series/ar', 'series/unsmooth/'),
        ('_inverse_transform./', ''),
    ]
    # Save the relevant data into an output directory
    datasink = Node(interface=DataSink(), name="datasink")
    datasink.inputs.base_directory = sink_directory
    datasink.inputs.container = subject_id
    datasink.inputs.substitutions = substitutions
    datasink.inputs.regexp_substitutions = regex_subs  # (r'(/_.*(\d+/))', r'/run\2')
    wf.connect(realign, 'realignment_parameters', datasink,
               'resting.qa.motion')
    wf.connect(art, 'norm_files', datasink, 'resting.qa.art.@norm')
    wf.connect(art, 'intensity_files', datasink, 'resting.qa.art.@intensity')
    wf.connect(art, 'outlier_files', datasink, 'resting.qa.art.@outlier_files')
    wf.connect(registration, 'outputspec.segmentation_files', datasink,
               'resting.mask_files')
    wf.connect(registration, 'outputspec.anat2target', datasink,
               'resting.qa.ants')
    wf.connect(mask, 'mask_file', datasink, 'resting.mask_files.@brainmask')
    wf.connect(mask_target, 'out_file', datasink, 'resting.mask_files.target')
    wf.connect(filter1, 'out_f', datasink, 'resting.qa.compmaps.@mc_F')
    wf.connect(filter1, 'out_pf', datasink, 'resting.qa.compmaps.@mc_pF')
    wf.connect(filter2, 'out_f', datasink, 'resting.qa.compmaps')
    wf.connect(filter2, 'out_pf', datasink, 'resting.qa.compmaps.@p')
    wf.connect(bandpass, 'out_files', datasink,
               'resting.timeseries.@bandpassed')
    wf.connect(smooth, 'smoothed_files', datasink,
               'resting.timeseries.@smoothed')
    wf.connect(createfilter1, 'out_files', datasink,
               'resting.regress.@regressors')
    wf.connect(createfilter2, 'out_files', datasink,
               'resting.regress.@compcorr')
    wf.connect(maskts, 'out_file', datasink, 'resting.timeseries.target')
    wf.connect(sampleaparc, 'summary_file', datasink,
               'resting.parcellations.aparc')
    wf.connect(sampleaparc, 'avgwf_txt_file', datasink,
               'resting.parcellations.aparc.@avgwf')
    wf.connect(ts2txt, 'out_file', datasink,
               'resting.parcellations.grayo.@subcortical')

    datasink2 = Node(interface=DataSink(), name="datasink2")
    datasink2.inputs.base_directory = sink_directory
    datasink2.inputs.container = subject_id
    datasink2.inputs.substitutions = substitutions
    datasink2.inputs.regexp_substitutions = regex_subs  # (r'(/_.*(\d+/))', r'/run\2')
    wf.connect(combiner, 'out_file', datasink2,
               'resting.parcellations.grayo.@surface')
    return wf
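
A hedged invocation sketch with hypothetical inputs; slice_times would normally come from the acquisition metadata:

wf = create_workflow(files=['rest_run1.nii', 'rest_run2.nii'],
                     target_file='mni_template.nii',
                     subject_id='sub-01',
                     TR=2.0,
                     slice_times=[0.0, 1.0, 0.5, 1.5],
                     vol_fwhm=6.0,
                     surf_fwhm=6.0,
                     subjects_dir='/data/freesurfer',
                     sink_directory='/data/output')
# wf.run(plugin='MultiProc')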
Code example #9
def create_denoise_pipeline(name='denoise'):
    # workflow
    denoise = Workflow(name=name)
    # Define nodes
    inputnode = Node(interface=util.IdentityInterface(fields=[
        'anat_brain', 'brain_mask', 'epi2anat_dat', 'unwarped_mean',
        'epi_coreg', 'moco_par', 'highpass_sigma', 'lowpass_sigma', 'tr'
    ]),
                     name='inputnode')
    outputnode = Node(interface=util.IdentityInterface(fields=[
        'wmcsf_mask', 'brain_mask_resamp', 'brain_mask2epi', 'combined_motion',
        'outlier_files', 'intensity_files', 'outlier_stats', 'outlier_plots',
        'mc_regressor', 'mc_F', 'mc_pF', 'comp_regressor', 'comp_F', 'comp_pF',
        'normalized_file'
    ]),
                      name='outputnode')
    # run fast to get tissue probability classes
    fast = Node(fsl.FAST(), name='fast')
    denoise.connect([(inputnode, fast, [('anat_brain', 'in_files')])])

    # functions to select tissue classes
    def selectindex(files, idx):
        import numpy as np
        from nipype.utils.filemanip import filename_to_list, list_to_filename
        return list_to_filename(
            np.array(filename_to_list(files))[idx].tolist())

    def selectsingle(files, idx):
        return files[idx]

    # resample tissue classes
    resample_tissue = MapNode(afni.Resample(resample_mode='NN',
                                            outputtype='NIFTI_GZ'),
                              iterfield=['in_file'],
                              name='resample_tissue')
    denoise.connect([
        (inputnode, resample_tissue, [('epi_coreg', 'master')]),
        (fast, resample_tissue, [(('partial_volume_files', selectindex,
                                   [0, 2]), 'in_file')]),
    ])
    # binarize tissue classes
    binarize_tissue = MapNode(
        fsl.ImageMaths(op_string='-nan -thr 0.99 -ero -bin'),
        iterfield=['in_file'],
        name='binarize_tissue')
    denoise.connect([
        (resample_tissue, binarize_tissue, [('out_file', 'in_file')]),
    ])
    # combine tissue classes to noise mask
    wmcsf_mask = Node(fsl.BinaryMaths(operation='add',
                                      out_file='wmcsf_mask_lowres.nii.gz'),
                      name='wmcsf_mask')
    denoise.connect([(binarize_tissue, wmcsf_mask,
                      [(('out_file', selectsingle, 0), 'in_file'),
                       (('out_file', selectsingle, 1), 'operand_file')]),
                     (wmcsf_mask, outputnode, [('out_file', 'wmcsf_mask')])])
    # resample brain mask
    resample_brain = Node(afni.Resample(
        resample_mode='NN',
        outputtype='NIFTI_GZ',
        out_file='T1_brain_mask_lowres.nii.gz'),
                          name='resample_brain')
    denoise.connect([(inputnode, resample_brain, [('brain_mask', 'in_file'),
                                                  ('epi_coreg', 'master')]),
                     (resample_brain, outputnode, [('out_file',
                                                    'brain_mask_resamp')])])
    # project brain mask into original epi space for quality assessment
    brainmask2epi = Node(fs.ApplyVolTransform(
        interp='nearest',
        inverse=True,
        transformed_file='T1_brain_mask2epi.nii.gz',
    ),
                         name='brainmask2epi')
    denoise.connect([
        (inputnode, brainmask2epi, [('brain_mask', 'target_file'),
                                    ('epi2anat_dat', 'reg_file'),
                                    ('unwarped_mean', 'source_file')]),
        (brainmask2epi, outputnode, [('transformed_file', 'brain_mask2epi')])
    ])
    # perform artefact detection
    artefact = Node(ra.ArtifactDetect(save_plot=True,
                                      use_norm=True,
                                      parameter_source='FSL',
                                      mask_type='file',
                                      norm_threshold=1,
                                      zintensity_threshold=3,
                                      use_differences=[True, False]),
                    name='artefact')
    artefact.plugin_args = {'submit_specs': 'request_memory = 17000'}
    denoise.connect([
        (inputnode, artefact, [('epi_coreg', 'realigned_files'),
                               ('moco_par', 'realignment_parameters')]),
        (resample_brain, artefact, [('out_file', 'mask_file')]),
        (artefact, outputnode, [('norm_files', 'combined_motion'),
                                ('outlier_files', 'outlier_files'),
                                ('intensity_files', 'intensity_files'),
                                ('statistic_files', 'outlier_stats'),
                                ('plot_files', 'outlier_plots')])
    ])
    # Compute motion regressors
    motreg = Node(util.Function(
        input_names=['motion_params', 'order', 'derivatives'],
        output_names=['out_files'],
        function=motion_regressors),
                  name='getmotionregress')
    motreg.plugin_args = {'submit_specs': 'request_memory = 17000'}
    denoise.connect([(inputnode, motreg, [('moco_par', 'motion_params')])])
    # Create a filter to remove motion and art confounds
    createfilter1 = Node(util.Function(
        input_names=['motion_params', 'comp_norm', 'outliers', 'detrend_poly'],
        output_names=['out_files'],
        function=build_filter1),
                         name='makemotionbasedfilter')
    createfilter1.inputs.detrend_poly = 2
    createfilter1.plugin_args = {'submit_specs': 'request_memory = 17000'}
    denoise.connect([
        (motreg, createfilter1, [('out_files', 'motion_params')]),
        (
            artefact,
            createfilter1,
            [  #('norm_files', 'comp_norm'),
                ('outlier_files', 'outliers')
            ]),
        (createfilter1, outputnode, [('out_files', 'mc_regressor')])
    ])
    # regress out motion and art confounds
    filter1 = Node(fsl.GLM(out_f_name='F_mcart.nii.gz',
                           out_pf_name='pF_mcart.nii.gz',
                           out_res_name='rest_mc_denoised.nii.gz',
                           demean=True),
                   name='filtermotion')
    filter1.plugin_args = {'submit_specs': 'request_memory = 17000'}
    denoise.connect([(inputnode, filter1, [('epi_coreg', 'in_file')]),
                     (createfilter1, filter1,
                      [(('out_files', list_to_filename), 'design')]),
                     (filter1, outputnode, [('out_f', 'mc_F'),
                                            ('out_pf', 'mc_pF')])])
    # create filter with compcor components
    createfilter2 = Node(util.Function(input_names=[
        'realigned_file', 'mask_file', 'num_components', 'extra_regressors'
    ],
                                       output_names=['out_files'],
                                       function=extract_noise_components),
                         name='makecompcorfilter')
    createfilter2.inputs.num_components = 6
    createfilter2.plugin_args = {'submit_specs': 'request_memory = 17000'}
    denoise.connect([
        (createfilter1, createfilter2, [(('out_files', list_to_filename),
                                         'extra_regressors')]),
        (filter1, createfilter2, [('out_res', 'realigned_file')]),
        (wmcsf_mask, createfilter2, [('out_file', 'mask_file')]),
        (createfilter2, outputnode, [('out_files', 'comp_regressor')]),
    ])
    # regress compcor and other noise components
    filter2 = Node(fsl.GLM(out_f_name='F_noise.nii.gz',
                           out_pf_name='pF_noise.nii.gz',
                           out_res_name='rest2anat_denoised.nii.gz',
                           demean=True),
                   name='filternoise')
    filter2.plugin_args = {'submit_specs': 'request_memory = 17000'}
    denoise.connect([(filter1, filter2, [('out_res', 'in_file')]),
                     (createfilter2, filter2, [('out_files', 'design')]),
                     (resample_brain, filter2, [('out_file', 'mask')]),
                     (filter2, outputnode, [('out_f', 'comp_F'),
                                            ('out_pf', 'comp_pF')])])
    # bandpass filter denoised file
    bandpass_filter = Node(
        fsl.TemporalFilter(out_file='rest_denoised_bandpassed.nii.gz'),
        name='bandpass_filter')
    bandpass_filter.plugin_args = {'submit_specs': 'request_memory = 17000'}
    denoise.connect([(inputnode, bandpass_filter,
                      [('highpass_sigma', 'highpass_sigma'),
                       ('lowpass_sigma', 'lowpass_sigma')]),
                     (filter2, bandpass_filter, [('out_res', 'in_file')])])
    # time-normalize scans
    normalize_time = Node(util.Function(input_names=['in_file', 'tr'],
                                        output_names=['out_file'],
                                        function=time_normalizer),
                          name='normalize_time')
    normalize_time.plugin_args = {'submit_specs': 'request_memory = 17000'}
    denoise.connect([
        (inputnode, normalize_time, [('tr', 'tr')]),
        (bandpass_filter, normalize_time, [('out_file', 'in_file')]),
        (normalize_time, outputnode, [('out_file', 'normalized_file')])
    ])
    return denoise
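
A hedged usage sketch with hypothetical input paths; the remaining inputnode fields (epi2anat_dat, unwarped_mean, moco_par, highpass_sigma, lowpass_sigma, tr) would be set the same way before running:

denoise_wf = create_denoise_pipeline(name='denoise')
denoise_wf.base_dir = '/tmp/denoise_work'
denoise_wf.inputs.inputnode.anat_brain = '/data/sub-01/T1_brain.nii.gz'
denoise_wf.inputs.inputnode.brain_mask = '/data/sub-01/T1_brain_mask.nii.gz'
denoise_wf.inputs.inputnode.epi_coreg = '/data/sub-01/rest2anat.nii.gz'
# denoise_wf.run()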
Code example #10
def get_spatial_map_timeseries(wf_name='spatial_map_timeseries'):
    """
    Workflow to regress each provided spatial
    map against the subject's functional 4D file in order
    to return a time series for each of the maps

    Parameters
    ----------
    wf_name : string
        name of the workflow

    Returns
    -------
    wflow : workflow object
        workflow object

    Notes
    -----
    `Source <https://github.com/FCP-INDI/C-PAC/blob/master/CPAC/timeseries/timeseries_analysis.py>`_

    Workflow Inputs::

        inputspec.subject_rest : string  (nifti file)
            path to input functional data
        inputspec.subject_mask : string (nifti file)
            path to subject functional mask
        inputspec.spatial_map : string (nifti file)
            path to Spatial Maps
        inputspec.demean : Boolean
            control whether to demean model and data

    Workflow Outputs::

        outputspec.subject_timeseries: string (txt file)
            list of time series stored in a space-separated
            txt file; the columns are spatial maps, the rows are timepoints


    Example
    -------
    >>> import CPAC.timeseries.timeseries_analysis as t
    >>> wf = t.get_spatial_map_timeseries()
    >>> wf.inputs.inputspec.subject_rest = '/home/data/rest.nii.gz'
    >>> wf.inputs.inputspec.subject_mask = '/home/data/rest_mask.nii.gz'
    >>> wf.inputs.inputspec.ICA_map = '/home/data/spatialmaps/spatial_map.nii.gz'
    >>> wf.inputs.inputspec.demean = True
    >>> wf.base_dir = './'
    >>> wf.run()

    """

    wflow = pe.Workflow(name=wf_name)

    inputNode = pe.Node(util.IdentityInterface
                        (fields=['subject_rest',
                                 'subject_mask',
                                 'spatial_map',
                                 'demean']),
                        name='inputspec')

    outputNode = pe.Node(util.IdentityInterface
                         (fields=['subject_timeseries']),
                         name='outputspec')

    spatialReg = pe.Node(interface=fsl.GLM(),
                         name='spatial_regression')

    spatialReg.inputs.out_file = 'spatial_map_timeseries.txt'

    wflow.connect(inputNode, 'subject_rest',
                spatialReg, 'in_file')
    wflow.connect(inputNode, 'subject_mask',
                spatialReg, 'mask')
    wflow.connect(inputNode, 'spatial_map',
                spatialReg, 'design')
    wflow.connect(inputNode, 'demean',
                spatialReg, 'demean')

    wflow.connect(spatialReg, 'out_file',
                  outputNode, 'subject_timeseries')

    return wflow
Code example #11
File: glm.py Project: Doeme/SAMRI
def l1(
    preprocessing_dir,
    highpass_sigma=225,
    include={},
    exclude={},
    keep_work=False,
    l1_dir="",
    nprocs=10,
    mask="/home/chymera/ni_data/templates/ds_QBI_chr_bin.nii.gz",
    per_stimulus_contrast=False,
    habituation="",
    tr=1,
    workflow_name="generic",
):
    """Calculate subject level GLM statistics.

	Parameters
	----------

	include : dict
	A dictionary with any combination of "sessions", "subjects", "trials" as keys and corresponding identifiers as values.
	If this is specified ony matching entries will be included in the analysis.

	exclude : dict
	A dictionary with any combination of "sessions", "subjects", "trials" as keys and corresponding identifiers as values.
	If this is specified ony non-matching entries will be included in the analysis.

	habituation : string
	One value of "confound", "in_main_contrast", "separate_contrast", "" indicating how the habituation regressor should be handled.
	"" or any other value which evaluates to False will mean no habituation regressor is used int he model
	"""

    preprocessing_dir = path.expanduser(preprocessing_dir)
    if not l1_dir:
        l1_dir = path.abspath(path.join(preprocessing_dir, "..", "..", "l1"))

    datafind = nio.DataFinder()
    datafind.inputs.root_paths = preprocessing_dir
    datafind.inputs.match_regex = '.+/sub-(?P<sub>.+)/ses-(?P<ses>.+)/func/.*?_trial-(?P<scan>.+)\.nii.gz'
    datafind_res = datafind.run()
    iterfields = zip(*[
        datafind_res.outputs.sub, datafind_res.outputs.ses,
        datafind_res.outputs.scan
    ])

    if include:
        iterfields = iterfield_selector(iterfields, include, "include")
    if exclude:
        iterfields = iterfield_selector(iterfields, exclude, "exclude")

    infosource = pe.Node(
        interface=util.IdentityInterface(fields=['subject_session_scan']),
        name="infosource")
    infosource.iterables = [('subject_session_scan', iterfields)]

    datafile_source = pe.Node(
        name='datafile_source',
        interface=util.Function(
            function=sss_to_source,
            input_names=inspect.getargspec(sss_to_source)[0],
            output_names=['out_file']))
    datafile_source.inputs.base_directory = preprocessing_dir
    datafile_source.inputs.source_format = "sub-{0}/ses-{1}/func/sub-{0}_ses-{1}_trial-{2}.nii.gz"

    eventfile_source = pe.Node(
        name='eventfile_source',
        interface=util.Function(
            function=sss_to_source,
            input_names=inspect.getargspec(sss_to_source)[0],
            output_names=['out_file']))
    eventfile_source.inputs.base_directory = preprocessing_dir
    eventfile_source.inputs.source_format = "sub-{0}/ses-{1}/func/sub-{0}_ses-{1}_trial-{2}_events.tsv"

    specify_model = pe.Node(interface=SpecifyModel(), name="specify_model")
    specify_model.inputs.input_units = 'secs'
    specify_model.inputs.time_repetition = tr
    specify_model.inputs.high_pass_filter_cutoff = highpass_sigma
    specify_model.inputs.one_condition_file = not per_stimulus_contrast
    specify_model.inputs.habituation_regressor = bool(habituation)

    level1design = pe.Node(interface=Level1Design(), name="level1design")
    level1design.inputs.interscan_interval = tr
    level1design.inputs.bases = {
        "custom": {
            "bfcustompath": "/mnt/data/ni_data/irfs/chr_beta1.txt"
        }
    }
    # level1design.inputs.bases = {'gamma': {'derivs':False, 'gammasigma':10, 'gammadelay':5}}
    level1design.inputs.orthogonalization = {
        1: {
            0: 0,
            1: 0,
            2: 0
        },
        2: {
            0: 1,
            1: 1,
            2: 0
        }
    }
    level1design.inputs.model_serial_correlations = True
    if per_stimulus_contrast:
        level1design.inputs.contrasts = [
            ('allStim', 'T', ["e0", "e1", "e2", "e3", "e4",
                              "e5"], [1, 1, 1, 1, 1, 1])
        ]  #condition names as defined in specify_model
    elif habituation == "separate_contrast":
        level1design.inputs.contrasts = [
            ('allStim', 'T', ["e0"], [1]), ('allStim', 'T', ["e1"], [1])
        ]  #condition names as defined in specify_model
    elif habituation == "in_main_contrast":
        level1design.inputs.contrasts = [
            ('allStim', 'T', ["e0", "e1"], [1, 1])
        ]  #condition names as defined in specify_model
    elif habituation == "confound":
        level1design.inputs.contrasts = [
            ('allStim', 'T', ["e0"], [1])
        ]  #condition names as defined in specify_model
    else:
        level1design.inputs.contrasts = [
            ('allStim', 'T', ["e0"], [1])
        ]  #condition names as defined in specify_model

    modelgen = pe.Node(interface=fsl.FEATModel(), name='modelgen')

    glm = pe.Node(interface=fsl.GLM(), name='glm', iterfield='design')
    glm.inputs.out_cope = "cope.nii.gz"
    glm.inputs.out_varcb_name = "varcb.nii.gz"
    #not setting a betas output file might lead to beta export in lieu of COPEs
    glm.inputs.out_file = "betas.nii.gz"
    glm.inputs.out_t_name = "t_stat.nii.gz"
    glm.inputs.out_p_name = "p_stat.nii.gz"
    if mask:
        glm.inputs.mask = mask

    cope_filename = pe.Node(
        name='cope_filename',
        interface=util.Function(
            function=sss_to_source,
            input_names=inspect.getargspec(sss_to_source)[0],
            output_names=['filename']))
    cope_filename.inputs.source_format = "sub-{0}_ses-{1}_trial-{2}_cope.nii.gz"
    varcb_filename = pe.Node(
        name='varcb_filename',
        interface=util.Function(
            function=sss_to_source,
            input_names=inspect.getargspec(sss_to_source)[0],
            output_names=['filename']))
    varcb_filename.inputs.source_format = "sub-{0}_ses-{1}_trial-{2}_varcb.nii.gz"
    tstat_filename = pe.Node(
        name='tstat_filename',
        interface=util.Function(
            function=sss_to_source,
            input_names=inspect.getargspec(sss_to_source)[0],
            output_names=['filename']))
    tstat_filename.inputs.source_format = "sub-{0}_ses-{1}_trial-{2}_tstat.nii.gz"
    zstat_filename = pe.Node(
        name='zstat_filename',
        interface=util.Function(
            function=sss_to_source,
            input_names=inspect.getargspec(sss_to_source)[0],
            output_names=['filename']))
    zstat_filename.inputs.source_format = "sub-{0}_ses-{1}_trial-{2}_zstat.nii.gz"
    pstat_filename = pe.Node(
        name='pstat_filename',
        interface=util.Function(
            function=sss_to_source,
            input_names=inspect.getargspec(sss_to_source)[0],
            output_names=['filename']))
    pstat_filename.inputs.source_format = "sub-{0}_ses-{1}_trial-{2}_pstat.nii.gz"
    pfstat_filename = pe.Node(
        name='pfstat_filename',
        interface=util.Function(
            function=sss_to_source,
            input_names=inspect.getargspec(sss_to_source)[0],
            output_names=['filename']))
    pfstat_filename.inputs.source_format = "sub-{0}_ses-{1}_trial-{2}_pfstat.nii.gz"

    datasink = pe.Node(nio.DataSink(), name='datasink')
    datasink.inputs.base_directory = path.join(l1_dir, workflow_name)
    datasink.inputs.parameterization = False

    workflow_connections = [
        (infosource, datafile_source, [('subject_session_scan',
                                        'subject_session_scan')]),
        (infosource, eventfile_source, [('subject_session_scan',
                                         'subject_session_scan')]),
        (eventfile_source, specify_model, [('out_file', 'event_files')]),
        (datafile_source, specify_model, [('out_file', 'functional_runs')]),
        (specify_model, level1design, [('session_info', 'session_info')]),
        (level1design, modelgen, [('ev_files', 'ev_files')]),
        (level1design, modelgen, [('fsf_files', 'fsf_file')]),
        (datafile_source, glm, [('out_file', 'in_file')]),
        (modelgen, glm, [('design_file', 'design')]),
        (modelgen, glm, [('con_file', 'contrasts')]),
        (infosource, datasink, [(('subject_session_scan', ss_to_path),
                                 'container')]),
        (infosource, cope_filename, [('subject_session_scan',
                                      'subject_session_scan')]),
        (infosource, varcb_filename, [('subject_session_scan',
                                       'subject_session_scan')]),
        (infosource, tstat_filename, [('subject_session_scan',
                                       'subject_session_scan')]),
        (infosource, zstat_filename, [('subject_session_scan',
                                       'subject_session_scan')]),
        (infosource, pstat_filename, [('subject_session_scan',
                                       'subject_session_scan')]),
        (infosource, pfstat_filename, [('subject_session_scan',
                                        'subject_session_scan')]),
        (cope_filename, glm, [('filename', 'out_cope')]),
        (varcb_filename, glm, [('filename', 'out_varcb_name')]),
        (tstat_filename, glm, [('filename', 'out_t_name')]),
        (zstat_filename, glm, [('filename', 'out_z_name')]),
        (pstat_filename, glm, [('filename', 'out_p_name')]),
        (pfstat_filename, glm, [('filename', 'out_pf_name')]),
        (glm, datasink, [('out_pf', '@pfstat')]),
        (glm, datasink, [('out_p', '@pstat')]),
        (glm, datasink, [('out_z', '@zstat')]),
        (glm, datasink, [('out_t', '@tstat')]),
        (glm, datasink, [('out_cope', '@cope')]),
        (glm, datasink, [('out_varcb', '@varcb')]),
    ]

    workdir_name = workflow_name + "_work"
    workflow = pe.Workflow(name=workdir_name)
    workflow.connect(workflow_connections)
    workflow.base_dir = l1_dir
    workflow.config = {
        "execution": {
            "crashdump_dir": path.join(l1_dir, "crashdump")
        }
    }
    workflow.write_graph(dotfilename=path.join(workflow.base_dir, workdir_name,
                                               "graph.dot"),
                         graph2use="hierarchical",
                         format="png")

    workflow.run(plugin="MultiProc", plugin_args={'n_procs': nprocs})
    if not keep_work:
        shutil.rmtree(path.join(l1_dir, workdir_name))
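
A hypothetical invocation of l1(), with placeholder paths and selectors, could look like this:

# Hedged usage sketch for l1(); all paths and identifiers are hypothetical.
l1(
    '~/ni_data/ofM.dr/preprocessing/generic',
    include={'subjects': ['4001', '4002']},  # restrict the analysis to these subjects
    habituation='confound',                  # model habituation as a nuisance regressor
    mask='~/ni_data/templates/ds_QBI_chr_bin.nii.gz',
    workflow_name='my_l1',
)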
Code example #12
def denoise(subject, sessions, data_dir, wd, sink, TR):

    #initiate min func preproc workflow
    wf = pe.Workflow(name='DENOISE_aCompCor')
    wf.base_dir = wd
    wf.config['execution']['crashdump_dir'] = wf.base_dir + "/crash_files"

    ## set fsl output type to nii.gz
    fsl.FSLCommand.set_default_output_type('NIFTI_GZ')

    # I/O nodes
    inputnode = pe.Node(util.IdentityInterface(fields=['subjid']),
                        name='inputnode')
    inputnode.inputs.subjid = subject

    ds = pe.Node(nio.DataSink(base_directory=sink, parameterization=False),
                 name='sink')

    #infosource to iterate over sessions: COND, EXT1, EXT2
    sessions_infosource = pe.Node(util.IdentityInterface(fields=['session']),
                                  name='session')
    sessions_infosource.iterables = [('session', sessions)]

    #select files
    templates = {
        'prefiltered': 'MPP/{subject}/{session}/prefiltered_func_data.nii.gz',
        'prefiltered_detrend':
        'MPP/{subject}/{session}/prefiltered_func_data_detrend.nii.gz',
        'prefiltered_detrend_Tmean':
        'MPP/{subject}/{session}/QC/prefiltered_func_data_detrend_Tmean.nii.gz',
        'prefiltered_mask':
        'MPP/{subject}/{session}/prefiltered_func_data_mask.nii.gz',
        'WM_msk': 'MASKS/{subject}/aparc_asec.WMmask_ero2EPI.nii.gz',
        'CSF_msk': 'MASKS/{subject}/aparc_asec.CSFmask_ero0EPI.nii.gz',
        'motion_par': 'MPP/{subject}/{session}/MOCO/func_data_stc_moco.par'
    }

    selectfiles = pe.Node(nio.SelectFiles(templates, base_directory=data_dir),
                          name='selectfiles')

    wf.connect(inputnode, 'subjid', selectfiles, 'subject')
    wf.connect(sessions_infosource, 'session', selectfiles, 'session')
    wf.connect(sessions_infosource, 'session', ds, 'container')

    ##########################################################################
    ########################    START   ######################################
    ##########################################################################

    ###########################################################################
    ########################    No. 1  ######################################
    #the script outputs only std DVARS
    DVARS = pe.Node(util.Function(
        input_names=['in_file', 'in_mask', 'out_std_name'],
        output_names=['out_std', 'out_nstd', 'out_vx_std'],
        function=compute_dvars),
                    name='DVARS')

    DVARS.inputs.out_std_name = 'stdDVARS_pre.txt'
    wf.connect(selectfiles, 'prefiltered_detrend', DVARS, 'in_file')
    wf.connect(selectfiles, 'prefiltered_mask', DVARS, 'in_mask')
    wf.connect(DVARS, 'out_std', ds, 'QC.@DVARS')

    ###########################################################################
    ########################    No. 2   ######################################
    # DEMEAN and DETREND the data, which are used to get nuisance regressors

    def run_demean_detrend(in_file):
        import nibabel as nb
        import numpy as np
        import os
        from scipy.signal import detrend

        img = nb.load(in_file)
        imgseries = img.get_fdata(dtype=np.float32)
        imgseries_new = detrend(imgseries, type='linear')

        new = nb.nifti1.Nifti1Image(imgseries_new,
                                    header=img.header,
                                    affine=img.affine)
        out_file = os.path.join(os.getcwd(),
                                'prefiltered_func_data_demean_detrend.nii.gz')
        new.to_filename(out_file)
        del imgseries, imgseries_new, new
        return out_file

    demean_detrend = pe.Node(util.Function(input_names=['in_file'],
                                           output_names=['out_file'],
                                           function=run_demean_detrend),
                             name='demean_detrend')

    wf.connect(selectfiles, 'prefiltered', demean_detrend, 'in_file')
    wf.connect(demean_detrend, 'out_file', ds, 'TEMP.@demean_detrend')
    ###########################################################################
    ########################    No. 3A   ######################################
    #PREPARE WM_CSF MASK

    WM_CSF_msk = pe.Node(fsl.BinaryMaths(operation='add'), name='wm_csf_msk')

    wf.connect(selectfiles, 'WM_msk', WM_CSF_msk, 'in_file')
    wf.connect(selectfiles, 'CSF_msk', WM_CSF_msk, 'operand_file')

    #restrict the combined WM/CSF mask to the coverage of the functional data (multiply by the functional brain mask)
    func_msk = pe.Node(fsl.BinaryMaths(operation='mul',
                                       out_file='WM_CSFmsk.nii.gz'),
                       name='func_masking')

    wf.connect(WM_CSF_msk, 'out_file', func_msk, 'in_file')
    wf.connect(selectfiles, 'prefiltered_mask', func_msk, 'operand_file')
    wf.connect(func_msk, 'out_file', ds, 'TEMP.@masks')

    ###########################################################################
    ########################    No. 3B   ######################################
    #PREPARE MOTION REGRESSORS: FRISTON-24 AND TRENDS

    friston24 = pe.Node(util.Function(input_names=['in_file'],
                                      output_names=['out_file'],
                                      function=calc_friston_twenty_four),
                        name='friston24')

    wf.connect(selectfiles, 'motion_par', friston24, 'in_file')
    wf.connect(friston24, 'out_file', ds, 'TEMP.@friston24')

    # linear and quadratic trends
    trends = pe.Node(util.Function(input_names=['nr_vols'],
                                   output_names=['out_file'],
                                   function=calc_trends),
                     name='trends')

    def get_nr_vols(in_file):
        import nibabel as nb
        img = nb.load(in_file)
        return img.shape[3]

    wf.connect(demean_detrend, ('out_file', get_nr_vols), trends, 'nr_vols')
    wf.connect(trends, 'out_file', ds, 'TEMP.@trends')

    ###########################################################################
    ########################    No. 3C   ######################################
    #aCOMP_COR
    aCompCor = pe.Node(util.Function(input_names=['in_file', 'in_mask'],
                                     output_names=['out_file'],
                                     function=calc_compcor),
                       name='aCompCor')

    wf.connect(demean_detrend, 'out_file', aCompCor, 'in_file')
    wf.connect(func_msk, 'out_file', aCompCor, 'in_mask')
    wf.connect(aCompCor, 'out_file', ds, 'TEMP.@aCompCor')

    ###########################################################################
    ########################    No. 4   ######################################
    #PREP the nuisance model

    # merge the nuisance regressors (aCompCor components, Friston-24 motion parameters, and trends) into one design matrix
    def mergetxt(filelist, fname):
        import pandas as pd
        import os
        for n, f in enumerate(filelist):
            if n == 0:
                data = pd.read_csv(f, header=None, sep='\t')
            else:
                data_new = pd.read_csv(f, header=None, sep='\t')
                data = pd.concat([data, data_new], axis=1)

        out_file = os.path.join(os.getcwd(), 'nuisance' + fname + '.mat')
        data.to_csv(out_file, index=False, header=None, sep='\t')
        return out_file

    merge_nuisance = pe.Node(util.Merge(3),
                             infields=['in1', 'in2', 'in3'],
                             name='merge_nuisance')

    wf.connect(aCompCor, 'out_file', merge_nuisance, 'in1')
    wf.connect(friston24, 'out_file', merge_nuisance, 'in2')
    wf.connect(trends, 'out_file', merge_nuisance, 'in3')

    nuisance_txt = pe.Node(util.Function(input_names=['filelist', 'fname'],
                                         output_names=['out_file'],
                                         function=mergetxt),
                           name='nuisance_txt')

    nuisance_txt.inputs.fname = '_model'
    wf.connect(merge_nuisance, 'out', nuisance_txt, 'filelist')
    wf.connect(nuisance_txt, 'out_file', ds, 'TEMP.@nuisance_txt')

    ###########################################################################
    ########################    No. 5   ######################################
    #run nuisance regression on prefiltered raw data

    regression = pe.Node(fsl.GLM(demean=True), name='regression')

    regression.inputs.out_res_name = 'residuals.nii.gz'
    regression.inputs.out_f_name = 'residuals_fstats.nii.gz'
    regression.inputs.out_pf_name = 'residuals_pstats.nii.gz'
    regression.inputs.out_z_name = 'residuals_zstats.nii.gz'

    wf.connect(nuisance_txt, 'out_file', regression, 'design')
    wf.connect(selectfiles, 'prefiltered', regression, 'in_file')
    wf.connect(selectfiles, 'prefiltered_mask', regression, 'mask')

    wf.connect(regression, 'out_f', ds, 'REGRESSION.@out_f_name')
    wf.connect(regression, 'out_pf', ds, 'REGRESSION.@out_pf_name')
    wf.connect(regression, 'out_z', ds, 'REGRESSION.@out_z_name')

    ########################   FIX HEADER TR AFTER FSL_GLM   #################
    fixhd = pe.Node(fsl.utils.CopyGeom(), name='fixhd')

    wf.connect(regression, 'out_res', fixhd, 'dest_file')
    wf.connect(selectfiles, 'prefiltered', fixhd, 'in_file')
    wf.connect(fixhd, 'out_file', ds, 'REGRESSION.@res_out')

    ###########################################################################
    ########################    No. 6   ######################################
    #apply HP FILTER of 0.01Hz
    #100/1.96/2 = 25.51
    hp_filter = pe.Node(fsl.maths.TemporalFilter(
        highpass_sigma=25.51, out_file='residuals_hp01.nii.gz'),
                        name='highpass')

    wf.connect(fixhd, 'out_file', hp_filter, 'in_file')
    wf.connect(hp_filter, 'out_file', ds, 'TEMP.@hp')

    #add the mean back for smoothing
    addmean = pe.Node(fsl.BinaryMaths(
        operation='add', out_file='filtered_func_data_hp01.nii.gz'),
                      name='addmean')

    wf.connect(hp_filter, 'out_file', addmean, 'in_file')
    wf.connect(selectfiles, 'prefiltered_detrend_Tmean', addmean,
               'operand_file')
    wf.connect(addmean, 'out_file', ds, '@out')

    ###########################################################################
    ########################    No. 7   ######################################
    ## COMPUTE POST DVARS
    DVARSpost = pe.Node(util.Function(
        input_names=['in_file', 'in_mask', 'out_std_name'],
        output_names=['out_std', 'out_nstd', 'out_vx_std'],
        function=compute_dvars),
                        name='DVARSpost')

    DVARSpost.inputs.out_std_name = 'stdDVARS_post.txt'

    wf.connect(addmean, 'out_file', DVARSpost, 'in_file')
    wf.connect(selectfiles, 'prefiltered_mask', DVARSpost, 'in_mask')
    wf.connect(DVARSpost, 'out_std', ds, 'QC.@DVARSpost')

    ###########################################################################
    ########################    No. 8   ######################################
    #SMOOTHING of 6fwhm

    merge_datasets = pe.Node(util.Merge(2),
                             infields=['in1', 'in2'],
                             name='merge_datasets')

    wf.connect(addmean, 'out_file', merge_datasets, 'in1')
    wf.connect(selectfiles, 'prefiltered_detrend', merge_datasets, 'in2')

    median = pe.MapNode(fsl.utils.ImageStats(op_string='-k %s -p 50'),
                        name='median',
                        iterfield=['in_file'])

    wf.connect(merge_datasets, 'out', median, 'in_file')
    wf.connect(selectfiles, 'prefiltered_mask', median, 'mask_file')

    smooth = pe.MapNode(
        fsl.SUSAN(fwhm=6.0),
        name='smooth',
        iterfield=['in_file', 'brightness_threshold', 'usans', 'out_file'])
    smooth.inputs.out_file = [
        'filtered_func_data_hp01_sm6fwhm.nii.gz',
        'prefiltered_func_data_detrend_sm6fwhm.nii.gz'
    ]

    merge_usans = pe.MapNode(util.Merge(2),
                             infields=['in1', 'in2'],
                             name='merge_usans',
                             iterfield=['in2'])

    wf.connect(selectfiles, 'prefiltered_detrend_Tmean', merge_usans, 'in1')
    wf.connect(median, 'out_stat', merge_usans, 'in2')

    def getbtthresh(medianvals):
        return [0.75 * val for val in medianvals]

    def getusans(x):
        return [[tuple([val[0], 0.75 * val[1]])] for val in x]

    wf.connect(merge_datasets, 'out', smooth, 'in_file')
    wf.connect(median, ('out_stat', getbtthresh), smooth,
               'brightness_threshold')
    wf.connect(merge_usans, ('out', getusans), smooth, 'usans')
    wf.connect(smooth, 'smoothed_file', ds, '@smoothout')

    ###########################################################################
    ########################    RUN   ######################################
    wf.write_graph(dotfilename='wf.dot',
                   graph2use='colored',
                   format='pdf',
                   simple_form=True)
    wf.run(plugin='MultiProc', plugin_args={'n_procs': 2})
    #wf.run()
    return
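
The function executes the workflow as soon as it is called; a hedged usage sketch with placeholder arguments:

# Hedged usage sketch for denoise(); all arguments are hypothetical.
denoise(
    subject='sub-01',
    sessions=['COND', 'EXT1', 'EXT2'],  # session labels iterated over by the workflow
    data_dir='/project/data',
    wd='/project/work',
    sink='/project/denoised',
    TR=2.0,
)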
Code example #13
def init_dualregression_wf(analysis, memcalc=MemoryCalculator()):
    """
    create a workflow to calculate dual regression for ICA seeds
    """
    assert isinstance(analysis, Analysis)
    assert isinstance(analysis.tags, Tags)

    # make bold file variant specification
    confoundsfilefields = []
    varianttupls = [("space", analysis.tags.space)]
    if analysis.tags.grand_mean_scaled is not None:
        assert isinstance(analysis.tags.grand_mean_scaled, GrandMeanScaledTag)
        varianttupls.append(analysis.tags.grand_mean_scaled.as_tupl())
    if analysis.tags.band_pass_filtered is not None:
        assert isinstance(analysis.tags.band_pass_filtered,
                          BandPassFilteredTag)
        varianttupls.append(analysis.tags.band_pass_filtered.as_tupl())
    if analysis.tags.confounds_removed is not None:
        assert isinstance(analysis.tags.confounds_removed, ConfoundsRemovedTag)
        confounds_removed_names = tuple(
            name for name in analysis.tags.confounds_removed.names
            if "aroma_motion" in name)
        varianttupls.append(("confounds_removed", confounds_removed_names))
        confounds_extract_names = tuple(
            name for name in analysis.tags.confounds_removed.names
            if "aroma_motion" not in name)
        if len(confounds_extract_names) > 0:
            confoundsfilefields.append("confounds_file")
            varianttupls.append(("confounds_extract", confounds_extract_names))
    if analysis.tags.smoothed is not None:
        assert isinstance(analysis.tags.smoothed, SmoothedTag)
        varianttupls.append(analysis.tags.smoothed.as_tupl())

    boldfilevariant = (("bold_file", *confoundsfilefields),
                       tuple(varianttupls))

    assert analysis.name is not None
    workflow = pe.Workflow(name=analysis.name)

    # input
    inputnode = pe.Node(
        niu.IdentityInterface(fields=[
            "bold_file",
            *confoundsfilefields,
            "mask_file",
            "map_files",
            "map_components",
            "metadata",
        ]),
        name="inputnode",
    )

    resampleifneeded = pe.MapNode(
        interface=ResampleIfNeeded(method="continuous"),
        name="resampleifneeded",
        iterfield=["in_file"],
        mem_gb=memcalc.series_std_gb,
    )
    workflow.connect(inputnode, "map_files", resampleifneeded, "in_file")
    workflow.connect(inputnode, "bold_file", resampleifneeded, "ref_file")

    # Delete zero voxels for mean time series
    applymask = pe.MapNode(
        interface=fsl.ApplyMask(),
        name="applymask",
        iterfield="in_file",
        mem_gb=memcalc.volume_std_gb,
    )
    workflow.connect([
        (inputnode, applymask, [("mask_file", "mask_file")]),
        (resampleifneeded, applymask, [("out_file", "in_file")]),
    ])

    # first step, calculate spatial regression of ICA components on to the
    # bold file
    glm0 = pe.MapNode(
        interface=fsl.GLM(
            out_file="beta",
            demean=True,
        ),
        name="glm0",
        iterfield="design",
        mem_gb=memcalc.series_std_gb * 10,
    )
    workflow.connect([
        (applymask, glm0, [("out_file", "design")]),
        (inputnode, glm0, [("bold_file", "in_file"), ("mask_file", "mask")]),
    ])

    # second step, calculate the temporal regression of the time series
    # from the first step on to the bold file
    def make_contrastmat(map_file=None, confounds_file=None):
        """
        extract number of ICA components from 4d image and name them
        """
        import os
        from os import path as op

        from pipeline.utils import nvol, ncol
        import numpy as np

        ncomponents = nvol(map_file)
        if confounds_file is not None:
            nconfounds = ncol(confounds_file)
        else:
            nconfounds = 0
        contrastmat = np.zeros((ncomponents, ncomponents + nconfounds))
        contrastmat[:ncomponents, :ncomponents] = np.eye(ncomponents)

        out_file = op.join(os.getcwd(), "contrasts.tsv")
        np.savetxt(out_file, contrastmat, delimiter="\t")
        return out_file

    contrastmat = pe.MapNode(
        interface=niu.Function(
            input_names=["map_file", *confoundsfilefields],
            output_names=["out_file"],
            function=make_contrastmat,
        ),
        iterfield="map_file",
        name="contrastmat",
    )
    workflow.connect([(inputnode, contrastmat, [("map_files", "map_file")])])
    if confoundsfilefields:
        workflow.connect([(inputnode, contrastmat, [(*confoundsfilefields,
                                                     *confoundsfilefields)])])

    designnode = pe.Node(niu.IdentityInterface(fields=["design"]),
                         name="designnode")
    if confoundsfilefields:
        mergecolumns = pe.MapNode(
            interface=MergeColumnsTSV(2),
            name="mergecolumns",
            mem_gb=memcalc.min_gb,
            iterfield="in1",
            run_without_submitting=True,
        )
        workflow.connect([
            (glm0, mergecolumns, [("out_file", "in1")]),
            (inputnode, mergecolumns, [(*confoundsfilefields, "in2")]),
            (mergecolumns, designnode, [("out_file", "design")]),
        ])
    else:
        workflow.connect([(glm0, designnode, [("out_file", "design")])])

    glm1 = pe.MapNode(
        interface=fsl.GLM(
            out_file="beta.nii.gz",
            out_cope="cope.nii.gz",
            out_varcb_name="varcope.nii.gz",
            out_z_name="zstat.nii.gz",
            demean=True,
        ),
        name="glm1",
        iterfield=["design", "contrasts"],
        mem_gb=memcalc.series_std_gb * 10,
    )
    workflow.connect([
        (inputnode, glm1, [("bold_file", "in_file"), ("mask_file", "mask")]),
        (contrastmat, glm1, [("out_file", "contrasts")]),
        (designnode, glm1, [("design", "design")]),
    ])

    splitcopesimages = pe.MapNode(interface=fsl.Split(dimension="t"),
                                  iterfield="in_file",
                                  name="splitcopesimages")
    splitvarcopesimage = pe.MapNode(interface=fsl.Split(dimension="t"),
                                    iterfield="in_file",
                                    name="splitvarcopesimage")
    splitzstatsimage = pe.MapNode(interface=fsl.Split(dimension="t"),
                                  iterfield="in_file",
                                  name="splitzstatsimage")
    workflow.connect([
        (glm1, splitcopesimages, [("out_cope", "in_file")]),
        (glm1, splitvarcopesimage, [("out_varcb", "in_file")]),
        (glm1, splitzstatsimage, [("out_z", "in_file")]),
    ])

    # make dof volume
    makedofvolume = pe.MapNode(
        interface=MakeDofVolume(),
        iterfield=["design"],
        name="makedofvolume",
    )
    workflow.connect([
        (inputnode, makedofvolume, [("bold_file", "bold_file")]),
        (designnode, makedofvolume, [("design", "design")]),
    ])

    # output

    outputnode = pe.Node(
        interface=MakeResultdicts(keys=[
            "firstlevelanalysisname",
            "firstlevelfeaturename",
            "cope",
            "varcope",
            "zstat",
            "dof_file",
            "mask_file",
        ]),
        name="outputnode",
    )
    outputnode.inputs.firstlevelanalysisname = analysis.name
    workflow.connect([
        (
            inputnode,
            outputnode,
            [
                ("metadata", "basedict"),
                ("mask_file", "mask_file"),
                (("map_components", ravel), "firstlevelfeaturename"),
            ],
        ),
        (makedofvolume, outputnode, [("out_file", "dof_file")]),
        (splitcopesimages, outputnode, [(("out_files", ravel), "cope")]),
        (splitvarcopesimage, outputnode, [(("out_files", ravel), "varcope")]),
        (splitzstatsimage, outputnode, [(("out_files", ravel), "zstat")]),
    ])

    return workflow, (boldfilevariant, )
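
The make_contrastmat helper above pads an identity matrix with zero columns, so that only the ICA-component betas, and none of the extracted confounds, enter the contrasts. A standalone sketch of the resulting matrix, assuming a hypothetical case of 3 components and 2 confounds:

# Hedged sketch of the contrast matrix built by make_contrastmat,
# for a hypothetical case of 3 ICA components and 2 extracted confounds.
import numpy as np

ncomponents, nconfounds = 3, 2
contrastmat = np.zeros((ncomponents, ncomponents + nconfounds))
contrastmat[:ncomponents, :ncomponents] = np.eye(ncomponents)
print(contrastmat)
# [[1. 0. 0. 0. 0.]
#  [0. 1. 0. 0. 0.]
#  [0. 0. 1. 0. 0.]]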
Code example #14
# prepare the nuisance factors
df = pd.concat([
    dread['X'], dread['Y'], dread['Z'], dread['RotX'], dread['RotY'],
    dread['RotZ'], dread['FramewiseDisplacement'], dread['aCompCor00'],
    dread['aCompCor01'], dread['aCompCor02'], dread['aCompCor03'],
    dread['aCompCor04'], dread['aCompCor05']
],
               axis=1)

design_out = conf_fil[:-4] + '_small.csv'
print(design_out)

df.to_csv(design_out, sep='\t', index=False, header=False)

# regress out nuisance factors using fsl_glm
glm = fsl.GLM(in_file=img_removed,
              mask=img_mask,
              design=design_out,
              demean=True,
              out_res_name=img_removed[:-7] + '_denois.nii.gz',
              output_type='NIFTI_GZ')
glm.run()

# detrend timeseries using afni-3dDetrend
detrend = afni.Detrend()
detrend.inputs.in_file = img_removed[:-7] + '_denois.nii.gz'
detrend.inputs.args = '-polort 2'
detrend.inputs.outputtype = 'NIFTI_GZ'
detrend.inputs.out_file = img_removed[:-7] + '_denois_detrend.nii.gz'
print(detrend.cmdline)
detrend.run()
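
For reference, the Detrend node above assembles a single AFNI call; a hedged sketch for previewing the command line before running it, with hypothetical file names:

# Hedged sketch: previewing the 3dDetrend command assembled by nipype.
from nipype.interfaces import afni

detrend = afni.Detrend(in_file='func_denois.nii.gz',  # hypothetical input
                       args='-polort 2',              # quadratic detrending
                       outputtype='NIFTI_GZ',
                       out_file='func_denois_detrend.nii.gz')
print(detrend.cmdline)  # roughly: 3dDetrend -polort 2 -prefix func_denois_detrend.nii.gz func_denois.nii.gz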
Code example #15
def seed_fc(
    preprocessing_dir,
    exclude={},
    habituation='confound',
    highpass_sigma=225,
    lowpass_sigma=False,
    include={},
    keep_work=False,
    out_dir="",
    mask="",
    match_regex='sub-(?P<sub>[a-zA-Z0-9]+)/ses-(?P<ses>[a-zA-Z0-9]+)/func/.*?_task-(?P<task>[a-zA-Z0-9]+)_acq-(?P<acq>[a-zA-Z0-9]+)_(?P<mod>[a-zA-Z0-9]+)\.(?:tsv|nii|nii\.gz)',
    nprocs=N_PROCS,
    tr=1,
    workflow_name="generic",
    modality="cbv",
):
    """Calculate subject level seed-based functional connectivity via the `fsl_glm` command.

	Parameters
	----------

	exclude : dict
		A dictionary with any combination of "sessions", "subjects", "tasks" as keys and corresponding identifiers as values.
		If this is specified, matching entries will be excluded from the analysis.
	habituation : {"", "confound", "separate_contrast", "in_main_contrast"}, optional
		How the habituation regressor should be handled.
		Anything which evaluates as False (though we recommend "") means no habituation regressor will be introduced.
	highpass_sigma : int, optional
		Highpass threshold (in seconds).
	include : dict
		A dictionary with any combination of "sessions", "subjects", "tasks" as keys and corresponding identifiers as values.
		If this is specified, only matching entries will be included in the analysis.
	keep_work : bool, optional
		Whether to keep the work directory (containing all the intermediary workflow steps, as managed by nipype).
		This is useful for debugging and quality control.
	out_dir : str, optional
		Path to the directory inside which both the working directory and the output directory will be created.
	mask : str, optional
		Path to the brain mask which shall be used to define the brain volume in the analysis.
		This has to point to an existing NIfTI file containing zero and one values only.
	match_regex : str, optional
		Regex matching pattern by which to select input files. Has to contain groups named "sub", "ses", "acq", "task", and "mod".
	nprocs : int, optional
		Maximum number of processes to spawn simultaneously for the workflow.
		If not explicitly defined, this is automatically calculated from the number of available cores and under the assumption that the workflow will be the main process running for the duration that it is running.
	tr : int, optional
		Repetition time, in seconds.
	workflow_name : str, optional
		Name of the workflow; this will also be the name of the final output directory produced under `out_dir`.
	"""

    preprocessing_dir = path.abspath(path.expanduser(preprocessing_dir))
    if not out_dir:
        # `bids_base` is expected to be defined in the enclosing scope
        out_dir = path.join(bids_base, 'l1')
    else:
        out_dir = path.abspath(path.expanduser(out_dir))

    datafind = nio.DataFinder()
    datafind.inputs.root_paths = preprocessing_dir
    datafind.inputs.match_regex = match_regex
    datafind_res = datafind.run()
    out_paths = [
        path.abspath(path.expanduser(i))
        for i in datafind_res.outputs.out_paths
    ]
    data_selection = zip(*[
        datafind_res.outputs.sub, datafind_res.outputs.ses,
        datafind_res.outputs.acq, datafind_res.outputs.task,
        datafind_res.outputs.mod, out_paths
    ])
    data_selection = [list(i) for i in data_selection]
    data_selection = pd.DataFrame(data_selection,
                                  columns=('subject', 'session', 'acquisition',
                                           'task', 'modality', 'path'))
    if exclude:
        for key in exclude:
            data_selection = data_selection[~data_selection[key].
                                            isin(exclude[key])]
    if include:
        for key in include:
            data_selection = data_selection[data_selection[key].isin(
                include[key])]
    bids_dictionary = data_selection[
        data_selection['modality'] ==
        modality].drop_duplicates().T.to_dict().values()

    infosource = pe.Node(
        interface=util.IdentityInterface(fields=['bids_dictionary']),
        name="infosource")
    infosource.iterables = [('bids_dictionary', bids_dictionary)]

    datafile_source = pe.Node(
        name='datafile_source',
        interface=util.Function(
            function=select_from_datafind_df,
            input_names=inspect.getargspec(select_from_datafind_df)[0],
            output_names=['out_file']))
    datafile_source.inputs.bids_dictionary_override = {'modality': modality}
    datafile_source.inputs.df = data_selection

    seed_timecourse = pe.Node(
        name='seed_timecourse',
        interface=util.Function(
            function=select_from_datafind_df,
            input_names=inspect.getargspec(select_from_datafind_df)[0],
            output_names=['out_file']))

    specify_model = pe.Node(interface=SpecifyModel(), name="specify_model")
    specify_model.inputs.input_units = 'secs'
    specify_model.inputs.time_repetition = tr
    specify_model.inputs.high_pass_filter_cutoff = highpass_sigma
    specify_model.inputs.habituation_regressor = bool(habituation)

    level1design = pe.Node(interface=Level1Design(), name="level1design")
    level1design.inputs.interscan_interval = tr
    if bf_path:  # `bf_path` (basis function file) is expected to be defined in the enclosing scope
        bf_path = path.abspath(path.expanduser(bf_path))
        level1design.inputs.bases = {"custom": {"bfcustompath": bf_path}}
    # level1design.inputs.bases = {'gamma': {'derivs':False, 'gammasigma':10, 'gammadelay':5}}
    level1design.inputs.orthogonalization = {
        1: {
            0: 0,
            1: 0,
            2: 0
        },
        2: {
            0: 1,
            1: 1,
            2: 0
        }
    }
    level1design.inputs.model_serial_correlations = True
    if habituation == "separate_contrast":
        level1design.inputs.contrasts = [
            ('allStim', 'T', ["e0"], [1]), ('allStim', 'T', ["e1"], [1])
        ]  #condition names as defined in specify_model
    elif habituation == "in_main_contrast":
        level1design.inputs.contrasts = [
            ('allStim', 'T', ["e0", "e1"], [1, 1])
        ]  #condition names as defined in specify_model
    elif habituation == "confound" or not habituation:
        level1design.inputs.contrasts = [
            ('allStim', 'T', ["e0"], [1])
        ]  #condition names as defined in specify_model
    else:
        raise ValueError(
            'The value you have provided for the `habituation` parameter, namely "{}", is invalid. Please choose one of: {"confound","in_main_contrast","separate_contrast"}'
            .format(habituation))

    modelgen = pe.Node(interface=fsl.FEATModel(), name='modelgen')
    modelgen.inputs.ignore_exception = True

    glm = pe.Node(interface=fsl.GLM(), name='glm', iterfield='design')
    glm.inputs.out_cope = "cope.nii.gz"
    glm.inputs.out_varcb_name = "varcb.nii.gz"
    #not setting a betas output file might lead to beta export in lieu of COPEs
    glm.inputs.out_file = "betas.nii.gz"
    glm.inputs.out_t_name = "t_stat.nii.gz"
    glm.inputs.out_p_name = "p_stat.nii.gz"
    if mask:
        glm.inputs.mask = path.abspath(path.expanduser(mask))
    glm.inputs.ignore_exception = True

    cope_filename = pe.Node(
        name='cope_filename',
        interface=util.Function(
            function=bids_dict_to_source,
            input_names=inspect.getargspec(bids_dict_to_source)[0],
            output_names=['filename']))
    cope_filename.inputs.source_format = "sub-{subject}_ses-{session}_task-{task}_acq-{acquisition}_{modality}_cope.nii.gz"
    varcb_filename = pe.Node(
        name='varcb_filename',
        interface=util.Function(
            function=bids_dict_to_source,
            input_names=inspect.getargspec(bids_dict_to_source)[0],
            output_names=['filename']))
    varcb_filename.inputs.source_format = "sub-{subject}_ses-{session}_task-{task}_acq-{acquisition}_{modality}_varcb.nii.gz"
    tstat_filename = pe.Node(
        name='tstat_filename',
        interface=util.Function(
            function=bids_dict_to_source,
            input_names=inspect.getargspec(bids_dict_to_source)[0],
            output_names=['filename']))
    tstat_filename.inputs.source_format = "sub-{subject}_ses-{session}_task-{task}_acq-{acquisition}_{modality}_tstat.nii.gz"
    zstat_filename = pe.Node(
        name='zstat_filename',
        interface=util.Function(
            function=bids_dict_to_source,
            input_names=inspect.getargspec(bids_dict_to_source)[0],
            output_names=['filename']))
    zstat_filename.inputs.source_format = "sub-{subject}_ses-{session}_task-{task}_acq-{acquisition}_{modality}_zstat.nii.gz"
    pstat_filename = pe.Node(
        name='pstat_filename',
        interface=util.Function(
            function=bids_dict_to_source,
            input_names=inspect.getargspec(bids_dict_to_source)[0],
            output_names=['filename']))
    pstat_filename.inputs.source_format = "sub-{subject}_ses-{session}_task-{task}_acq-{acquisition}_{modality}_pstat.nii.gz"
    pfstat_filename = pe.Node(
        name='pfstat_filename',
        interface=util.Function(
            function=bids_dict_to_source,
            input_names=inspect.getargspec(bids_dict_to_source)[0],
            output_names=['filename']))
    pfstat_filename.inputs.source_format = "sub-{subject}_ses-{session}_task-{task}_acq-{acquisition}_{modality}_pfstat.nii.gz"

    datasink = pe.Node(nio.DataSink(), name='datasink')
    datasink.inputs.base_directory = path.join(out_dir, workflow_name)
    datasink.inputs.parameterization = False

    workflow_connections = [
        (infosource, datafile_source, [('bids_dictionary', 'bids_dictionary')
                                       ]),
        (infosource, seed_timecourse, [('bids_dictionary', 'bids_dictionary')
                                       ]),
        # the seed time course serves as the event/regressor input to the model
        (seed_timecourse, specify_model, [('out_file', 'event_files')]),
        (specify_model, level1design, [('session_info', 'session_info')]),
        (level1design, modelgen, [('ev_files', 'ev_files')]),
        (level1design, modelgen, [('fsf_files', 'fsf_file')]),
        (modelgen, glm, [('design_file', 'design')]),
        (modelgen, glm, [('con_file', 'contrasts')]),
        (infosource, datasink, [(('bids_dictionary', bids_dict_to_dir),
                                 'container')]),
        (infosource, cope_filename, [('bids_dictionary', 'bids_dictionary')]),
        (infosource, varcb_filename, [('bids_dictionary', 'bids_dictionary')]),
        (infosource, tstat_filename, [('bids_dictionary', 'bids_dictionary')]),
        (infosource, zstat_filename, [('bids_dictionary', 'bids_dictionary')]),
        (infosource, pstat_filename, [('bids_dictionary', 'bids_dictionary')]),
        (infosource, pfstat_filename, [('bids_dictionary', 'bids_dictionary')
                                       ]),
        (cope_filename, glm, [('filename', 'out_cope')]),
        (varcb_filename, glm, [('filename', 'out_varcb_name')]),
        (tstat_filename, glm, [('filename', 'out_t_name')]),
        (zstat_filename, glm, [('filename', 'out_z_name')]),
        (pstat_filename, glm, [('filename', 'out_p_name')]),
        (pfstat_filename, glm, [('filename', 'out_pf_name')]),
        (glm, datasink, [('out_pf', '@pfstat')]),
        (glm, datasink, [('out_p', '@pstat')]),
        (glm, datasink, [('out_z', '@zstat')]),
        (glm, datasink, [('out_t', '@tstat')]),
        (glm, datasink, [('out_cope', '@cope')]),
        (glm, datasink, [('out_varcb', '@varcb')]),
    ]

    if highpass_sigma or lowpass_sigma:
        bandpass = pe.Node(interface=fsl.maths.TemporalFilter(),
                           name="bandpass")
        bandpass.inputs.highpass_sigma = highpass_sigma
        if lowpass_sigma:
            bandpass.inputs.lowpass_sigma = lowpass_sigma
        else:
            bandpass.inputs.lowpass_sigma = tr
        workflow_connections.extend([
            (datafile_source, bandpass, [('out_file', 'in_file')]),
            (bandpass, specify_model, [('out_file', 'functional_runs')]),
            (bandpass, glm, [('out_file', 'in_file')]),
        ])
    else:
        workflow_connections.extend([
            (datafile_source, specify_model, [('out_file', 'functional_runs')
                                              ]),
            (datafile_source, glm, [('out_file', 'in_file')]),
        ])

    workdir_name = workflow_name + "_work"
    workflow = pe.Workflow(name=workdir_name)
    workflow.connect(workflow_connections)
    workflow.base_dir = out_dir
    workflow.config = {
        "execution": {
            "crashdump_dir": path.join(out_dir, "crashdump")
        }
    }
    workflow.write_graph(dotfilename=path.join(workflow.base_dir, workdir_name,
                                               "graph.dot"),
                         graph2use="hierarchical",
                         format="png")

    workflow.run(plugin="MultiProc", plugin_args={'n_procs': nprocs})
    if not keep_work:
        shutil.rmtree(path.join(out_dir, workdir_name))
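
A hypothetical invocation of seed_fc(), with placeholder paths, might be:

# Hedged usage sketch for seed_fc(); all paths are hypothetical.
seed_fc(
    '~/ni_data/ofM.dr/preprocessing/generic',
    mask='~/ni_data/templates/DSURQEc_200micron_bin.nii.gz',
    highpass_sigma=225,
    tr=1,
    workflow_name='seed_fc_generic',
)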
Code example #16
def create_denoise_pipeline(name='denoise'):
    # workflow
    denoise = Workflow(name='denoise')
    # Define nodes
    inputnode = Node(interface=util.IdentityInterface(fields=[
        'anat_brain', 'brain_mask', 'flirt_mat', 'unwarped_mean', 'epi_coreg',
        'highpass_sigma', 'tr'
    ]),
                     name='inputnode')
    outputnode = Node(interface=util.IdentityInterface(fields=[
        'wmcsf_mask', 'brain2epi', 'wmcsf_mask2epi', 'combined_motion',
        'comp_regressor', 'comp_F', 'comp_pF', 'out_betas', 'ts_fullspectrum',
        'normalized_file'
    ]),
                      name='outputnode')
    # run fast to get tissue probability classes
    fast = Node(fsl.FAST(), name='fast')
    denoise.connect([(inputnode, fast, [('anat_brain', 'in_files')])])

    # functions to select tissue classes
    def selectindex(files, idx):
        import numpy as np
        from nipype.utils.filemanip import filename_to_list, list_to_filename
        return list_to_filename(
            np.array(filename_to_list(files))[idx].tolist())

    def selectsingle(files, idx):
        return files[idx]

    # binarize tissue classes
    binarize_tissue = MapNode(
        fsl.ImageMaths(op_string='-nan -thr 0.99 -ero -bin'),
        iterfield=['in_file'],
        name='binarize_tissue')
    denoise.connect([
        (fast, binarize_tissue, [(('partial_volume_files', selectindex,
                                   [0, 2]), 'in_file')]),
    ])
    # combine tissue classes to noise mask
    wmcsf_mask = Node(fsl.BinaryMaths(operation='add',
                                      out_file='wmcsf_mask.nii'),
                      name='wmcsf_mask')
    denoise.connect([(binarize_tissue, wmcsf_mask,
                      [(('out_file', selectsingle, 0), 'in_file'),
                       (('out_file', selectsingle, 1), 'operand_file')]),
                     (wmcsf_mask, outputnode, [('out_file', 'wmcsf_mask')])])

    # project wm_csf mask from anatomical to original epi space using inverse FLIRT-matrix
    invmat = Node(fsl.ConvertXFM(), name='invmat')
    invmat.inputs.invert_xfm = True

    apply_inv = Node(fsl.ApplyXfm(), name='apply_inv')
    apply_inv.inputs.apply_xfm = True
    denoise.connect([(inputnode, invmat, [('flirt_mat', 'in_file')]),
                     (invmat, apply_inv, [('out_file', 'in_matrix_file')]),
                     (inputnode, apply_inv, [('unwarped_mean', 'reference')]),
                     (wmcsf_mask, apply_inv, [('out_file', 'in_file')]),
                     (apply_inv, outputnode, [('out_file', 'wmcsf_mask2epi')])
                     ])
    #project brain to epi space as a checkup
    apply_inv_brain = Node(fsl.ApplyXfm(), name='apply_inv_brain')
    apply_inv_brain.inputs.apply_xfm = True
    denoise.connect([
        (invmat, apply_inv_brain, [('out_file', 'in_matrix_file')]),
        (inputnode, apply_inv_brain, [('unwarped_mean', 'reference')]),
        (inputnode, apply_inv_brain, [('anat_brain', 'in_file')]),
        (apply_inv_brain, outputnode, [('out_file', 'brain2epi')])
    ])

    # no artifact detection or motion regression is performed here, because AROMA has already removed motion-related components

    # create filter with compcor components
    createfilter2 = Node(util.Function(input_names=[
        'realigned_file', 'mask_file', 'num_components', 'extra_regressors'
    ],
                                       output_names=['out_files'],
                                       function=extract_noise_components),
                         name='makecompcorfilter')
    createfilter2.inputs.num_components = 6
    createfilter2.inputs.extra_regressors = None
    createfilter2.plugin_args = {'submit_specs': 'request_memory = 17000'}
    denoise.connect([
        (inputnode, createfilter2, [('epi_coreg', 'realigned_file')]),
        (apply_inv, createfilter2, [('out_file', 'mask_file')]),
        (createfilter2, outputnode, [('out_files', 'comp_regressor')]),
    ])
    # regress compcor and other noise components
    filter2 = Node(fsl.GLM(out_f_name='F_noise.nii.gz',
                           out_pf_name='pF_noise.nii.gz',
                           out_res_name='rest2anat_denoised.nii.gz',
                           output_type='NIFTI_GZ',
                           demean=True),
                   name='filternoise')
    filter2.plugin_args = {'submit_specs': 'request_memory = 17000'}
    denoise.connect([(inputnode, filter2, [('epi_coreg', 'in_file')]),
                     (createfilter2, filter2, [('out_files', 'design')]),
                     (inputnode, filter2, [('brain_mask', 'mask')]),
                     (filter2, outputnode, [('out_f', 'comp_F'),
                                            ('out_pf', 'comp_pF'),
                                            ('out_file', 'out_betas')])])

    # write TR into the header again (fsl_glm removes it)
    # do not use the mri_convert interface, as it has a bug (already fixed in nipype master)
    fix_tr = Node(util.Function(input_names=['in_file', 'TR_sec'],
                                output_names=['out_file'],
                                function=fix_TR_fs),
                  name='fix_tr')
    denoise.connect(inputnode, 'tr', fix_tr, 'TR_sec')
    denoise.connect(filter2, 'out_res', fix_tr, 'in_file')

    # use only a highpass filter (the high-frequency content that a lowpass filter would remove has already been handled by AROMA)
    highpass_filter = Node(
        fsl.TemporalFilter(out_file='rest_denoised_highpassed.nii'),
        name='highpass_filter')
    highpass_filter.plugin_args = {'submit_specs': 'request_memory = 17000'}
    denoise.connect([(inputnode, highpass_filter, [('highpass_sigma',
                                                    'highpass_sigma')]),
                     (fix_tr, highpass_filter, [('out_file', 'in_file')]),
                     (fix_tr, outputnode, [('out_file', 'ts_fullspectrum')])])

    # time-normalize scans (could be set to percent change etc., but here NO normalization is used;
    #                 see http://nipy.org/nitime/api/generated/nitime.fmri.io.html)
    normalize_time = Node(util.Function(input_names=['in_file', 'tr'],
                                        output_names=['out_file'],
                                        function=time_normalizer),
                          name='normalize_time')
    normalize_time.plugin_args = {'submit_specs': 'request_memory = 17000'}
    denoise.connect([
        (inputnode, normalize_time, [('tr', 'tr')]),
        (highpass_filter, normalize_time, [('out_file', 'in_file')]),
        (normalize_time, outputnode, [('out_file', 'normalized_file')])
    ])
    return denoise
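
As with any nipype workflow factory, the function only wires the nodes; a hedged sketch of supplying inputs and running it, with hypothetical file names:

# Hedged usage sketch; all input files are hypothetical.
wf = create_denoise_pipeline()
wf.inputs.inputnode.anat_brain = 'anat_brain.nii.gz'
wf.inputs.inputnode.brain_mask = 'brain_mask.nii.gz'
wf.inputs.inputnode.flirt_mat = 'epi2anat.mat'
wf.inputs.inputnode.unwarped_mean = 'mean_epi.nii.gz'
wf.inputs.inputnode.epi_coreg = 'epi_coreg.nii.gz'
wf.inputs.inputnode.highpass_sigma = 25.51  # highpass filter width, hypothetical value
wf.inputs.inputnode.tr = 2.0
wf.run()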
Code example #17
 def dr_stage2(self, **kwargs):
     regress_moco = True
     desnorm = True
     for i in kwargs.keys():
         if i == 'regress_moco':
             if kwargs[i]:
                 regress_moco = True
             else:
                 regress_moco = False
         if i == 'desnorm':
             if kwargs[i]:
                 desnorm = True
             else:
                 desnorm = False
     for subj in self.subjects:
         dr_s1_txt = os.path.join(self.outdir, 'stage1',
                                  'dr_stage1_idc_' + subj + '.txt')
         moco_txt = os.path.join(os.path.dirname(self.indir), subj,
                                 'idc_' + subj + self.featdir_sfix, 'mc',
                                 'prefiltered_func_data_mcf.par')
         dr_s1 = read_txt_file(dr_s1_txt)
         if regress_moco:
             moco_pars = read_txt_file(moco_txt)
             for i in range(0, len(dr_s1)):
                 for j in range(0, 6):
                     dr_s1[i].append(moco_pars[i][j])
             dr_s1_moco_txt = os.path.join(
                 self.outdir, 'stage1',
                 'dr_stage1_moco_idc_' + subj + '.txt')
             write_txt_file(dr_s1, dr_s1_moco_txt)
             designFile = dr_s1_moco_txt
         else:
             designFile = dr_s1_txt
         iFile = os.path.join(os.path.dirname(self.indir), subj,
                              'idc_' + subj + self.featdir_sfix,
                              self.ff_data_name)
         oFile = os.path.join(self.outdir, 'stage2',
                              'dr_stage2_idc_' + subj + '.nii.gz')
         ozFile = os.path.join(self.outdir, 'stage2',
                               'dr_stage2_Z_idc_' + subj + '.nii.gz')
         mask = os.path.join(self.outdir, 'stage1', 'mask.nii.gz')
         if desnorm:
             opts_str = '--demean --des_norm'
         else:
             opts_str = '--demean'
         fsl_glm = fsl.GLM(in_file=iFile,
                           design=designFile,
                           terminal_output='stream',
                           out_file=oFile,
                           mask=mask,
                           args=opts_str,
                           output_type='NIFTI_GZ')
         fsl_glm.run()
         obname = os.path.join(self.outdir, 'stage2',
                               'dr_stage2_idc_' + subj + '_ic')
         fslsplit = fsl.Split(dimension='t',
                              in_file=oFile,
                              out_base_name=obname,
                              terminal_output='stream',
                              output_type='NIFTI_GZ')
         fslsplit.run()
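
Passing the flags as a raw argument string works, but fsl.GLM also exposes them as proper traits, as other examples in this collection do; a hedged equivalent with hypothetical file names:

# Hedged sketch: the same stage-2 regression using the demean/des_norm traits
# instead of a raw option string; file names are hypothetical.
fsl_glm = fsl.GLM(in_file='filtered_func_data.nii.gz',
                  design='dr_stage1_moco_idc_01.txt',
                  out_file='dr_stage2_idc_01.nii.gz',
                  mask='mask.nii.gz',
                  demean=True,
                  des_norm=True,
                  output_type='NIFTI_GZ')
fsl_glm.run()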
Code example #18
def init_seedbasedconnectivity_wf(analysis, memcalc=MemoryCalculator()):
    """
    create workflow to calculate seed connectivity maps
    """
    assert isinstance(analysis, Analysis)
    assert isinstance(analysis.tags, Tags)

    # make bold file variant specification
    confoundsfilefields = []
    varianttupls = [("space", analysis.tags.space)]
    if analysis.tags.grand_mean_scaled is not None:
        assert isinstance(analysis.tags.grand_mean_scaled, GrandMeanScaledTag)
        varianttupls.append(analysis.tags.grand_mean_scaled.as_tupl())
    if analysis.tags.band_pass_filtered is not None:
        assert isinstance(analysis.tags.band_pass_filtered,
                          BandPassFilteredTag)
        varianttupls.append(analysis.tags.band_pass_filtered.as_tupl())
    if analysis.tags.confounds_removed is not None:
        assert isinstance(analysis.tags.confounds_removed, ConfoundsRemovedTag)
        confounds_removed_names = tuple(
            name for name in analysis.tags.confounds_removed.names
            if "aroma_motion" in name)
        varianttupls.append(("confounds_removed", confounds_removed_names))
        confounds_extract_names = tuple(
            name for name in analysis.tags.confounds_removed.names
            if "aroma_motion" not in name)
        if len(confounds_extract_names) > 0:
            confoundsfilefields.append("confounds_file")
            varianttupls.append(("confounds_extract", confounds_extract_names))
    if analysis.tags.smoothed is not None:
        assert isinstance(analysis.tags.smoothed, SmoothedTag)
        varianttupls.append(analysis.tags.smoothed.as_tupl())

    boldfilevariant = (("bold_file", *confoundsfilefields),
                       tuple(varianttupls))

    assert analysis.name is not None
    workflow = pe.Workflow(name=analysis.name)

    # input
    inputnode = pe.Node(
        niu.IdentityInterface(fields=[
            "bold_file",
            *confoundsfilefields,
            "mask_file",
            "seed_files",
            "seed_names",
            "metadata",
        ]),
        name="inputnode",
    )

    resampleifneeded = pe.MapNode(
        interface=ResampleIfNeeded(method="nearest"),
        name="resampleifneeded",
        iterfield=["in_file"],
        mem_gb=memcalc.series_std_gb,
    )
    workflow.connect(inputnode, "seed_files", resampleifneeded, "in_file")
    workflow.connect(inputnode, "bold_file", resampleifneeded, "ref_file")

    # Delete zero voxels for the seeds
    applymask = pe.MapNode(
        interface=fsl.ApplyMask(),
        name="applymask",
        iterfield="in_file",
        mem_gb=memcalc.volume_std_gb,
    )
    workflow.connect([
        (inputnode, applymask, [("mask_file", "mask_file")]),
        (resampleifneeded, applymask, [("out_file", "in_file")]),
    ])

    # calculate the mean time series of the region defined by each mask
    meants = pe.MapNode(
        interface=fsl.ImageMeants(),
        name="meants",
        iterfield="mask",
        mem_gb=memcalc.series_std_gb,
    )
    workflow.connect([
        (inputnode, meants, [("bold_file", "in_file")]),
        (applymask, meants, [("out_file", "mask")]),
    ])

    def make_contrastmat(confounds_file=None):
        import os
        from os import path as op

        from pipeline.utils import ncol
        import numpy as np

        if confounds_file is not None:
            nconfounds = ncol(confounds_file)
        else:
            nconfounds = 0
        contrastmat = np.zeros((1, 1 + nconfounds))
        contrastmat[0, 0] = 1

        out_file = op.join(os.getcwd(), "contrasts.tsv")
        np.savetxt(out_file, contrastmat, delimiter="\t")
        return out_file

    contrastmat = pe.Node(
        interface=niu.Function(
            input_names=[*confoundsfilefields],
            output_names=["out_file"],
            function=make_contrastmat,
        ),
        name="contrastmat",
    )
    if confoundsfilefields:
        workflow.connect([(inputnode, contrastmat, [(*confoundsfilefields,
                                                     *confoundsfilefields)])])

    designnode = pe.Node(niu.IdentityInterface(fields=["design"]),
                         name="designnode")
    if confoundsfilefields:
        mergecolumns = pe.MapNode(
            interface=MergeColumnsTSV(2),
            name="mergecolumns",
            mem_gb=memcalc.min_gb,
            iterfield="in1",
            run_without_submitting=True,
        )
        workflow.connect([
            (meants, mergecolumns, [("out_file", "in1")]),
            (inputnode, mergecolumns, [(*confoundsfilefields, "in2")]),
            (mergecolumns, designnode, [("out_file", "design")]),
        ])
    else:
        workflow.connect([(meants, designnode, [("out_file", "design")])])

    # calculate the regression of the mean time series
    # onto the functional image.
    # the result is the seed connectivity map
    glm = pe.MapNode(
        interface=fsl.GLM(
            out_file="beta.nii.gz",
            out_cope="cope.nii.gz",
            out_varcb_name="varcope.nii.gz",
            out_z_name="zstat.nii.gz",
            demean=True,
        ),
        name="glm",
        iterfield="design",
        mem_gb=memcalc.series_std_gb * 5,
    )
    workflow.connect([
        (inputnode, glm, [("bold_file", "in_file")]),
        (contrastmat, glm, [("out_file", "contrasts")]),
        (designnode, glm, [("design", "design")]),
    ])

    # make dof volume
    makedofvolume = pe.MapNode(
        interface=MakeDofVolume(),
        iterfield=["design"],
        name="makedofvolume",
    )
    workflow.connect([
        (inputnode, makedofvolume, [("bold_file", "bold_file")]),
        (designnode, makedofvolume, [("design", "design")]),
    ])

    outputnode = pe.Node(
        interface=MakeResultdicts(keys=[
            "firstlevelanalysisname",
            "firstlevelfeaturename",
            "cope",
            "varcope",
            "zstat",
            "dof_file",
            "mask_file",
        ]),
        name="outputnode",
    )
    outputnode.inputs.firstlevelanalysisname = analysis.name
    workflow.connect([
        (
            inputnode,
            outputnode,
            [
                ("metadata", "basedict"),
                ("mask_file", "mask_file"),
                ("seed_names", "firstlevelfeaturename"),
            ],
        ),
        (makedofvolume, outputnode, [("out_file", "dof_file")]),
        (
            glm,
            outputnode,
            [
                (("out_cope", ravel), "cope"),
                (("out_varcb", ravel), "varcope"),
                (("out_z", ravel), "zstat"),
            ],
        ),
    ])

    return workflow, (boldfilevariant, )
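Outside the workflow machinery, the seed-regression step above reduces to a single fsl.GLM call. A minimal standalone sketch, assuming hypothetical file names and a one-column design holding the seed's mean time series:

from nipype.interfaces import fsl

# Standalone seed-connectivity GLM (all paths are hypothetical placeholders).
glm = fsl.GLM(
    in_file='bold.nii.gz',        # preprocessed 4D functional image
    design='seed_meants.txt',     # one column: the seed mean time series
    contrasts='contrasts.tsv',    # single [1] contrast on the seed regressor
    demean=True,
    out_cope='cope.nii.gz',
    out_varcb_name='varcope.nii.gz',
    out_z_name='zstat.nii.gz',
    output_type='NIFTI_GZ',
)
result = glm.run()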
Code Example #19
File: sca.py  Project: gkiar/C-PAC
def create_temporal_reg(wflow_name='temporal_reg', which='SR'):
    """
    Temporal multiple regression workflow
    Provides a spatial map of parameter estimates corresponding to each
    provided timeseries in a timeseries.txt file as regressors

    Parameters
    ----------

    wflow_name : a string
        Name of the temporal regression workflow

    which: a string
        SR: Spatial Regression, RT: ROI Timeseries

        NOTE: If you set (which = 'RT'), the output of this workflow will be
        renamed based on the header information provided in the
        timeseries.txt file.
        If you run the temporal regression workflow manually, don't set
        (which = 'RT') unless you provide a timeseries.txt file with a header
        containing the names of the timeseries.

    Returns
    -------

    wflow : workflow

        temporal multiple regression Workflow



    Notes
    -----

    `Source <https://github.com/FCP-INDI/C-PAC/blob/master/CPAC/sca/sca.py>`_

    Workflow Inputs::

        inputspec.subject_rest : string (existing nifti file)
            Band-passed image with global signal, white matter, CSF and
            motion regression. Recommended bandpass filter: (0.001, 0.1)

        inputspec.subject_timeseries : string (existing txt file)
            Text file containing the timeseries to be regressed onto the
            subject's functional file.
            Timeseries are organized by columns, timepoints by rows.

        inputspec.subject_mask : string (existing nifti file)
            path to subject functional mask

        inputspec.demean : Boolean
            control whether to demean model and data

        inputspec.normalize : Boolean
            control whether to normalize the input timeseries to unit standard deviation



    Workflow Outputs::

        outputspec.temp_reg_map : string (nifti file)
            GLM parameter estimate image for each timeseries in the input file

        outputspec.temp_reg_map_zstat : string (nifti file)
            Normalized version of the GLM parameter estimates


    Temporal Regression Workflow Procedure:

    Enter all timeseries into a general linear model and regress these
    timeseries against the subject's functional file to get spatial maps of
    voxels showing activation patterns related to those in the timeseries.

    .. exec::
        from CPAC.sca import create_temporal_reg
        wf = create_temporal_reg()
        wf.write_graph(
            graph2use='orig',
            dotfilename='./images/generated/create_temporal_regression.dot'
        )

    Workflow:

    .. image:: ../../images/generated/create_temporal_regression.png
        :width: 500

    Detailed Workflow:

    .. image:: ../../images/generated/create_temporal_regression_detailed.png
        :width: 500

    References
    ----------
    `http://fsl.fmrib.ox.ac.uk/fsl/fslwiki/DualRegression/UserGuide <http://fsl.fmrib.ox.ac.uk/fsl/fslwiki/DualRegression/UserGuide>`_

    Examples
    --------

    >>> tr_wf = create_temporal_reg('temporal regression')
    >>> tr_wf.inputs.inputspec.subject_rest = '/home/data/subject/func/rest_bandpassed.nii.gz'
    >>> tr_wf.inputs.inputspec.subject_timeseries = '/home/data/subject/func/timeseries.txt'
    >>> tr_wf.inputs.inputspec.subject_mask = '/home/data/spatialmaps/spatial_map.nii.gz'
    >>> tr_wf.inputs.inputspec.demean = True
    >>> tr_wf.inputs.inputspec.normalize = True
    >>> tr_wf.run() # doctest: +SKIP

    """

    wflow = pe.Workflow(name=wflow_name)

    inputNode = pe.Node(util.IdentityInterface(fields=[
        'subject_rest', 'subject_timeseries', 'subject_mask', 'demean',
        'normalize'
    ]),
                        name='inputspec')

    outputNode = pe.Node(util.IdentityInterface(fields=[
        'temp_reg_map', 'temp_reg_map_files', 'temp_reg_map_z',
        'temp_reg_map_z_files'
    ]),
                         name='outputspec')

    check_timeseries = pe.Node(util.Function(input_names=['in_file'],
                                             output_names=['out_file'],
                                             function=check_ts),
                               name='check_timeseries')

    wflow.connect(inputNode, 'subject_timeseries', check_timeseries, 'in_file')

    temporalReg = pe.Node(interface=fsl.GLM(),
                          name='temporal_regression',
                          mem_gb=4.0)
    temporalReg.inputs.out_file = 'temp_reg_map.nii.gz'
    temporalReg.inputs.out_z_name = 'temp_reg_map_z.nii.gz'

    wflow.connect(inputNode, 'subject_rest', temporalReg, 'in_file')
    wflow.connect(check_timeseries, 'out_file', temporalReg, 'design')
    wflow.connect(inputNode, 'demean', temporalReg, 'demean')
    wflow.connect(inputNode, 'normalize', temporalReg, 'des_norm')
    wflow.connect(inputNode, 'subject_mask', temporalReg, 'mask')

    wflow.connect(temporalReg, 'out_file', outputNode, 'temp_reg_map')
    wflow.connect(temporalReg, 'out_z', outputNode, 'temp_reg_map_z')
    '''
    split = pe.Node(interface=fsl.Split(), name='split_raw_volumes')
    split.inputs.dimension = 't'
    split.inputs.out_base_name = 'temp_reg_map_'

    wflow.connect(temporalReg, 'out_file', split, 'in_file')

    split_zstat = pe.Node(interface=fsl.Split(), name='split_zstat_volumes')
    split_zstat.inputs.dimension = 't'
    split_zstat.inputs.out_base_name = 'temp_reg_map_z_'

    wflow.connect(temporalReg, 'out_z',
                  split_zstat, 'in_file')

    if which == 'SR':
        wflow.connect(split, 'out_files',
                      outputNode, 'temp_reg_map_files')
        wflow.connect(split_zstat, 'out_files',
                      outputNode, 'temp_reg_map_z_files')

    elif which == 'RT':
        map_roi_imports = ['import os', 'import numpy as np']

        # get roi order and send to output node for raw outputs
        get_roi_order = pe.Node(util.Function(input_names=['maps',
                                                           'timeseries'],
                                              output_names=['labels',
                                                            'maps'],
                                              function=map_to_roi,
                                              imports=map_roi_imports),
                                name='get_roi_order')

        wflow.connect(split, 'out_files', get_roi_order, 'maps')

        wflow.connect(inputNode, 'subject_timeseries',
                      get_roi_order, 'timeseries')

        rename_maps = pe.MapNode(interface=util.Rename(),
                                 name='rename_maps',
                                 iterfield=['in_file',
                                            'format_string'])
        rename_maps.inputs.keep_ext = True

        wflow.connect(get_roi_order, 'labels', rename_maps, 'format_string')
        wflow.connect(get_roi_order, 'maps', rename_maps, 'in_file')
        wflow.connect(rename_maps, 'out_file',
                      outputNode, 'temp_reg_map_files')

        # get roi order and send to output node for z-stat outputs
        get_roi_order_zstat = pe.Node(util.Function(input_names=['maps',
                                                           'timeseries'],
                                                    output_names=['labels',
                                                                  'maps'],
                                                    function=map_to_roi,
                                                    imports=map_roi_imports),
                                      name='get_roi_order_zstat')

        wflow.connect(split_zstat, 'out_files', get_roi_order_zstat, 'maps')
        wflow.connect(inputNode, 'subject_timeseries',
                      get_roi_order_zstat, 'timeseries')

        rename_maps_zstat = pe.MapNode(interface=util.Rename(),
                                       name='rename_maps_zstat',
                                       iterfield=['in_file',
                                                  'format_string'])
        rename_maps_zstat.inputs.keep_ext = True

        wflow.connect(get_roi_order_zstat, 'labels',
                      rename_maps_zstat, 'format_string')
        wflow.connect(get_roi_order_zstat, 'maps',
                      rename_maps_zstat, 'in_file')

        wflow.connect(rename_maps_zstat, 'out_file',
                      outputNode, 'temp_reg_map_z_files')
    '''

    return wflow
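For orientation, the temporal_regression node above corresponds to a plain fsl_glm invocation. A minimal sketch with hypothetical inputs that prints the generated command before running anything:

from nipype.interfaces import fsl

# Standalone equivalent of the temporal_regression node (hypothetical paths).
glm = fsl.GLM(in_file='rest_bandpassed.nii.gz',
              design='timeseries.txt',
              mask='mask.nii.gz',
              demean=True,
              des_norm=True,
              out_file='temp_reg_map.nii.gz',
              out_z_name='temp_reg_map_z.nii.gz')
print(glm.cmdline)  # shows the fsl_glm call that would be executed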
Code Example #20
    def runglmperun(self, subject, trtimeinsec):
        s = SpecifyModel()
        # loop on all runs and models within each run
        modelfiles = subject._modelfiles

        for model in modelfiles:
            # Make directory results to store the results of the model
            results_dir = os.path.join(subject._path, 'model', model[0],
                                       'results', model[1])
            dir_util.mkpath(results_dir)
            os.chdir(results_dir)

            s.inputs.event_files = model[2]
            s.inputs.input_units = 'secs'
            s.inputs.functional_runs = os.path.join(subject._path, 'BOLD',
                                                    model[1],
                                                    'bold_mcf_hp.nii.gz')
            # use nibabel to get the TR from the .nii file
            s.inputs.time_repetition = trtimeinsec
            s.inputs.high_pass_filter_cutoff = 128.
            # find par file that has motion
            motionfiles = glob(
                os.path.join(subject._path, 'BOLD', model[1], "*.par"))
            s.inputs.realignment_parameters = motionfiles
            #info = [Bunch(conditions=['cond1'], onsets=[[2, 50, 100, 180]], durations=[[1]]),
            #        Bunch(conditions=['cond1'], onsets=[[30, 40, 100, 150]], durations=[[1]])]
            #s.inputs.subject_info = None

            res = s.run()
            res.runtime.cwd
            print ">>>> preparing evs for model " + model[
                1] + "and run " + model[0]
            sessionInfo = res.outputs.session_info

            level1design = Level1Design()
            level1design.inputs.interscan_interval = trtimeinsec
            level1design.inputs.bases = {'dgamma': {'derivs': False}}
            level1design.inputs.session_info = sessionInfo
            level1design.inputs.model_serial_correlations = True
            #TODO: add contrasts to level 1 design so that I have just condition vs rest for each ev
            #TODO: Look into changing this to FILM instead of FEAT - this also has the option of setting output directory
            # http://fsl.fmrib.ox.ac.uk/fsl/fslwiki/FEAT/UserGuide#Contrasts
            #http://nipy.org/nipype/interfaces/generated/nipype.interfaces.fsl.model.html#filmgls
            resLevel = level1design.run()

            featModel = FEATModel()
            featModel.inputs.fsf_file = resLevel.outputs.fsf_files
            featModel.inputs.ev_files = resLevel.outputs.ev_files
            resFeat = featModel.run()

            print ">>>> creating fsf design files for  " + model[
                1] + "and run " + model[0]
            # TODO: give mask here
            glm = fsl.GLM(in_file=s.inputs.functional_runs[0],
                          design=resFeat.outputs.design_file,
                          output_type='NIFTI')

            print ">>>> running glm for  " + model[1] + "and run " + model[0]
            resGlm = glm.run()

            print ">>>> finished running  glm for  " + model[
                1] + "and run " + model[0]
Code Example #21
def create_indnet_workflow(hp_cutoff=100,
                           smoothing=5,
                           smm_threshold=0.5,
                           binarise_threshold=0.5,
                           melodic_seed=None,
                           aggr_aroma=False,
                           name="indnet"):

    indnet = Workflow(name=name)

    # Input node
    inputspec = Node(utility.IdentityInterface(
        fields=['anat_file', 'func_file', 'templates', 'networks']),
                     name='inputspec')

    # T1 skullstrip
    anat_bet = Node(fsl.BET(), name="anat_bet")

    # EPI preprocessing
    func_realignsmooth = create_featreg_preproc(highpass=False,
                                                whichvol='first',
                                                name='func_realignsmooth')
    func_realignsmooth.inputs.inputspec.fwhm = smoothing

    # Transform EPI to MNI space
    func_2mni = create_reg_workflow(name='func_2mni')
    func_2mni.inputs.inputspec.target_image = fsl.Info.standard_image(
        'MNI152_T1_2mm.nii.gz')
    func_2mni.inputs.inputspec.target_image_brain = fsl.Info.standard_image(
        'MNI152_T1_2mm_brain.nii.gz')
    func_2mni.inputs.inputspec.config_file = 'T1_2_MNI152_2mm'

    # Segmentation of T1
    anat_segmentation = Node(fsl.FAST(output_biascorrected=True),
                             name='anat_segmentation')

    # Transform segments to EPI space
    segments_2func = create_segments_2func_workflow(
        threshold=binarise_threshold, name='segments_2func')

    # Transform templates to EPI space
    templates_2func = create_templates_2func_workflow(
        threshold=binarise_threshold, name='templates_2func')

    # Mask network templates with GM
    gm_mask_templates = MapNode(fsl.ImageMaths(op_string='-mul'),
                                iterfield=['in_file2'],
                                name='gm_mask_templates')

    # Mask for ICA-AROMA and statistics
    func_brainmask = Node(fsl.BET(frac=0.3,
                                  mask=True,
                                  no_output=True,
                                  robust=True),
                          name='func_brainmask')

    # Melodic ICA
    if melodic_seed is not None:
        func_melodic = Node(fsl.MELODIC(args='--seed={}'.format(melodic_seed),
                                        out_stats=True),
                            name='func_melodic')

    # ICA-AROMA
    func_aroma = Node(fsl.ICA_AROMA(), name='func_aroma')
    if aggr_aroma:
        func_aroma.inputs.denoise_type = 'aggr'

    else:
        func_aroma.inputs.denoise_type = 'nonaggr'

    # Highpass filter ICA results
    func_highpass = create_highpass_filter(cutoff=hp_cutoff,
                                           name='func_highpass')

    # Calculate mean CSF signal
    csf_meansignal = Node(fsl.ImageMeants(), name='csf_meansignal')

    # Calculate mean WM signal
    wm_meansignal = Node(fsl.ImageMeants(), name='wm_meansignal')

    # Calculate mean non-brain signal
    nonbrain_meansignal = create_nonbrain_meansignal(
        name='nonbrain_meansignal')

    # Calculate first Eigenvariates
    firsteigenvariates = MapNode(fsl.ImageMeants(show_all=True, eig=True),
                                 iterfield=['mask'],
                                 name='firsteigenvariates')

    # Combine first eigenvariates and wm/csf/non-brain signals
    regressors = Node(utility.Merge(4), name='regressors')

    # z-transform regressors
    ztransform = MapNode(Ztransform(),
                         iterfield=['in_file'],
                         name='ztransform')

    # Create design matrix
    designmatrix = Node(DesignMatrix(), name='designmatrix')

    # Create contrasts
    contrasts = Node(Contrasts(), name='contrasts')

    # GLM
    glm = Node(fsl.GLM(), name='glm')
    glm.inputs.out_z_name = 'z_stats.nii.gz'
    glm.inputs.demean = True

    # Split z-maps
    zmaps = Node(fsl.Split(), name='zmaps')
    zmaps.inputs.dimension = 't'

    # Spatial Mixture Modelling
    smm = MapNode(fsl.SMM(), iterfield=['spatial_data_file'], name='smm')

    # Transform probability maps to native (anat) space
    actmaps_2anat = MapNode(fsl.ApplyXFM(),
                            iterfield=['in_file'],
                            name='actmaps_2anat')

    # Transform probability maps to MNI space
    actmaps_2mni = MapNode(fsl.ApplyWarp(),
                           iterfield=['in_file'],
                           name='actmaps_2mni')
    actmaps_2mni.inputs.ref_file = fsl.Info.standard_image(
        'MNI152_T1_2mm.nii.gz')

    # Create network masks in native (func) space
    network_masks_func = create_network_masks_workflow(
        name='network_masks_func', smm_threshold=smm_threshold)

    # Create network masks in native (anat) space
    network_masks_anat = create_network_masks_workflow(
        name='network_masks_anat', smm_threshold=smm_threshold)

    # Create network masks in MNI space
    network_masks_mni = create_network_masks_workflow(
        name='network_masks_mni', smm_threshold=smm_threshold)

    # Output node
    outputspec = Node(utility.IdentityInterface(fields=[
        'network_masks_func_main', 'network_masks_func_exclusive',
        'network_masks_anat_main', 'network_masks_anat_exclusive',
        'network_masks_mni_main', 'network_masks_mni_exclusive',
        'preprocessed_func_file', 'preprocessed_anat_file',
        'motion_parameters', 'func2anat_transform', 'anat2mni_transform'
    ]),
                      name='outputspec')

    # Helper functions
    def get_first_item(x):
        try:
            return x[0]
        except (TypeError, IndexError):
            return x

    def get_second_item(x):
        return x[1]

    def get_third_item(x):
        return x[2]

    def get_components(x):
        return [y['components'] for y in x]

    # Connect the nodes

    # anat_bet
    indnet.connect(inputspec, 'anat_file', anat_bet, 'in_file')

    # func_realignsmooth
    indnet.connect(inputspec, 'func_file', func_realignsmooth,
                   'inputspec.func')

    # func_2mni
    indnet.connect(func_realignsmooth,
                   ('outputspec.smoothed_files', get_first_item), func_2mni,
                   'inputspec.source_files')
    indnet.connect(inputspec, 'anat_file', func_2mni,
                   'inputspec.anatomical_image')
    indnet.connect(func_realignsmooth, 'outputspec.reference', func_2mni,
                   'inputspec.mean_image')

    # anat_segmentation
    indnet.connect(anat_bet, 'out_file', anat_segmentation, 'in_files')

    # segments_2func
    indnet.connect(anat_segmentation, 'partial_volume_files', segments_2func,
                   'inputspec.segments')
    indnet.connect(func_2mni, 'outputspec.func2anat_transform', segments_2func,
                   'inputspec.premat')
    indnet.connect(func_realignsmooth, 'outputspec.mean', segments_2func,
                   'inputspec.func_file')

    # templates_2func
    indnet.connect(func_realignsmooth, 'outputspec.mean', templates_2func,
                   'inputspec.func_file')
    indnet.connect(func_2mni, 'outputspec.func2anat_transform',
                   templates_2func, 'inputspec.premat')
    indnet.connect(func_2mni, 'outputspec.anat2target_transform',
                   templates_2func, 'inputspec.warp')
    indnet.connect(inputspec, 'templates', templates_2func,
                   'inputspec.templates')

    # gm_mask_templates
    indnet.connect(segments_2func,
                   ('outputspec.segments_2func_files', get_second_item),
                   gm_mask_templates, 'in_file')
    indnet.connect(templates_2func, 'outputspec.templates_2func_files',
                   gm_mask_templates, 'in_file2')

    # func_brainmask
    indnet.connect(func_realignsmooth, 'outputspec.mean', func_brainmask,
                   'in_file')

    # func_melodic
    if melodic_seed is not None:
        indnet.connect(func_realignsmooth,
                       ('outputspec.smoothed_files', get_first_item),
                       func_melodic, 'in_files')
        indnet.connect(func_brainmask, 'mask_file', func_melodic, 'mask')

    # func_aroma
    indnet.connect(func_realignsmooth,
                   ('outputspec.smoothed_files', get_first_item), func_aroma,
                   'in_file')
    indnet.connect(func_2mni, 'outputspec.func2anat_transform', func_aroma,
                   'mat_file')
    indnet.connect(func_2mni, 'outputspec.anat2target_transform', func_aroma,
                   'fnirt_warp_file')
    indnet.connect(func_realignsmooth,
                   ('outputspec.motion_parameters', get_first_item),
                   func_aroma, 'motion_parameters')
    indnet.connect(func_brainmask, 'mask_file', func_aroma, 'mask')
    if melodic_seed is not None:
        indnet.connect(func_melodic, 'out_dir', func_aroma, 'melodic_dir')

    # func_highpass
    if aggr_aroma:
        indnet.connect(func_aroma, 'aggr_denoised_file', func_highpass,
                       'inputspec.in_file')
    else:
        indnet.connect(func_aroma, 'nonaggr_denoised_file', func_highpass,
                       'inputspec.in_file')

    # csf_meansignal
    indnet.connect(segments_2func,
                   ('outputspec.segments_2func_files', get_first_item),
                   csf_meansignal, 'mask')
    indnet.connect(func_highpass, 'outputspec.filtered_file', csf_meansignal,
                   'in_file')

    # wm_meansignal
    indnet.connect(segments_2func,
                   ('outputspec.segments_2func_files', get_third_item),
                   wm_meansignal, 'mask')
    indnet.connect(func_highpass, 'outputspec.filtered_file', wm_meansignal,
                   'in_file')

    # nonbrain_meansignal
    indnet.connect(inputspec, 'func_file', nonbrain_meansignal,
                   'inputspec.func_file')

    # firsteigenvariates
    indnet.connect(gm_mask_templates, 'out_file', firsteigenvariates, 'mask')
    indnet.connect(func_highpass, 'outputspec.filtered_file',
                   firsteigenvariates, 'in_file')

    # regressors
    indnet.connect(firsteigenvariates, 'out_file', regressors, 'in1')
    indnet.connect(wm_meansignal, 'out_file', regressors, 'in2')
    indnet.connect(csf_meansignal, 'out_file', regressors, 'in3')
    indnet.connect(nonbrain_meansignal, 'outputspec.nonbrain_regressor',
                   regressors, 'in4')

    # ztransform
    indnet.connect(regressors, 'out', ztransform, 'in_file')

    # designmatrix
    indnet.connect(ztransform, 'out_file', designmatrix, 'in_files')

    # contrasts
    indnet.connect(inputspec, ('networks', get_components), contrasts,
                   'in_list')
    indnet.connect(designmatrix, 'out_file', contrasts, 'design')

    # glm
    indnet.connect(designmatrix, 'out_file', glm, 'design')
    indnet.connect(contrasts, 'out_file', glm, 'contrasts')
    indnet.connect(func_brainmask, 'mask_file', glm, 'mask')
    indnet.connect(func_highpass, 'outputspec.filtered_file', glm, 'in_file')

    # zmaps
    indnet.connect(glm, 'out_z', zmaps, 'in_file')

    # smm
    indnet.connect(zmaps, 'out_files', smm, 'spatial_data_file')
    indnet.connect(func_brainmask, 'mask_file', smm, 'mask')

    # actmaps_2anat
    indnet.connect(smm, 'activation_p_map', actmaps_2anat, 'in_file')
    indnet.connect(func_2mni, 'outputspec.func2anat_transform', actmaps_2anat,
                   'in_matrix_file')
    indnet.connect(anat_bet, 'out_file', actmaps_2anat, 'reference')

    # actmaps_2mni
    indnet.connect(smm, 'activation_p_map', actmaps_2mni, 'in_file')
    indnet.connect(templates_2func, 'outputspec.func_2mni_warp', actmaps_2mni,
                   'field_file')

    # network_masks_func
    indnet.connect(smm, 'activation_p_map', network_masks_func,
                   'inputspec.actmaps')
    indnet.connect(inputspec, 'networks', network_masks_func,
                   'inputspec.networks')

    # network_masks_anat
    indnet.connect(actmaps_2anat, 'out_file', network_masks_anat,
                   'inputspec.actmaps')
    indnet.connect(inputspec, 'networks', network_masks_anat,
                   'inputspec.networks')

    # network_masks_mni
    indnet.connect(actmaps_2mni, 'out_file', network_masks_mni,
                   'inputspec.actmaps')
    indnet.connect(inputspec, 'networks', network_masks_mni,
                   'inputspec.networks')

    # output node
    indnet.connect(network_masks_func, 'outputspec.main_masks', outputspec,
                   'network_masks_func_main')
    indnet.connect(network_masks_func, 'outputspec.exclusive_masks',
                   outputspec, 'network_masks_func_exclusive')
    indnet.connect(network_masks_anat, 'outputspec.main_masks', outputspec,
                   'network_masks_anat_main')
    indnet.connect(network_masks_anat, 'outputspec.exclusive_masks',
                   outputspec, 'network_masks_anat_exclusive')
    indnet.connect(network_masks_mni, 'outputspec.main_masks', outputspec,
                   'network_masks_mni_main')
    indnet.connect(network_masks_mni, 'outputspec.exclusive_masks', outputspec,
                   'network_masks_mni_exclusive')
    indnet.connect(func_highpass, 'outputspec.filtered_file', outputspec,
                   'preprocessed_func_file')
    indnet.connect(anat_segmentation, 'restored_image', outputspec,
                   'preprocessed_anat_file')
    indnet.connect(func_realignsmooth,
                   ('outputspec.motion_parameters', get_first_item),
                   outputspec, 'motion_parameters')
    indnet.connect(func_2mni, 'outputspec.func2anat_transform', outputspec,
                   'func2anat_transform')
    indnet.connect(func_2mni, 'outputspec.anat2target_transform', outputspec,
                   'anat2mni_transform')

    return indnet
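A hedged usage sketch for the workflow above. Every input value here is a hypothetical placeholder, and the structure of the networks entries is inferred from the get_components helper, which expects dicts carrying a 'components' key:

# Hypothetical invocation of the indnet workflow.
wf = create_indnet_workflow(hp_cutoff=100, smoothing=5)
wf.inputs.inputspec.anat_file = 'sub-01_T1w.nii.gz'
wf.inputs.inputspec.func_file = 'sub-01_task-rest_bold.nii.gz'
wf.inputs.inputspec.templates = ['dmn_template.nii.gz']
wf.inputs.inputspec.networks = [{'name': 'DMN', 'components': [0]}]
wf.run()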
Code Example #22
def init_seedbasedconnectivity_wf(workdir=None,
                                  feature=None,
                                  seed_files=None,
                                  seed_spaces=None,
                                  memcalc=MemoryCalculator()):
    """
    create workflow to calculate seed connectivity maps
    """
    if feature is not None:
        name = f"{formatlikebids(feature.name)}_wf"
    else:
        name = "seedbasedconnectivity_wf"
    workflow = pe.Workflow(name=name)

    # input
    inputnode = pe.Node(
        niu.IdentityInterface(fields=[
            "tags",
            "vals",
            "metadata",
            "bold",
            "mask",
            "confounds_selected",
            "seed_names",
            "seed_files",
            "seed_spaces",
        ]),
        name="inputnode",
    )
    outputnode = pe.Node(niu.IdentityInterface(fields=["resultdicts"]),
                         name="outputnode")

    min_seed_coverage = 1
    if feature is not None:
        inputnode.inputs.seed_names = feature.seeds
        if hasattr(feature, "min_seed_coverage"):
            min_seed_coverage = feature.min_seed_coverage

    if seed_files is not None:
        inputnode.inputs.seed_files = seed_files

    if seed_spaces is not None:
        inputnode.inputs.seed_spaces = seed_spaces

    #
    statmaps = ["effect", "variance", "z", "dof", "mask"]
    make_resultdicts = pe.Node(
        MakeResultdicts(
            tagkeys=["feature", "seed"],
            imagekeys=[*statmaps, "design_matrix", "contrast_matrix"],
            metadatakeys=["mean_t_s_n_r", "coverage"],
        ),
        name="make_resultdicts",
    )
    if feature is not None:
        make_resultdicts.inputs.feature = feature.name
    workflow.connect(inputnode, "tags", make_resultdicts, "tags")
    workflow.connect(inputnode, "vals", make_resultdicts, "vals")
    workflow.connect(inputnode, "metadata", make_resultdicts, "metadata")
    workflow.connect(inputnode, "mask", make_resultdicts, "mask")

    workflow.connect(make_resultdicts, "resultdicts", outputnode,
                     "resultdicts")

    #
    resultdict_datasink = pe.Node(ResultdictDatasink(base_directory=workdir),
                                  name="resultdict_datasink")
    workflow.connect(make_resultdicts, "resultdicts", resultdict_datasink,
                     "indicts")

    #
    reference_dict = dict(reference_space=constants.reference_space,
                          reference_res=constants.reference_res)
    resample = pe.MapNode(
        Resample(interpolation="MultiLabel", **reference_dict),
        name="resample",
        iterfield=["input_image", "input_space"],
        n_procs=config.nipype.omp_nthreads,
        mem_gb=memcalc.series_std_gb,
    )
    workflow.connect(inputnode, "seed_files", resample, "input_image")
    workflow.connect(inputnode, "seed_spaces", resample, "input_space")

    # Delete zero voxels for the seeds
    maskseeds = pe.Node(
        MaskCoverage(keys=["names"], min_coverage=min_seed_coverage),
        name="maskseeds",
        mem_gb=memcalc.volume_std_gb,
    )
    workflow.connect(inputnode, "mask", maskseeds, "mask_file")

    workflow.connect(inputnode, "seed_names", maskseeds, "names")
    workflow.connect(resample, "output_image", maskseeds, "in_files")

    workflow.connect(maskseeds, "names", make_resultdicts, "seed")
    workflow.connect(maskseeds, "coverage", make_resultdicts, "coverage")

    # calculate the mean time series of the region defined by each mask
    meants = pe.MapNode(
        fsl.ImageMeants(),
        name="meants",
        iterfield="mask",
        mem_gb=memcalc.series_std_gb,
    )
    workflow.connect(inputnode, "bold", meants, "in_file")
    workflow.connect(maskseeds, "out_files", meants, "mask")

    #
    design = pe.MapNode(MergeColumns(2),
                        iterfield=["in1", "column_names1"],
                        name="design")
    workflow.connect(meants, "out_file", design, "in1")
    workflow.connect(maskseeds, "names", design, "column_names1")
    workflow.connect(inputnode, "confounds_selected", design, "in2")

    workflow.connect(design, "out_with_header", make_resultdicts,
                     "design_matrix")

    contrasts = pe.MapNode(
        niu.Function(
            input_names=["design_file"],
            output_names=["out_with_header", "out_no_header"],
            function=_contrasts,
        ),
        iterfield="design_file",
        name="contrasts",
    )
    workflow.connect(design, "out_with_header", contrasts, "design_file")

    workflow.connect(contrasts, "out_with_header", make_resultdicts,
                     "contrast_matrix")

    fillna = pe.MapNode(FillNA(), iterfield="in_tsv", name="fillna")
    workflow.connect(design, "out_no_header", fillna, "in_tsv")

    # calculate the regression of the mean time series
    # onto the functional image.
    # the result is the seed connectivity map
    glm = pe.MapNode(
        fsl.GLM(
            out_file="beta.nii.gz",
            out_cope="cope.nii.gz",
            out_varcb_name="varcope.nii.gz",
            out_z_name="zstat.nii.gz",
            demean=True,
        ),
        name="glm",
        iterfield=["design", "contrasts"],
        mem_gb=memcalc.series_std_gb * 5,
    )
    workflow.connect(inputnode, "bold", glm, "in_file")
    workflow.connect(inputnode, "mask", glm, "mask")
    workflow.connect(fillna, "out_no_header", glm, "design")
    workflow.connect(contrasts, "out_no_header", glm, "contrasts")

    # make dof volume
    makedofvolume = pe.MapNode(MakeDofVolume(),
                               iterfield=["design"],
                               name="makedofvolume")
    workflow.connect(inputnode, "bold", makedofvolume, "bold_file")
    workflow.connect(fillna, "out_no_header", makedofvolume, "design")

    workflow.connect(glm, "out_cope", make_resultdicts, "effect")
    workflow.connect(glm, "out_varcb", make_resultdicts, "variance")
    workflow.connect(glm, "out_z", make_resultdicts, "z")
    workflow.connect(makedofvolume, "out_file", make_resultdicts, "dof")

    #
    tsnr = pe.Node(nac.TSNR(), name="tsnr", mem_gb=memcalc.series_std_gb)
    workflow.connect(inputnode, "bold", tsnr, "in_file")

    calcmean = pe.MapNode(CalcMean(),
                          iterfield="mask",
                          name="calcmean",
                          mem_gb=memcalc.series_std_gb)
    workflow.connect(maskseeds, "out_files", calcmean, "mask")
    workflow.connect(tsnr, "tsnr_file", calcmean, "in_file")

    workflow.connect(calcmean, "mean", make_resultdicts, "mean_t_s_n_r")

    return workflow
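The _contrasts helper used above is not shown in this example. By analogy with make_contrastmat in Code Example #18, it presumably emits a single row that selects the seed column and zeros out any confound columns; a sketch under that assumption (name and behavior hypothetical):

import os
import numpy as np
import pandas as pd

def _contrasts_sketch(design_file):
    # Hypothetical reconstruction: one contrast selecting the seed
    # regressor (first column), zeros for the confound columns after it.
    design = pd.read_csv(design_file, sep='\t')
    contrast = np.zeros((1, design.shape[1]))
    contrast[0, 0] = 1
    out_file = os.path.join(os.getcwd(), 'contrasts.tsv')
    np.savetxt(out_file, contrast, delimiter='\t')
    return out_file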
Code Example #23
def create_denoise_pipeline(name='denoise'):
    # workflow
    denoise = Workflow(name=name)
    # Define nodes
    inputnode = Node(interface=util.IdentityInterface(fields=['brain_mask',
                                                              'epi_coreg',
                                                              'wmseg',
                                                              'csfseg',
                                                              'highpass_freq',
                                                              'tr']),
                     name='inputnode')
    outputnode = Node(interface=util.IdentityInterface(fields=['wmcsf_mask',
                                                               'combined_motion',
                                                               'comp_regressor',
                                                               'comp_F',
                                                               'comp_pF',
                                                               'out_betas',
                                                               'ts_fullspectrum',
                                                               'ts_filtered']),
                      name='outputnode')

    # combine tissue classes to noise mask
    wmcsf_mask = Node(fsl.BinaryMaths(operation='add',
                                      out_file='wmcsf_mask.nii'),
                      name='wmcsf_mask')
    denoise.connect([(inputnode, wmcsf_mask, [('wmseg', 'in_file'),
                                              ('csfseg', 'operand_file')])])
        
    # resample + binarize the wm/csf mask to EPI resolution
    resample_wmcsf = Node(afni.Resample(resample_mode='NN',
                                        outputtype='NIFTI_GZ',
                                        out_file='wmcsf_mask_lowres.nii.gz'),
                          name='resample_wmcsf')

    bin_wmcsf_mask = Node(fsl.utils.ImageMaths(), name='bin_wmcsf_mask')
    bin_wmcsf_mask.inputs.op_string = '-nan -thr 0.99 -ero -bin'

    denoise.connect([(wmcsf_mask, resample_wmcsf, [('out_file', 'in_file')]),
                     (inputnode, resample_wmcsf, [('brain_mask', 'master')]),
                     (resample_wmcsf, bin_wmcsf_mask, [('out_file', 'in_file')]),
                     (bin_wmcsf_mask, outputnode, [('out_file', 'wmcsf_mask')])])
         
    # no other denoising filters are created here because AROMA already performs well

    # 5 components, cf. https://www.sciencedirect.com/science/article/pii/S105381191400175X?via%3Dihub
    compcor = Node(conf.ACompCor(), name='compcor')
    compcor.inputs.num_components = 5
    denoise.connect([(inputnode, compcor, [('epi_coreg', 'realigned_file')]),
                     (bin_wmcsf_mask, compcor, [('out_file', 'mask_files')])])
    
    def create_designs(compcor_regressors, epi_coreg, mask):
        import numpy as np
        import pandas as pd
        import os
        from nilearn.input_data import NiftiMasker

        brain_masker = NiftiMasker(mask_img=mask,
                                   smoothing_fwhm=None, standardize=False,
                                   memory='nilearn_cache',
                                   memory_level=5, verbose=2)

        whole_brain = brain_masker.fit_transform(epi_coreg)
        avg_signal = np.mean(whole_brain, axis=1)

        all_regressors = pd.read_csv(compcor_regressors, sep='\t')

        # add the global signal as an extra regressor
        all_regressors['global_signal'] = avg_signal

        fn = os.path.join(os.getcwd(), 'all_regressors.txt')
        all_regressors.to_csv(fn, sep='\t', index=False)

        return [fn, compcor_regressors]

    # create a list of designs to loop over
    create_design = Node(util.Function(input_names=['compcor_regressors', 'epi_coreg', 'mask'],
                                       output_names=['reg_list'],
                                       function=create_designs),
                         name='create_design')

    denoise.connect([(compcor, create_design, [('components_file', 'compcor_regressors')]),
                     (inputnode, create_design, [('epi_coreg', 'epi_coreg')]),
                     (inputnode, create_design, [('brain_mask', 'mask')])])
    
    # regress compcor and other noise components
    filter2 = MapNode(fsl.GLM(out_f_name='F_noise.nii.gz',
                              out_pf_name='pF_noise.nii.gz',
                              out_res_name='rest2anat_denoised.nii.gz',
                              output_type='NIFTI_GZ',
                              demean=True),
                      iterfield=['design'],
                      name='filternoise')
    filter2.plugin_args = {'submit_specs': 'request_memory = 17000'}
    
    denoise.connect([(inputnode, filter2, [('epi_coreg', 'in_file')]),
                     #(createfilter2, filter2, [('out_files', 'design')]),
                     #(compcor, filter2, [('components_file', 'design')]),
                     (create_design, filter2, [('reg_list', 'design')]),
                     (inputnode, filter2, [('brain_mask', 'mask')]),
                     (filter2, outputnode, [('out_f', 'comp_F'),
                                            ('out_pf', 'comp_pF'),
                                            ('out_file', 'out_betas'),
                                            ('out_res', 'ts_fullspectrum'),
                                            ])
                     ])



    def calc_sigma(TR,highpass):
        # https://www.jiscmail.ac.uk/cgi-bin/webadmin?A2=ind1205&L=FSL&P=R57592&1=FSL&9=A&I=-3&J=on&d=No+Match%3BMatch%3BMatches&z=4
        sigma=1. / (2 * TR * highpass)
        return sigma

    calc_s=Node(util.Function(input_names=['TR', 'highpass'], output_names=['sigma'], function=calc_sigma),
                  name='calc_s')
    
    
    denoise.connect(inputnode, 'tr', calc_s, 'TR')
    denoise.connect(inputnode, 'highpass_freq', calc_s, 'highpass')
    
    # use only a highpass filter (high-frequency content is already somewhat filtered by AROMA)
    highpass_filter = MapNode(fsl.TemporalFilter(out_file='rest_denoised_highpassed.nii'),
                           name='highpass_filter', iterfield=['in_file'])
    highpass_filter.plugin_args = {'submit_specs': 'request_memory = 17000'}
    denoise.connect([(calc_s, highpass_filter, [('sigma', 'highpass_sigma')]),
                     (filter2, highpass_filter, [('out_res', 'in_file')]),
                     (highpass_filter, outputnode, [('out_file', 'ts_filtered')])
                     ])
    
    return denoise
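As a worked instance of the calc_sigma conversion above: with TR = 2.0 s and a highpass cutoff of 0.01 Hz,

# sigma = 1 / (2 * TR * highpass_freq), expressed in volumes
sigma = 1.0 / (2 * 2.0 * 0.01)  # = 25.0 volumes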
Code Example #24
def init_dualregression_wf(workdir=None,
                           feature=None,
                           map_files=None,
                           map_spaces=None,
                           memcalc=MemoryCalculator()):
    """
    create a workflow to calculate dual regression for ICA seeds
    """
    if feature is not None:
        name = f"{formatlikebids(feature.name)}_wf"
    else:
        name = "dualregression_wf"
    workflow = pe.Workflow(name=name)

    # input
    inputnode = pe.Node(
        niu.IdentityInterface(fields=[
            "tags",
            "vals",
            "metadata",
            "bold",
            "mask",
            "confounds_selected",
            "map_names",
            "map_files",
            "map_spaces",
        ]),
        name="inputnode",
    )
    outputnode = pe.Node(niu.IdentityInterface(fields=["resultdicts"]),
                         name="outputnode")

    if feature is not None:
        inputnode.inputs.map_names = feature.maps

    if map_files is not None:
        inputnode.inputs.map_files = map_files

    if map_spaces is not None:
        inputnode.inputs.map_spaces = map_spaces

    #
    statmaps = ["effect", "variance", "z", "dof", "mask"]
    make_resultdicts_a = pe.Node(
        MakeResultdicts(tagkeys=["feature", "map"],
                        imagekeys=["design_matrix", "contrast_matrix"]),
        name="make_resultdicts_a",
    )
    if feature is not None:
        make_resultdicts_a.inputs.feature = feature.name
    workflow.connect(inputnode, "tags", make_resultdicts_a, "tags")
    workflow.connect(inputnode, "vals", make_resultdicts_a, "vals")
    workflow.connect(inputnode, "metadata", make_resultdicts_a, "metadata")
    workflow.connect(inputnode, "map_names", make_resultdicts_a, "map")
    make_resultdicts_b = pe.Node(
        MakeResultdicts(
            tagkeys=["feature", "map", "component"],
            imagekeys=statmaps,
            metadatakeys=["sources", "mean_t_s_n_r"],
        ),
        name="make_resultdicts_b",
    )
    if feature is not None:
        make_resultdicts_b.inputs.feature = feature.name
    workflow.connect(inputnode, "tags", make_resultdicts_b, "tags")
    workflow.connect(inputnode, "vals", make_resultdicts_b, "vals")
    workflow.connect(inputnode, "metadata", make_resultdicts_b, "metadata")
    workflow.connect(inputnode, "map_names", make_resultdicts_b, "map")
    workflow.connect(inputnode, "mask", make_resultdicts_b, "mask")

    workflow.connect(make_resultdicts_b, "resultdicts", outputnode,
                     "resultdicts")

    #
    merge_resultdicts = pe.Node(niu.Merge(2), name="merge_resultdicts")
    workflow.connect(make_resultdicts_a, "resultdicts", merge_resultdicts,
                     "in1")
    workflow.connect(make_resultdicts_b, "resultdicts", merge_resultdicts,
                     "in2")
    resultdict_datasink = pe.Node(ResultdictDatasink(base_directory=workdir),
                                  name="resultdict_datasink")
    workflow.connect(merge_resultdicts, "out", resultdict_datasink, "indicts")

    #
    reference_dict = dict(reference_space=constants.reference_space,
                          reference_res=constants.reference_res)
    resample = pe.MapNode(
        Resample(interpolation="LanczosWindowedSinc", **reference_dict),
        name="resample",
        iterfield=["input_image", "input_space"],
        n_procs=config.nipype.omp_nthreads,
        mem_gb=memcalc.series_std_gb,
    )
    workflow.connect(inputnode, "map_files", resample, "input_image")
    workflow.connect(inputnode, "map_spaces", resample, "input_space")

    # Delete zero voxels for the maps
    applymask = pe.MapNode(
        fsl.ApplyMask(),
        name="applymask",
        iterfield="in_file",
        mem_gb=memcalc.volume_std_gb,
    )
    workflow.connect(inputnode, "mask", applymask, "mask_file")
    workflow.connect(resample, "output_image", applymask, "in_file")

    # first step, calculate spatial regression of ICA components on to the
    # bold file
    spatialglm = pe.MapNode(
        fsl.GLM(out_file="beta", demean=True),
        name="spatialglm",
        iterfield="design",
        mem_gb=memcalc.series_std_gb * 5,
    )
    workflow.connect(applymask, "out_file", spatialglm, "design")
    workflow.connect(inputnode, "bold", spatialglm, "in_file")
    workflow.connect(inputnode, "mask", spatialglm, "mask")

    # second step, calculate the temporal regression of the time series
    # from the first step on to the bold file
    contrasts = pe.MapNode(
        niu.Function(
            input_names=["map_timeseries_file", "confounds_file"],
            output_names=[
                "out_with_header", "out_no_header", "map_component_names"
            ],
            function=_contrasts,
        ),
        iterfield="map_timeseries_file",
        name="contrasts",
    )
    workflow.connect(spatialglm, "out_file", contrasts, "map_timeseries_file")
    workflow.connect(inputnode, "confounds_selected", contrasts,
                     "confounds_file")

    workflow.connect(contrasts, "out_with_header", make_resultdicts_a,
                     "contrast_matrix")
    workflow.connect(contrasts, "map_component_names", make_resultdicts_b,
                     "component")

    design = pe.MapNode(MergeColumns(2),
                        iterfield=["in1", "column_names1"],
                        name="design")
    workflow.connect(spatialglm, "out_file", design, "in1")
    workflow.connect(contrasts, "map_component_names", design, "column_names1")
    workflow.connect(inputnode, "confounds_selected", design, "in2")

    workflow.connect(design, "out_with_header", make_resultdicts_a,
                     "design_matrix")

    fillna = pe.MapNode(FillNA(), iterfield="in_tsv", name="fillna")
    workflow.connect(design, "out_no_header", fillna, "in_tsv")

    temporalglm = pe.MapNode(
        fsl.GLM(
            out_file="beta.nii.gz",
            out_cope="cope.nii.gz",
            out_varcb_name="varcope.nii.gz",
            out_z_name="zstat.nii.gz",
            demean=True,
        ),
        name="temporalglm",
        iterfield=["design", "contrasts"],
        mem_gb=memcalc.series_std_gb * 5,
    )
    workflow.connect(inputnode, "bold", temporalglm, "in_file")
    workflow.connect(inputnode, "mask", temporalglm, "mask")
    workflow.connect(fillna, "out_no_header", temporalglm, "design")
    workflow.connect(contrasts, "out_no_header", temporalglm, "contrasts")

    # make dof volume
    makedofvolume = pe.MapNode(
        MakeDofVolume(),
        iterfield=["design"],
        name="makedofvolume",
    )
    workflow.connect(inputnode, "bold", makedofvolume, "bold_file")
    workflow.connect(fillna, "out_no_header", makedofvolume, "design")

    for glmattr, resultattr in (("cope", "effect"), ("varcb", "variance"),
                                ("z", "z")):
        split = pe.MapNode(fsl.Split(dimension="t"),
                           iterfield="in_file",
                           name=f"split{resultattr}images")
        workflow.connect(temporalglm, f"out_{glmattr}", split, "in_file")
        workflow.connect(split, "out_files", make_resultdicts_b, resultattr)
    workflow.connect(makedofvolume, "out_file", make_resultdicts_b, "dof")

    #
    tsnr = pe.Node(nac.TSNR(), name="tsnr", mem_gb=memcalc.series_std_gb)
    workflow.connect(inputnode, "bold", tsnr, "in_file")

    maxintensity = pe.MapNode(MaxIntensity(),
                              iterfield="in_file",
                              name="maxintensity",
                              mem_gb=memcalc.series_std_gb)
    workflow.connect(resample, "output_image", maxintensity, "in_file")

    calcmean = pe.MapNode(CalcMean(),
                          iterfield="parcellation",
                          name="calcmean",
                          mem_gb=memcalc.series_std_gb)
    workflow.connect(maxintensity, "out_file", calcmean, "parcellation")
    workflow.connect(tsnr, "tsnr_file", calcmean, "in_file")

    workflow.connect(calcmean, "mean", make_resultdicts_b, "mean_t_s_n_r")

    return workflow
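Stripped of the workflow plumbing, the two stages above are the classic FSL dual regression: a spatial GLM producing per-component time series, then a temporal GLM producing subject-specific spatial maps. A minimal sketch with hypothetical file names:

from nipype.interfaces import fsl

# Stage 1: regress the ICA spatial maps onto the BOLD series,
# yielding one time series per component (text matrix output).
stage1 = fsl.GLM(in_file='bold.nii.gz', design='melodic_IC.nii.gz',
                 demean=True, out_file='dr_stage1.txt')
stage1.run()

# Stage 2: regress the stage-1 time series back onto the BOLD series,
# yielding one spatial map per component.
stage2 = fsl.GLM(in_file='bold.nii.gz', design='dr_stage1.txt',
                 demean=True, des_norm=True, out_file='dr_stage2.nii.gz')
stage2.run()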
Code Example #25
File: nuisance.py  Project: nagyistge/neurita.pypes
def rest_noise_filter_wf(wf_name='rest_noise_removal'):
    """ Create a resting-state fMRI noise removal node.

    Nipype Inputs
    -------------
    rest_noise_input.in_file

    rest_noise_input.brain_mask

    rest_noise_input.wm_mask

    rest_noise_input.csf_mask

    rest_noise_input.motion_params
        Nipy motion parameters.

    Nipype Outputs
    --------------
    rest_noise_output.tsnr_file
        An SNR estimation volume file for QA purposes.

    rest_noise_output.motion_corrected
        The fMRI motion corrected image.

    rest_noise_output.nuis_corrected
        The resulting nuisance corrected image.
        This will be the same as 'motion_corrected' if compcor
        is disabled.

    rest_noise_output.motion_regressors
        Motion regressors file.

    rest_noise_output.compcor_regressors
        CompCor regressors file.

    rest_noise_output.art_displacement_files
        One image file containing the voxel-displacement timeseries.

    rest_noise_output.art_intensity_files
        One file containing the global intensity values determined
        from the brainmask.

    rest_noise_output.art_norm_files
        One file containing the composite norm.

    rest_noise_output.art_outlier_files
         One file containing a list of 0-based indices corresponding
         to outlier volumes.

    rest_noise_output.art_plot_files
        One image file containing the detected outliers.

    rest_noise_output.art_statistic_files
        One file containing information about the different types of
        artifacts and, if design info is provided, details of
        stimulus-correlated motion and a listing of artifacts by
        event type.

    Returns
    -------
    rm_nuisance_wf: nipype Workflow
    """

    # Create the workflow object
    wf = pe.Workflow(name=wf_name)

    in_fields = [
        "in_file",
        "brain_mask",
        "wm_mask",
        "csf_mask",
        "motion_params",
    ]

    out_fields = [
        "tsnr_file",
        "motion_corrected",
        "nuis_corrected",
        "motion_regressors",
        "compcor_regressors",
        "gsr_regressors",
        "art_displacement_files",
        "art_intensity_files",
        "art_norm_files",
        "art_outlier_files",
        "art_plot_files",
        "art_statistic_files",
    ]

    # input identities
    rest_noise_input = setup_node(IdentityInterface(fields=in_fields,
                                                    mandatory_inputs=True),
                                  name="rest_noise_input")

    # get the settings for filters
    filters = _get_params_for('rest_filter')

    # Compute TSNR on realigned data regressing polynomial up to order 2
    tsnr = setup_node(TSNR(regress_poly=2), name='tsnr')

    # Use :class:`nipype.algorithms.rapidart` to determine which of the
    # images in the functional series are outliers based on deviations in
    # intensity or movement.
    art = setup_node(rapidart_fmri_artifact_detection(),
                     name="detect_artifacts")

    # Compute motion regressors
    motion_regs = setup_node(Function(
        input_names=[
            'motion_params',
            'order',
            'derivatives',
        ],
        output_names=['out_files'],
        function=motion_regressors,
    ),
                             name='motion_regressors')

    # Create a filter to remove motion and art confounds
    motart_pars = setup_node(Function(
        input_names=['motion_params', 'comp_norm', 'outliers', 'detrend_poly'],
        output_names=['out_files'],
        function=create_regressors),
                             name='motart_parameters')

    motion_filter = setup_node(fsl.GLM(out_f_name='F_mcart.nii.gz',
                                       out_pf_name='pF_mcart.nii.gz',
                                       demean=True),
                               name='motion_filter')

    # Noise confound regressors
    compcor_pars = setup_node(Function(
        input_names=[
            'realigned_file', 'mask_file', 'num_components', 'extra_regressors'
        ],
        output_names=['out_files'],
        function=extract_noise_components,
    ),
                              name='compcor_pars')
    #compcor_pars = setup_node(ACompCor(), name='compcor_pars')
    #compcor_pars.inputs.components_file = 'noise_components.txt'

    compcor_filter = setup_node(fsl.GLM(out_f_name='F.nii.gz',
                                        out_pf_name='pF.nii.gz',
                                        demean=True),
                                name='compcor_filter')

    # Global signal regression
    gsr_pars = setup_node(Function(
        input_names=[
            'realigned_file', 'mask_file', 'num_components', 'extra_regressors'
        ],
        output_names=['out_files'],
        function=extract_noise_components,
    ),
                          name='gsr_pars')

    gsr_filter = setup_node(fsl.GLM(out_f_name='F_gsr.nii.gz',
                                    out_pf_name='pF_gsr.nii.gz',
                                    demean=True),
                            name='gsr_filter')

    # output identities
    rest_noise_output = setup_node(IdentityInterface(fields=out_fields,
                                                     mandatory_inputs=True),
                                   name="rest_noise_output")

    # Connect the nodes
    wf.connect([
        # tsnr
        (rest_noise_input, tsnr, [("in_file", "in_file")]),

        # artifact detection
        (rest_noise_input, art, [
            ("in_file", "realigned_files"),
            ("motion_params", "realignment_parameters"),
            ("brain_mask", "mask_file"),
        ]),

        # calculate motion regressors
        (rest_noise_input, motion_regs, [("motion_params", "motion_params")]),

        # create motion and confound regressors parameters file
        (art, motart_pars, [
            ("norm_files", "comp_norm"),
            ("outlier_files", "outliers"),
        ]),
        (motion_regs, motart_pars, [("out_files", "motion_params")]),

        # motion filtering
        (rest_noise_input, motion_filter, [
            ("in_file", "in_file"),
            (("in_file", rename, "_filtermotart"), "out_res_name"),
        ]),
        (motart_pars, motion_filter, [(("out_files", selectindex, [0]),
                                       "design")]),

        # output
        (tsnr, rest_noise_output, [("tsnr_file", "tsnr_file")]),
        (motart_pars, rest_noise_output, [("out_files", "motion_regressors")]),
        (motion_filter, rest_noise_output, [("out_res", "motion_corrected")]),
        (art, rest_noise_output, [
            ("displacement_files", "art_displacement_files"),
            ("intensity_files", "art_intensity_files"),
            ("norm_files", "art_norm_files"),
            ("outlier_files", "art_outlier_files"),
            ("plot_files", "art_plot_files"),
            ("statistic_files", "art_statistic_files"),
        ]),
    ])

    last_filter = motion_filter

    # compcor filter
    if filters['compcor_csf'] or filters['compcor_wm']:
        wf.connect([
            # calculate the compcor regressors and parameters file
            (motart_pars, compcor_pars, [
                (("out_files", selectindex, [0]), "extra_regressors"),
            ]),
            (motion_filter, compcor_pars, [
                ("out_res", "realigned_file"),
            ]),

            # the compcor filter
            (motion_filter, compcor_filter, [
                ("out_res", "in_file"),
                (("out_res", rename, "_cleaned"), "out_res_name"),
            ]),
            (compcor_pars, compcor_filter, [(("out_files", selectindex, [0]),
                                             "design")]),
            #(compcor_pars,     compcor_filter,    [("components_file",  "design")]),
            (rest_noise_input, compcor_filter, [("brain_mask", "mask")]),

            # output
            (compcor_pars, rest_noise_output, [("out_files",
                                                "compcor_regressors")]),
            #(compcor_pars,     rest_noise_output, [("components_file",   "compcor_regressors")]),
        ])
        last_filter = compcor_filter

    # global signal regression
    if filters['gsr']:
        wf.connect([
            # calculate the GSR regressor parameters file
            (last_filter, gsr_pars, [("out_res", "realigned_file")]),
            (rest_noise_input, gsr_pars, [("brain_mask", "mask_file")]),

            # the output file name
            (rest_noise_input, gsr_filter, [("brain_mask", "mask")]),
            (last_filter, gsr_filter, [
                ("out_res", "in_file"),
                (("out_res", rename, "_gsr"), "out_res_name"),
            ]),
            (gsr_pars, gsr_filter, [(("out_files", selectindex, [0]),
                                     "design")]),

            # output
            (gsr_pars, rest_noise_output, [("out_files", "gsr_regressors")]),
        ])
        last_filter = gsr_filter

    # connect the final nuisance correction output node
    wf.connect([
        (last_filter, rest_noise_output, [("out_res", "nuis_corrected")]),
    ])

    if filters['compcor_csf'] and filters['compcor_wm']:
        mask_merge = setup_node(Merge(2), name="mask_merge")
        wf.connect([
            ## the mask for the compcor filter
            (rest_noise_input, mask_merge, [("wm_mask", "in1")]),
            (rest_noise_input, mask_merge, [("csf_mask", "in2")]),
            (mask_merge, compcor_pars, [("out", "mask_file")]),
        ])

    elif filters['compcor_csf']:
        wf.connect([
            ## the mask for the compcor filter
            (rest_noise_input, compcor_pars, [("csf_mask", "mask_file")]),
        ])

    elif filters['compcor_wm']:
        wf.connect([
            ## the mask for the compcor filter
            (rest_noise_input, compcor_pars, [("wm_mask", "mask_file")]),
        ])

    return wf
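
The connect lists above lean on two helper functions, rename and selectindex, that are defined outside this excerpt. A minimal sketch consistent with how they are used here (illustrative assumptions, not the original definitions):

def rename(in_files, suffix=None):
    # Derive output basenames by appending `suffix` to each input's stem,
    # e.g. 'rest.nii.gz' -> 'rest_filtermotart.nii.gz'.
    from nipype.utils.filemanip import (filename_to_list, list_to_filename,
                                        split_filename)
    out_files = []
    for idx, fname in enumerate(filename_to_list(in_files)):
        _, name, ext = split_filename(fname)
        out_files.append(name + (suffix if suffix else '_%03d' % idx) + ext)
    return list_to_filename(out_files)

def selectindex(files, idx):
    # Pick the element(s) at positions `idx` from a list output, e.g. the
    # first design file produced for each run.
    import numpy as np
    from nipype.utils.filemanip import filename_to_list, list_to_filename
    return list_to_filename(np.array(filename_to_list(files))[idx].tolist())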
Code Example #26
def create_workflow(func_runs,
                    subject_id,
                    subjects_dir,
                    fwhm,
                    slice_times,
                    highpass_frequency,
                    lowpass_frequency,
                    TR,
                    sink_directory,
                    use_fsl_bp,
                    num_components,
                    whichvol,
                    name='wmaze'):

    wf = pe.Workflow(name=name)

    datasource = pe.Node(nio.DataGrabber(infields=['subject_id', 'run'],
                                         outfields=['func']),
                         name='datasource')
    datasource.inputs.subject_id = subject_id
    datasource.inputs.run = func_runs
    datasource.inputs.template = '/home/data/madlab/data/mri/wmaze/%s/rsfmri/rest_run%03d/rest.nii.gz'
    datasource.inputs.sort_filelist = True

    # Rename files in case they are named identically
    name_unique = pe.MapNode(util.Rename(format_string='wmaze_rest_%(run)02d'),
                             iterfield=['in_file', 'run'],
                             name='rename')
    name_unique.inputs.keep_ext = True
    name_unique.inputs.run = func_runs
    wf.connect(datasource, 'func', name_unique, 'in_file')

    # Define the outputs for the preprocessing workflow
    output_fields = [
        'reference', 'motion_parameters', 'motion_parameters_plusDerivs',
        'motionandoutlier_noise_file', 'noise_components', 'realigned_files',
        'motion_plots', 'mask_file', 'smoothed_files', 'reg_file', 'reg_cost',
        'reg_fsl_file', 'artnorm_files', 'artoutlier_files',
        'artdisplacement_files', 'tsnr_file',
        # 'bandpassed_files' is connected to this node below, so it must
        # be declared as a field here
        'bandpassed_files'
    ]

    outputnode = pe.Node(util.IdentityInterface(fields=output_fields),
                         name='outputspec')

    # Convert functional images to float representation
    img2float = pe.MapNode(fsl.ImageMaths(out_data_type='float',
                                          op_string='',
                                          suffix='_dtype'),
                           iterfield=['in_file'],
                           name='img2float')
    wf.connect(name_unique, 'out_file', img2float, 'in_file')

    # Run AFNI's Despike. This always runs; whether its output is fed to
    # realignment depends on the input configuration.
    despiker = pe.MapNode(afni.Despike(outputtype='NIFTI_GZ'),
                          iterfield=['in_file'],
                          name='despike')
    num_threads = 4
    despiker.inputs.environ = {'OMP_NUM_THREADS': '%d' % num_threads}
    # Combine the bsub arguments in one dict; assigning plugin_args twice
    # would overwrite the first setting.
    despiker.plugin_args = {
        'bsub_args': '-n %d -R "span[hosts=1]"' % num_threads
    }
    wf.connect(img2float, 'out_file', despiker, 'in_file')

    # Extract the first volume of the first run as the reference
    # (plain Node: iterfield is only meaningful for MapNodes)
    extractref = pe.Node(fsl.ExtractROI(t_size=1), name="extractref")
    wf.connect(despiker, ('out_file', pickfirst), extractref, 'in_file')
    wf.connect(despiker, ('out_file', pickvol, 0, whichvol), extractref,
               't_min')
    wf.connect(extractref, 'roi_file', outputnode, 'reference')

    if slice_times is not None:
        # Simultaneous motion and slice timing correction with Nipy algorithm
        motion_correct = pe.Node(nipy.SpaceTimeRealigner(),
                                 name='motion_correct')
        motion_correct.inputs.tr = TR
        motion_correct.inputs.slice_times = slice_times
        motion_correct.inputs.slice_info = 2
        # As with the despiker, combine the bsub arguments in one dict so
        # the second assignment does not overwrite the first.
        motion_correct.plugin_args = {
            'bsub_args': '-n %s -R "span[hosts=1]"'
                         % os.environ['OMP_NUM_THREADS']
        }
        wf.connect(despiker, 'out_file', motion_correct, 'in_file')
        wf.connect(motion_correct, 'par_file', outputnode, 'motion_parameters')
        wf.connect(motion_correct, 'out_file', outputnode, 'realigned_files')
    else:
        # Motion correct functional runs to the reference (1st volume of 1st run)
        motion_correct = pe.MapNode(fsl.MCFLIRT(save_mats=True,
                                                save_plots=True,
                                                interpolation='sinc'),
                                    name='motion_correct',
                                    iterfield=['in_file'])
        wf.connect(despiker, 'out_file', motion_correct, 'in_file')
        wf.connect(extractref, 'roi_file', motion_correct, 'ref_file')
        wf.connect(motion_correct, 'par_file', outputnode, 'motion_parameters')
        wf.connect(motion_correct, 'out_file', outputnode, 'realigned_files')

    # Compute tSNR on the realigned data, regressing out polynomials up to order 2
    tsnr = pe.MapNode(TSNR(regress_poly=2), iterfield=['in_file'], name='tsnr')
    wf.connect(motion_correct, 'out_file', tsnr, 'in_file')
    wf.connect(tsnr, 'tsnr_file', outputnode, 'tsnr_file')

    # Plot the estimated motion parameters
    plot_motion = pe.MapNode(fsl.PlotMotionParams(in_source='fsl'),
                             name='plot_motion',
                             iterfield=['in_file'])
    plot_motion.iterables = ('plot_type', ['rotations', 'translations'])
    wf.connect(motion_correct, 'par_file', plot_motion, 'in_file')
    wf.connect(plot_motion, 'out_file', outputnode, 'motion_plots')

    # Register a source file to fs space and create a brain mask in source space
    fssource = pe.Node(nio.FreeSurferSource(), name='fssource')
    fssource.inputs.subject_id = subject_id
    fssource.inputs.subjects_dir = subjects_dir

    # Extract aparc+aseg brain mask and binarize
    fs_threshold = pe.Node(fs.Binarize(min=0.5, out_type='nii'),
                           name='fs_threshold')
    wf.connect(fssource, ('aparc_aseg', get_aparc_aseg), fs_threshold,
               'in_file')

    # Calculate the transformation matrix from EPI space to FreeSurfer space
    # using the BBRegister command
    fs_register = pe.MapNode(fs.BBRegister(init='fsl'),
                             iterfield=['source_file'],
                             name='fs_register')
    fs_register.inputs.contrast_type = 't2'
    fs_register.inputs.out_fsl_file = True
    fs_register.inputs.subject_id = subject_id
    fs_register.inputs.subjects_dir = subjects_dir
    wf.connect(extractref, 'roi_file', fs_register, 'source_file')
    wf.connect(fs_register, 'out_reg_file', outputnode, 'reg_file')
    wf.connect(fs_register, 'min_cost_file', outputnode, 'reg_cost')
    wf.connect(fs_register, 'out_fsl_file', outputnode, 'reg_fsl_file')

    # Extract WM and CSF masks by eroding FreeSurfer labels
    wmcsf = pe.MapNode(fs.Binarize(),
                       iterfield=['match', 'binary_file', 'erode'],
                       name='wmcsfmask')
    #wmcsf.inputs.wm_ven_csf = True
    wmcsf.inputs.match = [[2, 41], [4, 5, 14, 15, 24, 31, 43, 44, 63]]
    wmcsf.inputs.binary_file = ['wm.nii.gz', 'csf.nii.gz']
    wmcsf.inputs.erode = [2, 2]  #int(np.ceil(slice_thickness))
    wf.connect(fssource, ('aparc_aseg', get_aparc_aseg), wmcsf, 'in_file')

    # Now transform the WM and CSF masks to the 1st volume of the 1st run
    wmcsftransform = pe.MapNode(fs.ApplyVolTransform(inverse=True,
                                                     interp='nearest'),
                                iterfield=['target_file'],
                                name='wmcsftransform')
    wmcsftransform.inputs.subjects_dir = subjects_dir
    wf.connect(extractref, 'roi_file', wmcsftransform, 'source_file')
    wf.connect(fs_register, ('out_reg_file', pickfirst), wmcsftransform,
               'reg_file')
    wf.connect(wmcsf, 'binary_file', wmcsftransform, 'target_file')

    # Transform the binarized aparc+aseg file to the 1st volume of 1st run space
    fs_voltransform = pe.MapNode(fs.ApplyVolTransform(inverse=True),
                                 iterfield=['source_file', 'reg_file'],
                                 name='fs_transform')
    fs_voltransform.inputs.subjects_dir = subjects_dir
    wf.connect(extractref, 'roi_file', fs_voltransform, 'source_file')
    wf.connect(fs_register, 'out_reg_file', fs_voltransform, 'reg_file')
    wf.connect(fs_threshold, 'binary_file', fs_voltransform, 'target_file')

    # Dilate the binarized mask (now in EPI space) by 1 voxel
    fs_threshold2 = pe.MapNode(fs.Binarize(min=0.5, out_type='nii'),
                               iterfield=['in_file'],
                               name='fs_threshold2')
    fs_threshold2.inputs.dilate = 1
    wf.connect(fs_voltransform, 'transformed_file', fs_threshold2, 'in_file')
    wf.connect(fs_threshold2, ('binary_file', pickfirst), outputnode,
               'mask_file')

    # Use RapidART to detect motion/intensity outliers
    art = pe.MapNode(ra.ArtifactDetect(use_differences=[True, False],
                                       use_norm=True,
                                       zintensity_threshold=3,
                                       norm_threshold=1,
                                       bound_by_brainmask=True,
                                       mask_type="file"),
                     iterfield=["realignment_parameters", "realigned_files"],
                     name="art")
    if slice_times is not None:
        art.inputs.parameter_source = "NiPy"
    else:
        art.inputs.parameter_source = "FSL"
    wf.connect(motion_correct, 'par_file', art, 'realignment_parameters')
    wf.connect(motion_correct, 'out_file', art, 'realigned_files')
    wf.connect(fs_threshold2, ('binary_file', pickfirst), art, 'mask_file')
    wf.connect(art, 'norm_files', outputnode, 'artnorm_files')
    wf.connect(art, 'outlier_files', outputnode, 'artoutlier_files')
    wf.connect(art, 'displacement_files', outputnode, 'artdisplacement_files')

    # Compute motion regressors (save file with 1st and 2nd derivatives)
    motreg = pe.Node(util.Function(
        input_names=['motion_params', 'order', 'derivatives'],
        output_names=['out_files'],
        function=motion_regressors,
        imports=imports),
                     name='getmotionregress')
    wf.connect(motion_correct, 'par_file', motreg, 'motion_params')
    wf.connect(motreg, 'out_files', outputnode, 'motion_parameters_plusDerivs')

    # Create a filter text file to remove motion (+ derivatives), art
    # confounds, and 1st, 2nd, and 3rd order Legendre polynomials.
    createfilter1 = pe.Node(util.Function(
        input_names=['motion_params', 'comp_norm', 'outliers', 'detrend_poly'],
        output_names=['out_files'],
        function=build_filter1,
        imports=imports),
                            name='makemotionbasedfilter')
    createfilter1.inputs.detrend_poly = 3
    wf.connect(motreg, 'out_files', createfilter1, 'motion_params')
    wf.connect(art, 'norm_files', createfilter1, 'comp_norm')
    wf.connect(art, 'outlier_files', createfilter1, 'outliers')
    wf.connect(createfilter1, 'out_files', outputnode,
               'motionandoutlier_noise_file')

    filter1 = pe.MapNode(fsl.GLM(out_f_name='F_mcart.nii.gz',
                                 out_pf_name='pF_mcart.nii.gz',
                                 demean=True),
                         iterfield=['in_file', 'design', 'out_res_name'],
                         name='filtermotion')
    wf.connect(motion_correct, 'out_file', filter1, 'in_file')
    wf.connect(motion_correct, ('out_file', rename, '_filtermotart'), filter1,
               'out_res_name')
    wf.connect(createfilter1, 'out_files', filter1, 'design')

    # Create a filter to remove noise components based on white matter and CSF
    createfilter2 = pe.MapNode(
        util.Function(input_names=[
            'realigned_file', 'mask_file', 'num_components', 'extra_regressors'
        ],
                      output_names=['out_files'],
                      function=extract_noise_components,
                      imports=imports),
        iterfield=['realigned_file', 'extra_regressors'],
        name='makecompcorrfilter')
    createfilter2.inputs.num_components = num_components
    wf.connect(createfilter1, 'out_files', createfilter2, 'extra_regressors')
    wf.connect(motion_correct, 'out_file', createfilter2, 'realigned_file')
    wf.connect(wmcsftransform, 'transformed_file', createfilter2, 'mask_file')
    wf.connect(createfilter2, 'out_files', outputnode, 'noise_components')

    filter2 = pe.MapNode(fsl.GLM(out_f_name='F.nii.gz',
                                 out_pf_name='pF.nii.gz',
                                 demean=True),
                         iterfield=['in_file', 'design', 'out_res_name'],
                         name='filter_noise_nosmooth')
    wf.connect(filter1, 'out_res', filter2, 'in_file')
    wf.connect(filter1, ('out_res', rename, '_cleaned'), filter2,
               'out_res_name')
    wf.connect(createfilter2, 'out_files', filter2, 'design')
    wf.connect(fs_threshold2, ('binary_file', pickfirst), filter2, 'mask')

    # Band-pass filter the timeseries
    if use_fsl_bp == 'True':  # use_fsl_bp arrives as a string, not a bool
        determine_bp_sigmas = pe.Node(util.Function(
            input_names=['tr', 'highpass_freq', 'lowpass_freq'],
            output_names=['out_sigmas'],
            function=calc_fslbp_sigmas),
                                      name='determine_bp_sigmas')
        determine_bp_sigmas.inputs.tr = float(TR)
        determine_bp_sigmas.inputs.highpass_freq = float(highpass_frequency)
        determine_bp_sigmas.inputs.lowpass_freq = float(lowpass_frequency)
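        # fslmaths -bptf takes its cutoffs as Gaussian sigmas in volumes,
        # not Hz; calc_fslbp_sigmas presumably converts each frequency via
        # sigma = 1 / (2 * TR * freq), and highpass_operand formats the
        # '-bptf <hp> <lp>' op_string (see the sketch after this function).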

        bandpass = pe.MapNode(fsl.ImageMaths(suffix='_tempfilt'),
                              iterfield=["in_file"],
                              name="bandpass")
        wf.connect(determine_bp_sigmas, ('out_sigmas', highpass_operand),
                   bandpass, 'op_string')
        wf.connect(filter2, 'out_res', bandpass, 'in_file')
        wf.connect(bandpass, 'out_file', outputnode, 'bandpassed_files')
    else:
        bandpass = pe.Node(util.Function(
            input_names=['files', 'lowpass_freq', 'highpass_freq', 'fs'],
            output_names=['out_files'],
            function=bandpass_filter,
            imports=imports),
                           name='bandpass')
        bandpass.inputs.fs = 1. / TR
        if highpass_frequency < 0:
            bandpass.inputs.highpass_freq = -1
        else:
            bandpass.inputs.highpass_freq = highpass_frequency
        if lowpass_frequency < 0:
            bandpass.inputs.lowpass_freq = -1
        else:
            bandpass.inputs.lowpass_freq = lowpass_frequency
        wf.connect(filter2, 'out_res', bandpass, 'files')
        wf.connect(bandpass, 'out_files', outputnode, 'bandpassed_files')

    # Smooth each run using SUSAN, with the brightness threshold set to 75%
    # of each run's median value and the mean functional image as the USAN
    smooth_median = pe.MapNode(fsl.ImageStats(op_string='-k %s -p 50'),
                               iterfield=['in_file'],
                               name='smooth_median')
    wf.connect(motion_correct, 'out_file', smooth_median, 'in_file')
    wf.connect(fs_threshold2, ('binary_file', pickfirst), smooth_median,
               'mask_file')

    smooth_meanfunc = pe.MapNode(fsl.ImageMaths(op_string='-Tmean',
                                                suffix='_mean'),
                                 iterfield=['in_file'],
                                 name='smooth_meanfunc')
    wf.connect(motion_correct, 'out_file', smooth_meanfunc, 'in_file')

    smooth_merge = pe.Node(util.Merge(2, axis='hstack'), name='smooth_merge')
    wf.connect(smooth_meanfunc, 'out_file', smooth_merge, 'in1')
    wf.connect(smooth_median, 'out_stat', smooth_merge, 'in2')

    smooth = pe.MapNode(fsl.SUSAN(),
                        iterfield=['in_file', 'brightness_threshold', 'usans'],
                        name='smooth')
    smooth.inputs.fwhm = fwhm
    # The FSL branch exposes 'out_file', while the Python bandpass function
    # exposes 'out_files'; connect whichever matches the branch taken above.
    if use_fsl_bp == 'True':
        wf.connect(bandpass, 'out_file', smooth, 'in_file')
    else:
        wf.connect(bandpass, 'out_files', smooth, 'in_file')
    wf.connect(smooth_median, ('out_stat', getbtthresh), smooth,
               'brightness_threshold')
    wf.connect(smooth_merge, ('out', getusans), smooth, 'usans')
    wf.connect(smooth, 'smoothed_file', outputnode, 'smoothed_files')
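    # getbtthresh and getusans are also defined outside this excerpt; per
    # the comment above, getbtthresh presumably returns 0.75 * the median
    # intensity of each run, and getusans pairs each mean functional with
    # that threshold to form SUSAN's usans input.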

    # Save the relevant data into an output directory
    datasink = pe.Node(nio.DataSink(), name="datasink")
    datasink.inputs.base_directory = sink_directory
    datasink.inputs.container = subject_id
    wf.connect(outputnode, 'reference', datasink, 'wmaze_rest.ref')
    wf.connect(outputnode, 'motion_parameters', datasink, 'wmaze_rest.motion')
    wf.connect(outputnode, 'realigned_files', datasink,
               'wmaze_rest.func.realigned')
    wf.connect(outputnode, 'motion_plots', datasink,
               'wmaze_rest.motion.@plots')
    wf.connect(outputnode, 'mask_file', datasink, 'wmaze_rest.ref.@mask')
    wf.connect(outputnode, 'smoothed_files', datasink,
               'wmaze_rest.func.smoothed_bandpassed')
    wf.connect(outputnode, 'reg_file', datasink, 'wmaze_rest.bbreg.@reg')
    wf.connect(outputnode, 'reg_cost', datasink, 'wmaze_rest.bbreg.@cost')
    wf.connect(outputnode, 'reg_fsl_file', datasink,
               'wmaze_rest.bbreg.@regfsl')
    wf.connect(outputnode, 'artnorm_files', datasink,
               'wmaze_rest.art.@norm_files')
    wf.connect(outputnode, 'artoutlier_files', datasink,
               'wmaze_rest.art.@outlier_files')
    wf.connect(outputnode, 'artdisplacement_files', datasink,
               'wmaze_rest.art.@displacement_files')
    wf.connect(outputnode, 'motion_parameters_plusDerivs', datasink,
               'wmaze_rest.noise.@motionplusDerivs')
    wf.connect(outputnode, 'motionandoutlier_noise_file', datasink,
               'wmaze_rest.noise.@motionplusoutliers')
    wf.connect(outputnode, 'noise_components', datasink, 'wmaze_rest.compcor')
    wf.connect(outputnode, 'tsnr_file', datasink, 'wmaze_rest.tsnr')

    return wf
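
The FSL band-pass branch above relies on calc_fslbp_sigmas and highpass_operand, which are defined elsewhere in this module. A minimal sketch under the usual FSL convention, where fslmaths -bptf expects its cutoffs as Gaussian sigmas in volumes (sigma = 1 / (2 * TR * freq)) and -1 disables one end of the band; treat this as assumed behaviour, not the original code:

def calc_fslbp_sigmas(tr, highpass_freq, lowpass_freq):
    # Convert cutoff frequencies in Hz into fslmaths -bptf sigmas (volumes).
    hp_sigma = 1. / (2 * tr * highpass_freq) if highpass_freq > 0 else -1
    lp_sigma = 1. / (2 * tr * lowpass_freq) if lowpass_freq > 0 else -1
    return hp_sigma, lp_sigma

def highpass_operand(out_sigmas):
    # Format the op_string consumed by the ImageMaths bandpass node.
    return '-bptf %.10f %.10f' % (out_sigmas[0], out_sigmas[1])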