def workflow_ieeg(parameters):
    node_read = Node(function_ieeg_read, name='read')
    node_read.inputs.active_conditions = parameters['ieeg']['read']['active_conditions']
    node_read.inputs.baseline_conditions = parameters['ieeg']['read']['baseline_conditions']
    node_read.inputs.minimalduration = parameters['ieeg']['read']['minimalduration']

    node_preprocess = MapNode(function_ieeg_preprocess, name='preprocess',
                              iterfield=['ieeg', ])
    node_preprocess.inputs.duration = parameters['ieeg']['preprocess']['duration']
    node_preprocess.inputs.reref = parameters['ieeg']['preprocess']['reref']
    node_preprocess.inputs.offset = parameters['ieeg']['preprocess']['offset']

    node_frequency = MapNode(function_ieeg_powerspectrum, name='powerspectrum',
                             iterfield=['ieeg', ])
    node_frequency.inputs.method = parameters['ieeg']['powerspectrum']['method']
    node_frequency.inputs.taper = parameters['ieeg']['powerspectrum']['taper']
    node_frequency.inputs.halfbandwidth = parameters['ieeg']['powerspectrum']['halfbandwidth']
    node_frequency.inputs.duration = parameters['ieeg']['powerspectrum']['duration']

    node_compare = Node(function_ieeg_compare, name='ecog_compare')
    node_compare.iterables = (
        'frequency',
        parameters['ieeg']['ecog_compare']['frequency_bands'],
    )
    node_compare.inputs.baseline = parameters['ieeg']['ecog_compare']['baseline']
    node_compare.inputs.method = parameters['ieeg']['ecog_compare']['method']
    node_compare.inputs.measure = parameters['ieeg']['ecog_compare']['measure']

    node_compare_allfreq = Node(function_ieeg_compare_allfreq,
                                name='ecog_compare_allfreq')

    w = Workflow('ieeg')
    w.connect(node_read, 'ieeg', node_preprocess, 'ieeg')
    w.connect(node_preprocess, 'ieeg', node_frequency, 'ieeg')
    w.connect(node_frequency, 'ieeg', node_compare, 'in_files')
    w.connect(node_frequency, 'ieeg', node_compare_allfreq, 'in_files')

    return w
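# Usage sketch for workflow_ieeg above (illustrative, not from the source):
# the nested `parameters` dict mirrors the keys the function reads. All
# concrete values, condition names, and paths below are assumptions.
parameters = {
    'ieeg': {
        'read': {'active_conditions': ['move', ],   # hypothetical names
                 'baseline_conditions': ['rest', ],
                 'minimalduration': 20},
        'preprocess': {'duration': 2, 'reref': 'average', 'offset': False},
        'powerspectrum': {'method': 'spectrogram', 'taper': 'dpss',
                          'halfbandwidth': 2, 'duration': 1},
        'ecog_compare': {'frequency_bands': [(65, 95), (96, 120)],  # iterated
                         'baseline': True, 'method': 'zstat',
                         'measure': 'zstat'},
    },
}

w = workflow_ieeg(parameters)
w.base_dir = '/tmp/ieeg_analysis'  # hypothetical working directory
w.run()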
def test_mapnode_nested():
    # module-level imports in the original test file, shown here inline
    import os
    from tempfile import mkdtemp
    import nipype.pipeline.engine as pe
    from nipype.testing import assert_equal, assert_true
    from nipype import MapNode, Function

    cwd = os.getcwd()
    wd = mkdtemp()
    os.chdir(wd)

    def func1(in1):
        return in1 + 1

    # nested=True allows arbitrarily nested input lists
    n1 = MapNode(Function(input_names=['in1'],
                          output_names=['out'],
                          function=func1),
                 iterfield=['in1'],
                 nested=True,
                 name='n1')
    n1.inputs.in1 = [[1, [2]], 3, [4, 5]]
    n1.run()
    print(n1.get_output('out'))
    yield assert_equal, n1.get_output('out'), [[2, [3]], 4, [5, 6]]

    # without nested=True the same input should raise
    n2 = MapNode(Function(input_names=['in1'],
                          output_names=['out'],
                          function=func1),
                 iterfield=['in1'],
                 nested=False,
                 name='n1')
    n2.inputs.in1 = [[1, [2]], 3, [4, 5]]
    error_raised = False
    try:
        n2.run()
    except Exception as e:
        pe.logger.info('Exception: %s' % str(e))
        error_raised = True
    yield assert_true, error_raised
def create_templates_2func_workflow(threshold=0.5,
                                    name='templates_2func_workflow'):
    templates_2func_workflow = Workflow(name=name)

    # Input Node
    inputspec = Node(utility.IdentityInterface(fields=[
        'func_file',
        'premat',
        'warp',
        'templates',
    ]), name='inputspec')

    # Get the overall EPI to MNI warp
    func_2mni_warp = Node(fsl.ConvertWarp(), name='func_2mni_warp')
    func_2mni_warp.inputs.reference = fsl.Info.standard_image(
        'MNI152_T1_2mm.nii.gz')

    # Calculate the inverse warp
    mni_2func_warp = Node(fsl.InvWarp(), name='mni_2func_warp')

    # Transform MNI templates to EPI space
    templates_2func_apply = MapNode(fsl.ApplyWarp(),
                                    iterfield=['in_file'],
                                    name='templates_2func_apply')

    # Threshold templates
    templates_threshold = MapNode(
        fsl.ImageMaths(op_string='-thr {0} -bin'.format(threshold)),
        iterfield=['in_file'],
        name='templates_threshold')

    # Output Node
    outputspec = Node(utility.IdentityInterface(
        fields=['templates_2func_files', 'func_2mni_warp']),
        name='outputspec')

    # Connect the workflow nodes
    templates_2func_workflow.connect(inputspec, 'premat',
                                     func_2mni_warp, 'premat')
    templates_2func_workflow.connect(inputspec, 'warp',
                                     func_2mni_warp, 'warp1')
    templates_2func_workflow.connect(inputspec, 'func_file',
                                     mni_2func_warp, 'reference')
    templates_2func_workflow.connect(func_2mni_warp, 'out_file',
                                     mni_2func_warp, 'warp')
    templates_2func_workflow.connect(inputspec, 'templates',
                                     templates_2func_apply, 'in_file')
    templates_2func_workflow.connect(inputspec, 'func_file',
                                     templates_2func_apply, 'ref_file')
    templates_2func_workflow.connect(mni_2func_warp, 'inverse_warp',
                                     templates_2func_apply, 'field_file')
    templates_2func_workflow.connect(templates_2func_apply, 'out_file',
                                     templates_threshold, 'in_file')
    templates_2func_workflow.connect(func_2mni_warp, 'out_file',
                                     outputspec, 'func_2mni_warp')
    templates_2func_workflow.connect(templates_threshold, 'out_file',
                                     outputspec, 'templates_2func_files')

    return templates_2func_workflow
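# Usage sketch for create_templates_2func_workflow (illustrative): feed the
# inputspec through the workflow-level inputs proxy and run. All file paths
# are hypothetical placeholders.
wf = create_templates_2func_workflow(threshold=0.5)
wf.base_dir = '/tmp/templates_2func'
wf.inputs.inputspec.func_file = 'func.nii.gz'          # EPI reference image
wf.inputs.inputspec.premat = 'func2anat.mat'           # FLIRT premat
wf.inputs.inputspec.warp = 'anat2mni_warp.nii.gz'      # FNIRT warp to MNI
wf.inputs.inputspec.templates = ['template_a.nii.gz',  # MNI-space templates
                                 'template_b.nii.gz']
wf.run()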
def workflow_ieeg(PARAMETERS):
    input = Node(IdentityInterface(fields=['ieeg', 'electrodes']),
                 name='input')

    node_read = Node(function_ieeg_read, name='read')
    node_read.inputs.conditions = PARAMETERS['read']['conditions']
    node_read.inputs.minimalduration = PARAMETERS['read']['minimalduration']

    node_preprocess = MapNode(function_ieeg_preprocess, name='preprocess',
                              iterfield=['ieeg', ])
    node_preprocess.inputs.duration = PARAMETERS['preprocess']['duration']
    node_preprocess.inputs.reref = PARAMETERS['preprocess']['reref']
    node_preprocess.inputs.offset = PARAMETERS['preprocess']['offset']

    node_frequency = MapNode(function_ieeg_powerspectrum,
                             name='powerspectrum', iterfield=['ieeg', ])
    node_frequency.inputs.method = PARAMETERS['powerspectrum']['method']
    node_frequency.inputs.taper = PARAMETERS['powerspectrum']['taper']
    node_frequency.inputs.duration = PARAMETERS['powerspectrum']['duration']

    node_compare = Node(function_ieeg_compare, name='ecog_compare')
    node_compare.inputs.frequency = PARAMETERS['ecog_compare']['frequency']
    node_compare.inputs.baseline = PARAMETERS['ecog_compare']['baseline']
    node_compare.inputs.method = PARAMETERS['ecog_compare']['method']
    node_compare.inputs.measure = PARAMETERS['ecog_compare']['measure']

    w = Workflow('ieeg')
    w.connect(input, 'ieeg', node_read, 'ieeg')
    w.connect(input, 'electrodes', node_read, 'electrodes')
    w.connect(node_read, 'ieeg', node_preprocess, 'ieeg')
    w.connect(node_preprocess, 'ieeg', node_frequency, 'ieeg')
    w.connect(node_frequency, 'ieeg', node_compare, 'in_files')

    return w
def run_bet(T1_image, workdir):
    """Run FSL BET, convert to NIDM provenance and extract stats."""
    from nipype.interfaces import fsl
    from nipype import MapNode

    strip = MapNode(fsl.BET(), iterfield=['in_file'], name='skullstripper')
    strip.inputs.in_file = T1_image
    strip.inputs.mesh = True
    strip.inputs.mask = True
    strip.base_dir = workdir

    bet_results = strip.run()
    provgraph = bet_results.provenance[0]
    for bundle in bet_results.provenance[1:]:
        provgraph.add_bundle(bundle)

    vol = MapNode(fsl.ImageStats(op_string='-V'), iterfield=['in_file'],
                  name='volumeextractor')
    vol.inputs.in_file = bet_results.outputs.out_file
    vol.base_dir = workdir
    vol_results = vol.run()
    for bundle in vol_results.provenance:
        provgraph.add_bundle(bundle)

    return provgraph, provgraph.rdf()
def test_mapnode_nested(tmpdir):
    import pytest
    tmpdir.chdir()
    from nipype import MapNode, Function

    def func1(in1):
        return in1 + 1

    n1 = MapNode(Function(input_names=['in1'],
                          output_names=['out'],
                          function=func1),
                 iterfield=['in1'],
                 nested=True,
                 name='n1')
    n1.inputs.in1 = [[1, [2]], 3, [4, 5]]
    n1.run()
    print(n1.get_output('out'))
    assert n1.get_output('out') == [[2, [3]], 4, [5, 6]]

    n2 = MapNode(Function(input_names=['in1'],
                          output_names=['out'],
                          function=func1),
                 iterfield=['in1'],
                 nested=False,
                 name='n1')
    n2.inputs.in1 = [[1, [2]], 3, [4, 5]]

    with pytest.raises(Exception) as excinfo:
        n2.run()
    assert "can only concatenate list" in str(excinfo.value)
def test_mapnode_nested(tmpdir):
    import os
    os.chdir(str(tmpdir))
    from nipype import MapNode, Function

    def func1(in1):
        return in1 + 1

    n1 = MapNode(Function(input_names=['in1'],
                          output_names=['out'],
                          function=func1),
                 iterfield=['in1'],
                 nested=True,
                 name='n1')
    n1.inputs.in1 = [[1, [2]], 3, [4, 5]]
    n1.run()
    print(n1.get_output('out'))
    assert n1.get_output('out') == [[2, [3]], 4, [5, 6]]

    n2 = MapNode(Function(input_names=['in1'],
                          output_names=['out'],
                          function=func1),
                 iterfield=['in1'],
                 nested=False,
                 name='n1')
    n2.inputs.in1 = [[1, [2]], 3, [4, 5]]
    error_raised = False
    try:
        n2.run()
    except Exception as e:
        from nipype.pipeline.engine.base import logger
        logger.info('Exception: %s' % str(e))
        error_raised = True
    assert error_raised
def run(self, n_pipeline_jobs=1):
    """Perform transformations.

    Args:
        n_pipeline_jobs (int, optional): number of parallel processing
            jobs. Defaults to 1.
    """
    if not os.path.exists(self.strOutputDir):
        os.makedirs(self.strOutputDir)
    strJobListPath = os.path.join(self.strOutputDir, 'joblist.csv')
    self.dfConfig.to_csv(strJobListPath)

    datanode = Node(utility.csv.CSVReader(
        in_file=os.path.abspath(strJobListPath), header=True),
        name='datanode')

    augment = Workflow('augmentation_affinereg',
                       base_dir=os.path.join(self.strOutputDir,
                                             'working_dir'))

    transformFunc = MapNode(fsl.ApplyXFM(interp='spline', apply_xfm=True),
                            name='transform_func',
                            iterfield=['in_file', 'reference',
                                       'in_matrix_file', 'out_file'])
    augment.connect(datanode, 'func', transformFunc, 'in_file')
    augment.connect(datanode, 'func', transformFunc, 'reference')
    augment.connect(datanode, 'affine', transformFunc, 'in_matrix_file')
    augment.connect(datanode, 'output_func', transformFunc, 'out_file')

    transformAnat = MapNode(fsl.ApplyXFM(interp='spline', apply_xfm=True),
                            name='transform_anat',
                            iterfield=['in_file', 'reference',
                                       'in_matrix_file', 'out_file'])
    augment.connect(datanode, 'anat', transformAnat, 'in_file')
    augment.connect(datanode, 'anat', transformAnat, 'reference')
    augment.connect(datanode, 'affine', transformAnat, 'in_matrix_file')
    augment.connect(datanode, 'output_anat', transformAnat, 'out_file')

    if n_pipeline_jobs == 1:
        augment.run()
    else:
        augment.run(plugin='MultiProc',
                    plugin_args={'n_procs': n_pipeline_jobs})
def create_bbregister_workflow(name="bbregister",
                               contrast_type="t2",
                               partial_brain=False,
                               init_with="fsl"):
    """Find a linear transformation to align the EPI file with the anatomy."""
    in_fields = ["subject_id", "timeseries"]
    if partial_brain:
        in_fields.append("whole_brain_template")
    inputnode = Node(IdentityInterface(in_fields), "inputs")

    # Take the mean over time to get a target volume
    meanvol = MapNode(fsl.MeanImage(), "in_file", "meanvol")

    # Do a rough skullstrip using BET
    skullstrip = MapNode(fsl.BET(), "in_file", "bet")

    # Estimate the registration to Freesurfer conformed space
    func2anat = MapNode(
        fs.BBRegister(contrast_type=contrast_type,
                      init=init_with,
                      epi_mask=True,
                      registered_file=True,
                      out_reg_file="func2anat_tkreg.dat",
                      out_fsl_file="func2anat_flirt.mat"),
        "source_file",
        "func2anat")

    # Make an image for quality control on the registration
    report = MapNode(CoregReport(), "in_file", "coreg_report")

    # Define the workflow outputs
    outputnode = Node(IdentityInterface(["tkreg_mat", "flirt_mat", "report"]),
                      "outputs")

    bbregister = Workflow(name=name)

    # Connect the registration
    bbregister.connect([
        (inputnode, func2anat, [("subject_id", "subject_id")]),
        (inputnode, report, [("subject_id", "subject_id")]),
        (inputnode, meanvol, [("timeseries", "in_file")]),
        (meanvol, skullstrip, [("out_file", "in_file")]),
        (skullstrip, func2anat, [("out_file", "source_file")]),
        (func2anat, report, [("registered_file", "in_file")]),
        (func2anat, outputnode, [("out_reg_file", "tkreg_mat")]),
        (func2anat, outputnode, [("out_fsl_file", "flirt_mat")]),
        (report, outputnode, [("out_file", "report")]),
    ])

    # Possibly connect the full_fov image
    if partial_brain:
        bbregister.connect([
            (inputnode, func2anat,
             [("whole_brain_template", "intermediate_file")]),
        ])

    return bbregister
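# Usage sketch for create_bbregister_workflow (illustrative): bbregister
# needs the subject's recon in $SUBJECTS_DIR; the subject id and run files
# below are hypothetical placeholders. The input node is named "inputs",
# hence the doubled attribute access.
wf = create_bbregister_workflow(contrast_type="t2", init_with="fsl")
wf.base_dir = "/tmp/bbregister"
wf.inputs.inputs.subject_id = "subj01"
wf.inputs.inputs.timeseries = ["run1.nii.gz", "run2.nii.gz"]
wf.run()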
def create_slicetime_workflow(name="slicetime",
                              TR=2,
                              slice_order="up",
                              interleaved=False):
    inputnode = Node(IdentityInterface(["timeseries"]), "inputs")

    if isinstance(interleaved, str) and interleaved.lower() == "siemens":
        sliceorder = MapNode(SiemensSliceOrder(), "in_file", "sliceorder")
        slicetimer_set_interleaved = False
        slicetimer_iterfields = ["in_file", "custom_order"]
    elif isinstance(interleaved, bool):
        sliceorder = None
        slicetimer_set_interleaved = interleaved
        slicetimer_iterfields = ["in_file"]
    else:
        raise ValueError("interleaved must be True, False, or 'siemens'")

    slicetimer = MapNode(fsl.SliceTimer(time_repetition=TR),
                         slicetimer_iterfields,
                         "slicetime")

    if slicetimer_set_interleaved:
        slicetimer.inputs.interleaved = True

    if slice_order == "down":
        slicetimer.inputs.index_dir = True
    elif slice_order != "up":
        raise ValueError("slice_order must be 'up' or 'down'")

    outputnode = Node(IdentityInterface(["timeseries"]), "outputs")

    slicetime = Workflow(name)
    slicetime.connect([
        (inputnode, slicetimer, [("timeseries", "in_file")]),
        (slicetimer, outputnode, [("slice_time_corrected_file",
                                   "timeseries")]),
    ])

    if sliceorder is not None:
        slicetime.connect([
            (inputnode, sliceorder, [("timeseries", "in_file")]),
            (sliceorder, slicetimer, [("out_file", "custom_order")]),
        ])

    return slicetime
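# Usage sketch for create_slicetime_workflow (illustrative): passing
# interleaved="siemens" routes each run through the custom slice-order
# branch, while a plain bool uses FSL's built-in interleaved flag.
# The file path is a hypothetical placeholder.
st = create_slicetime_workflow(TR=2, slice_order="up", interleaved="siemens")
st.base_dir = "/tmp/slicetime"
st.inputs.inputs.timeseries = ["run1.nii.gz"]
st.run()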
def create_workflow_temporalpatterns_7T(subjects, runs):
    input_node = Node(IdentityInterface(fields=[
        'bold',
        'events',
        't2star_fov',
        't2star_whole',
        't1w',
    ]), name='input')

    coreg_tstat = MapNode(
        interface=FLIRT(),
        name='realign_result_to_anat',
        iterfield=['in_file', ])
    coreg_tstat.inputs.apply_xfm = True

    w = Workflow('temporalpatterns_7T')

    w_preproc = create_workflow_preproc_spm()
    w_spatialobject = create_workflow_temporalpatterns_fsl()
    w_coreg = create_workflow_coreg_epi2t1w()

    w.connect(input_node, 'bold', w_preproc, 'input.bold')
    w.connect(input_node, 'events', w_spatialobject, 'input.events')
    w.connect(input_node, 't2star_fov', w_coreg, 'input.t2star_fov')
    w.connect(input_node, 't2star_whole', w_coreg, 'input.t2star_whole')
    w.connect(input_node, 't1w', w_coreg, 'input.t1w')
    w.connect(input_node, 't1w', coreg_tstat, 'reference')
    w.connect(w_preproc, 'realign.realigned_files',
              w_spatialobject, 'input.bold')
    w.connect(w_preproc, 'realign.mean_image', w_coreg, 'input.bold_mean')
    w.connect(w_spatialobject, 'output.T_image', coreg_tstat, 'in_file')
    w.connect(w_coreg, 'output.mat_epi2t1w', coreg_tstat, 'in_matrix_file')

    return w
def test_serial_input(tmpdir):
    import os
    from nipype import MapNode, Function, Workflow

    tmpdir.chdir()
    wd = os.getcwd()

    def func1(in1):
        return in1

    n1 = MapNode(Function(input_names=['in1'],
                          output_names=['out'],
                          function=func1),
                 iterfield=['in1'],
                 name='n1')
    n1.inputs.in1 = [1, 2, 3]

    w1 = Workflow(name='test')
    w1.base_dir = wd
    w1.add_nodes([n1])
    # set local check
    w1.config['execution'] = {'stop_on_first_crash': 'true',
                              'local_hash_check': 'true',
                              'crashdump_dir': wd,
                              'poll_sleep_duration': 2}

    # test output of num_subnodes method when serial is default (False)
    assert n1.num_subnodes() == len(n1.inputs.in1)

    # test running the workflow on default conditions
    w1.run(plugin='MultiProc')

    # test output of num_subnodes method when serial is True
    n1._serial = True
    assert n1.num_subnodes() == 1

    # test running the workflow on serial conditions
    w1.run(plugin='MultiProc')
def create_filtering_workflow(name="filter",
                              hpf_cutoff=128,
                              TR=2,
                              output_name="timeseries"):
    """Scale and high-pass filter the timeseries."""
    inputnode = Node(IdentityInterface(["timeseries", "mask_file"]),
                     "inputs")

    # Grand-median scale within the brain mask
    scale = MapNode(ScaleTimeseries(statistic="median", target=10000),
                    ["in_file", "mask_file"],
                    "scale")

    # Gaussian running-line filter
    if hpf_cutoff is None:
        hpf_sigma = -1
    else:
        hpf_sigma = (hpf_cutoff / 2.0) / TR
    filter = MapNode(fsl.TemporalFilter(highpass_sigma=hpf_sigma),
                     "in_file",
                     "filter")

    # Possibly replace the mean
    # (In later versions of FSL, the highpass filter removes the
    # mean component. Put it back, but be flexible so this isn't
    # broken on older versions of FSL).
    replacemean = MapNode(ReplaceMean(output_name=output_name),
                          ["orig_file", "filtered_file"],
                          "replacemean")

    # Compute a final mean functional volume
    meanfunc = MapNode(fsl.MeanImage(out_file="mean_func.nii.gz"),
                       "in_file", "meanfunc")

    outputnode = Node(IdentityInterface(["timeseries", "mean_file"]),
                      "outputs")

    filtering = Workflow(name)
    filtering.connect([
        (inputnode, scale, [("timeseries", "in_file"),
                            ("mask_file", "mask_file")]),
        (scale, filter, [("out_file", "in_file")]),
        (scale, replacemean, [("out_file", "orig_file")]),
        (filter, replacemean, [("out_file", "filtered_file")]),
        (replacemean, meanfunc, [("out_file", "in_file")]),
        (replacemean, outputnode, [("out_file", "timeseries")]),
        (meanfunc, outputnode, [("out_file", "mean_file")]),
    ])

    return filtering
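# Worked example of the cutoff-to-sigma conversion inside
# create_filtering_workflow: FSL's temporal filter expects the highpass
# sigma in volumes, so a 128 s cutoff sampled at TR = 2 s becomes 32.
hpf_cutoff, TR = 128, 2
hpf_sigma = (hpf_cutoff / 2.0) / TR
assert hpf_sigma == 32.0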
def create_segments_2func_workflow(threshold=0.5,
                                   name='segments_2func_workflow'):
    segments_2func_workflow = Workflow(name=name)

    # Input Node
    inputspec = Node(
        utility.IdentityInterface(fields=['segments', 'premat', 'func_file']),
        name='inputspec')

    # Calculate inverse matrix of EPI to T1
    anat_2func_matrix = Node(fsl.ConvertXFM(invert_xfm=True),
                             name='anat_2func_matrix')

    # Transform segments to EPI space
    segments_2func_apply = MapNode(fsl.ApplyXFM(),
                                   iterfield=['in_file'],
                                   name='segments_2func_apply')

    # Threshold segments
    segments_threshold = MapNode(
        fsl.ImageMaths(op_string='-thr {0} -bin'.format(threshold)),
        iterfield=['in_file'],
        name='segments_threshold')

    # Output Node
    outputspec = Node(utility.IdentityInterface(
        fields=['segments_2func_files', 'anat_2func_matrix_file']),
        name='outputspec')

    segments_2func_workflow.connect(inputspec, 'premat',
                                    anat_2func_matrix, 'in_file')
    segments_2func_workflow.connect(inputspec, 'segments',
                                    segments_2func_apply, 'in_file')
    segments_2func_workflow.connect(inputspec, 'func_file',
                                    segments_2func_apply, 'reference')
    segments_2func_workflow.connect(anat_2func_matrix, 'out_file',
                                    segments_2func_apply, 'in_matrix_file')
    segments_2func_workflow.connect(segments_2func_apply, 'out_file',
                                    segments_threshold, 'in_file')
    segments_2func_workflow.connect(anat_2func_matrix, 'out_file',
                                    outputspec, 'anat_2func_matrix_file')
    segments_2func_workflow.connect(segments_threshold, 'out_file',
                                    outputspec, 'segments_2func_files')

    return segments_2func_workflow
def create_nonbrain_meansignal(name='nonbrain_meansignal'):
    nonbrain_meansignal = Workflow(name=name)

    inputspec = Node(utility.IdentityInterface(fields=['func_file']),
                     name='inputspec')

    # Split raw 4D functional image into 3D niftis
    split_image = Node(fsl.Split(dimension='t', output_type='NIFTI'),
                       name='split_image')

    # Create a brain mask for each of the 3D images
    brain_mask = MapNode(fsl.BET(frac=0.3,
                                 mask=True,
                                 no_output=True,
                                 robust=True),
                         iterfield=['in_file'],
                         name='brain_mask')

    # Merge the 3D masks into a 4D nifti (producing a separate mask per volume)
    merge_mask = Node(fsl.Merge(dimension='t'), name='merge_mask')

    # Reverse the 4D brain mask, to produce a 4D non brain mask
    reverse_mask = Node(fsl.ImageMaths(op_string='-sub 1 -mul -1'),
                        name='reverse_mask')

    # Apply the mask on the raw functional data
    apply_mask = Node(fsl.ImageMaths(), name='apply_mask')

    # Highpass filter the non brain image
    highpass = create_highpass_filter(name='highpass')

    # Extract the mean signal from the non brain image
    mean_signal = Node(fsl.ImageMeants(), name='mean_signal')

    outputspec = Node(utility.IdentityInterface(fields=['nonbrain_regressor']),
                      name='outputspec')

    nonbrain_meansignal.connect(inputspec, 'func_file',
                                split_image, 'in_file')
    nonbrain_meansignal.connect(split_image, 'out_files',
                                brain_mask, 'in_file')
    nonbrain_meansignal.connect(brain_mask, 'mask_file',
                                merge_mask, 'in_files')
    nonbrain_meansignal.connect(merge_mask, 'merged_file',
                                reverse_mask, 'in_file')
    nonbrain_meansignal.connect(reverse_mask, 'out_file',
                                apply_mask, 'mask_file')
    nonbrain_meansignal.connect(inputspec, 'func_file',
                                apply_mask, 'in_file')
    nonbrain_meansignal.connect(apply_mask, 'out_file',
                                highpass, 'inputspec.in_file')
    nonbrain_meansignal.connect(highpass, 'outputspec.filtered_file',
                                mean_signal, 'in_file')
    nonbrain_meansignal.connect(mean_signal, 'out_file',
                                outputspec, 'nonbrain_regressor')

    return nonbrain_meansignal
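# Usage sketch for create_nonbrain_meansignal (illustrative): produces a
# single nuisance regressor from the highpass-filtered non-brain signal.
# The file path is a hypothetical placeholder.
wf = create_nonbrain_meansignal()
wf.base_dir = '/tmp/nonbrain'
wf.inputs.inputspec.func_file = 'func_raw.nii.gz'
wf.run()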
def test_serial_input():
    # module-level imports in the original test file, shown here inline
    import os
    from shutil import rmtree
    from tempfile import mkdtemp
    from nipype import MapNode, Function, Workflow
    from nipype.testing import assert_equal, assert_false

    cwd = os.getcwd()
    wd = mkdtemp()
    os.chdir(wd)

    def func1(in1):
        return in1

    n1 = MapNode(Function(input_names=['in1'],
                          output_names=['out'],
                          function=func1),
                 iterfield=['in1'],
                 name='n1')
    n1.inputs.in1 = [1, 2, 3]

    w1 = Workflow(name='test')
    w1.base_dir = wd
    w1.add_nodes([n1])
    # set local check
    w1.config['execution'] = {
        'stop_on_first_crash': 'true',
        'local_hash_check': 'true',
        'crashdump_dir': wd,
        'poll_sleep_duration': 2
    }

    # test output of num_subnodes method when serial is default (False)
    yield assert_equal, n1.num_subnodes(), len(n1.inputs.in1)

    # test running the workflow on default conditions
    error_raised = False
    try:
        w1.run(plugin='MultiProc')
    except Exception as e:
        from nipype.pipeline.engine.base import logger
        logger.info('Exception: %s' % str(e))
        error_raised = True
    yield assert_false, error_raised

    # test output of num_subnodes method when serial is True
    n1._serial = True
    yield assert_equal, n1.num_subnodes(), 1

    # test running the workflow on serial conditions
    error_raised = False
    try:
        w1.run(plugin='MultiProc')
    except Exception as e:
        from nipype.pipeline.engine.base import logger
        logger.info('Exception: %s' % str(e))
        error_raised = True
    yield assert_false, error_raised

    os.chdir(cwd)
    rmtree(wd)
def create_realignment_workflow(name="realignment"):
    """Motion and slice-time correct the timeseries and summarize."""
    inputnode = Node(IdentityInterface(["timeseries"]), "inputs")

    # Get the middle volume of each run for motion correction
    extractref = MapNode(ExtractRealignmentTarget(), "in_file", "extractref")

    # Motion correct to middle volume of each run
    mcflirt = MapNode(
        fsl.MCFLIRT(cost="normcorr",
                    interpolation="spline",
                    save_mats=True,
                    save_rms=True,
                    save_plots=True),
        ["in_file", "ref_file"],
        "mcflirt")

    # Generate a report on the motion correction
    mcreport = MapNode(RealignmentReport(),
                       ["target_file", "realign_params", "displace_params"],
                       "mcreport")

    # Define the outputs
    outputnode = Node(
        IdentityInterface(
            ["timeseries", "example_func", "report", "motion_file"]),
        "outputs")

    # Define and connect the sub workflow
    realignment = Workflow(name)
    realignment.connect([
        (inputnode, extractref, [("timeseries", "in_file")]),
        (inputnode, mcflirt, [("timeseries", "in_file")]),
        (extractref, mcflirt, [("out_file", "ref_file")]),
        (extractref, mcreport, [("out_file", "target_file")]),
        (mcflirt, mcreport, [("par_file", "realign_params"),
                             ("rms_files", "displace_params")]),
        (mcflirt, outputnode, [("out_file", "timeseries")]),
        (extractref, outputnode, [("out_file", "example_func")]),
        (mcreport, outputnode, [("realign_report", "report"),
                                ("motion_file", "motion_file")]),
    ])

    return realignment
def create_unwarp_workflow(name="unwarp", fieldmap_pe=("y", "y-")):
    """Unwarp functional timeseries using reverse phase-blipped images."""
    inputnode = Node(IdentityInterface(["timeseries", "fieldmap"]), "inputs")

    # Calculate the shift field
    # Note that setting readout_times to 1 will give a fine
    # map of the field, but the units will be off
    # Since we don't write out the map of the field itself, it does
    # not seem worth it to add another parameter for the readout times.
    # (It does require that they are the same, but when wouldn't they be?)
    topup = MapNode(
        fsl.TOPUP(encoding_direction=fieldmap_pe,
                  readout_times=[1] * len(fieldmap_pe)),
        ["in_file"],
        "topup")

    # Unwarp the timeseries
    applytopup = MapNode(
        fsl.ApplyTOPUP(method="jac", in_index=[1]),
        ["in_files", "in_topup_fieldcoef", "in_topup_movpar",
         "encoding_file"],
        "applytopup")

    # Make a figure summarizing the unwarping
    report = MapNode(UnwarpReport(),
                     ["orig_file", "corrected_file"],
                     "unwarp_report")

    # Define the outputs
    outputnode = Node(IdentityInterface(["timeseries", "report"]), "outputs")

    # Define and connect the workflow
    unwarp = Workflow(name)
    unwarp.connect([
        (inputnode, topup, [("fieldmap", "in_file")]),
        (inputnode, applytopup, [("timeseries", "in_files")]),
        (topup, applytopup, [("out_fieldcoef", "in_topup_fieldcoef"),
                             ("out_movpar", "in_topup_movpar"),
                             ("out_enc_file", "encoding_file")]),
        (inputnode, report, [("fieldmap", "orig_file")]),
        (topup, report, [("out_corrected", "corrected_file")]),
        (applytopup, outputnode, [("out_corrected", "timeseries")]),
        (report, outputnode, [("out_file", "report")]),
    ])

    return unwarp
import pytest


# The test needs a parametrize decorator to supply x_inp/f_exp; the pairs
# below are illustrative reconstructions (doubling each element), not
# necessarily the original parameter list.
@pytest.mark.parametrize("x_inp, f_exp", [
    (3, [6]),
    ([2, 3], [4, 6]),
])
def test_mapnode_iterfield_type(x_inp, f_exp):
    from nipype import MapNode, Function

    def double_func(x):
        return 2 * x

    double = Function(["x"], ["f_x"], double_func)

    double_node = MapNode(double, name="double", iterfield=["x"])
    double_node.inputs.x = x_inp

    res = double_node.run()
    assert res.outputs.f_x == f_exp
def create_confound_extraction_workflow(name="confounds", wm_components=6):
    """Extract nuisance variables from anatomical sources."""
    inputnode = Node(
        IdentityInterface(
            ["timeseries", "brain_mask", "reg_file", "subject_id"]),
        "inputs")

    # Find the subject's Freesurfer segmentation
    # Grab the Freesurfer aparc+aseg file as an anatomical brain mask
    getaseg = Node(
        io.SelectFiles({"aseg": "{subject_id}/mri/aseg.mgz"},
                       base_directory=os.environ["SUBJECTS_DIR"]),
        "getaseg")

    # Select and erode the white matter to get deep voxels
    selectwm = Node(fs.Binarize(erode=3, wm=True), "selectwm")

    # Transform the mask into functional space
    transform = MapNode(fs.ApplyVolTransform(inverse=True, interp="nearest"),
                        ["reg_file", "source_file"],
                        "transform")

    # Extract eigenvariates of the timeseries from WM and whole brain
    extract = MapNode(ExtractConfounds(n_components=wm_components),
                      ["timeseries", "brain_mask", "wm_mask"],
                      "extract")

    outputnode = Node(IdentityInterface(["confound_file"]), "outputs")

    confounds = Workflow(name)
    confounds.connect([
        (inputnode, getaseg, [("subject_id", "subject_id")]),
        (getaseg, selectwm, [("aseg", "in_file")]),
        (selectwm, transform, [("binary_file", "target_file")]),
        (inputnode, transform, [("reg_file", "reg_file"),
                                ("timeseries", "source_file")]),
        (transform, extract, [("transformed_file", "wm_mask")]),
        (inputnode, extract, [("timeseries", "timeseries"),
                              ("brain_mask", "brain_mask")]),
        (extract, outputnode, [("out_file", "confound_file")]),
    ])

    return confounds
def make_func_mask_workflow(name='funcmask', base_dir=None):
    brainmask = Workflow(name=name, base_dir=base_dir)

    inputnode = Node(utility.IdentityInterface(fields=['mean_file']),
                     name='inputnode')
    outputnode = Node(
        utility.IdentityInterface(fields=['masked_file', 'mask']),
        name='outputnode')

    skullstrip1 = MapNode(fsl.BET(frac=0.2, mask=True,
                                  output_type='NIFTI_GZ'),
                          name='skullstrip_first_pass',
                          iterfield=['in_file'])
    brainmask.connect(inputnode, 'mean_file', skullstrip1, 'in_file')

    skullstrip2 = MapNode(afni.Automask(dilate=1, outputtype='NIFTI_GZ'),
                          name='skullstrip_second_pass',
                          iterfield=['in_file'])
    brainmask.connect(skullstrip1, 'out_file', skullstrip2, 'in_file')

    combine_masks = MapNode(fsl.BinaryMaths(operation='mul'),
                            name='combine_masks',
                            iterfield=['in_file', 'operand_file'])
    brainmask.connect(skullstrip1, 'mask_file', combine_masks, 'in_file')
    brainmask.connect(skullstrip2, 'out_file', combine_masks, 'operand_file')

    apply_mask = MapNode(fsl.ApplyMask(), name='apply_mask',
                         iterfield=['in_file', 'mask_file'])
    brainmask.connect(inputnode, 'mean_file', apply_mask, 'in_file')
    brainmask.connect(combine_masks, 'out_file', apply_mask, 'mask_file')

    brainmask.connect(apply_mask, 'out_file', outputnode, 'masked_file')
    brainmask.connect(combine_masks, 'out_file', outputnode, 'mask')

    return brainmask
def make_simple_workflow():
    wf = Workflow(name="test")

    node1 = Node(IdentityInterface(fields=["foo"]), name="node1")
    node2 = MapNode(IdentityInterface(fields=["foo"]),
                    name="node2",
                    iterfield=["foo"])
    node3 = Node(IdentityInterface(fields=["foo"]), name="node3")

    wf.connect([
        (node1, node2, [("foo", "foo")]),
        (node2, node3, [("foo", "foo")]),
    ])

    return wf, node1, node2, node3
def test_mapnode_expansion(tmpdir):
    tmpdir.chdir()
    from nipype import MapNode, Function

    def func1(in1):
        return in1 + 1

    mapnode = MapNode(
        Function(function=func1),
        iterfield="in1",
        name="mapnode",
        n_procs=2,
        mem_gb=2,
    )
    mapnode.inputs.in1 = [1, 2]

    for idx, node in mapnode._make_nodes():
        for attr in ("overwrite", "run_without_submitting", "plugin_args"):
            assert getattr(node, attr) == getattr(mapnode, attr)
        for attr in ("_n_procs", "_mem_gb"):
            assert getattr(node, attr) == getattr(mapnode, attr)
def test_mapnode_json():
    """Tests that mapnodes don't generate excess jsons."""
    # module-level imports in the original test file, shown here inline
    import os
    from glob import glob
    from shutil import rmtree
    from tempfile import mkdtemp
    from nipype import MapNode, Function, Workflow
    from nipype.testing import assert_equal, assert_false

    cwd = os.getcwd()
    wd = mkdtemp()
    os.chdir(wd)

    def func1(in1):
        return in1 + 1

    n1 = MapNode(Function(input_names=['in1'],
                          output_names=['out'],
                          function=func1),
                 iterfield=['in1'],
                 name='n1')
    n1.inputs.in1 = [1]
    w1 = Workflow(name='test')
    w1.base_dir = wd
    w1.config['execution']['crashdump_dir'] = wd
    w1.add_nodes([n1])
    w1.run()
    n1.inputs.in1 = [2]
    w1.run()

    # should rerun
    n1.inputs.in1 = [1]
    eg = w1.run()

    node = eg.nodes()[0]
    outjson = glob(os.path.join(node.output_dir(), '_0x*.json'))
    yield assert_equal, len(outjson), 1

    # check that multiple json's don't trigger rerun
    with open(os.path.join(node.output_dir(), 'test.json'), 'wt') as fp:
        fp.write('dummy file')
    w1.config['execution'].update(**{'stop_on_first_rerun': True})
    error_raised = False
    try:
        w1.run()
    except Exception:
        error_raised = True
    yield assert_false, error_raised

    os.chdir(cwd)
    rmtree(wd)
def test_mapnode_expansion(tmpdir):
    tmpdir.chdir()
    from nipype import MapNode, Function

    def func1(in1):
        return in1 + 1

    mapnode = MapNode(Function(function=func1),
                      iterfield='in1',
                      name='mapnode',
                      n_procs=2,
                      mem_gb=2)
    mapnode.inputs.in1 = [1, 2]

    for idx, node in mapnode._make_nodes():
        for attr in ('overwrite', 'run_without_submitting', 'plugin_args'):
            assert getattr(node, attr) == getattr(mapnode, attr)
        for attr in ('_n_procs', '_mem_gb'):
            assert (getattr(node, attr) == getattr(mapnode, attr))
def computed_avg_node(node_name, nnodes, work_dir, chunk=None, delay=0,
                      benchmark_dir=None, benchmark=False, cli=False,
                      avg=None):
    files = get_partitions(chunk, nnodes)

    if delay is None:
        delay = 0

    ca_name = 'ca1_{0}'.format(node_name)
    ca2_name = 'ca2_{0}'.format(node_name)

    ca_1 = MapNode(Function(input_names=['chunk', 'delay', 'benchmark',
                                         'benchmark_dir', 'cli', 'wf_name',
                                         'avg', 'work_dir'],
                            output_names=['inc_chunk'],
                            function=increment_wf),
                   name=ca_name,
                   iterfield='chunk')
    ca_1.inputs.chunk = files
    ca_1.inputs.delay = delay
    ca_1.inputs.benchmark = benchmark
    ca_1.inputs.benchmark_dir = benchmark_dir
    ca_1.inputs.cli = cli
    ca_1.inputs.wf_name = 'incwf_{}'.format(ca_name)
    ca_1.inputs.avg = avg
    ca_1.inputs.work_dir = work_dir

    ca_2 = Node(Function(input_names=['chunks', 'benchmark', 'benchmark_dir'],
                         output_names=['avg_chunk'],
                         function=compute_avg),
                name=ca2_name)
    ca_2.inputs.benchmark = benchmark
    ca_2.inputs.benchmark_dir = benchmark_dir

    return ca_1, ca_2
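# Usage sketch for computed_avg_node (illustrative): the two returned nodes
# are presumably chained inside a Workflow, feeding the incremented chunks
# into the averaging node. All names and paths below are hypothetical.
ca_1, ca_2 = computed_avg_node('node0', nnodes=4, work_dir='/tmp/work',
                               chunk='chunks.csv')
wf = Workflow('computed_avg')
wf.connect(ca_1, 'inc_chunk', ca_2, 'chunks')
wf.run()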
def test_mapnode_expansion(tmpdir):
    import os
    os.chdir(str(tmpdir))
    from nipype import MapNode, Function

    def func1(in1):
        return in1 + 1

    mapnode = MapNode(Function(function=func1),
                      iterfield='in1',
                      name='mapnode')
    mapnode.inputs.in1 = [1, 2]
    mapnode.interface.num_threads = 2
    mapnode.interface.estimated_memory_gb = 2

    for idx, node in mapnode._make_nodes():
        for attr in ('overwrite', 'run_without_submitting', 'plugin_args'):
            assert getattr(node, attr) == getattr(mapnode, attr)
        for attr in ('num_threads', 'estimated_memory_gb'):
            assert (getattr(node._interface, attr) ==
                    getattr(mapnode._interface, attr))
def cluster_save(nname, chunks, output_dir, it, benchmark, benchmark_dir,
                 nnodes, work_dir):
    files = get_partitions(chunks, nnodes)

    sp_name = 'sp_{}'.format(nname)
    sp = MapNode(Function(input_names=['input_img', 'output_dir', 'it',
                                       'benchmark', 'benchmark_dir',
                                       'work_dir'],
                          output_names=['output_filename'],
                          function=save_wf),
                 name=sp_name,
                 iterfield='input_img')
    sp.inputs.input_img = files
    sp.inputs.output_dir = output_dir
    sp.inputs.it = it
    sp.inputs.benchmark = benchmark
    sp.inputs.benchmark_dir = benchmark_dir
    sp.inputs.work_dir = work_dir

    return sp
def test_serial_input(tmpdir):
    import os
    from nipype import MapNode, Function, Workflow

    tmpdir.chdir()
    wd = os.getcwd()

    def func1(in1):
        return in1

    n1 = MapNode(
        Function(input_names=["in1"], output_names=["out"], function=func1),
        iterfield=["in1"],
        name="n1",
    )
    n1.inputs.in1 = [1, 2, 3]

    w1 = Workflow(name="test")
    w1.base_dir = wd
    w1.add_nodes([n1])
    # set local check
    w1.config["execution"] = {
        "stop_on_first_crash": "true",
        "local_hash_check": "true",
        "crashdump_dir": wd,
        "poll_sleep_duration": 2,
    }

    # test output of num_subnodes method when serial is default (False)
    assert n1.num_subnodes() == len(n1.inputs.in1)

    # test running the workflow on default conditions
    w1.run(plugin="MultiProc")

    # test output of num_subnodes method when serial is True
    n1._serial = True
    assert n1.num_subnodes() == 1

    # test running the workflow on serial conditions
    w1.run(plugin="MultiProc")
def test_mapnode_json(tmpdir):
    """Tests that mapnodes don't generate excess jsons."""
    import os
    from glob import glob
    from nipype import MapNode, Function, Workflow

    tmpdir.chdir()
    wd = os.getcwd()

    def func1(in1):
        return in1 + 1

    n1 = MapNode(
        Function(input_names=["in1"], output_names=["out"], function=func1),
        iterfield=["in1"],
        name="n1",
    )
    n1.inputs.in1 = [1]
    w1 = Workflow(name="test")
    w1.base_dir = wd
    w1.config["execution"]["crashdump_dir"] = wd
    w1.add_nodes([n1])
    w1.run()
    n1.inputs.in1 = [2]
    w1.run()

    # should rerun
    n1.inputs.in1 = [1]
    eg = w1.run()

    node = list(eg.nodes())[0]
    outjson = glob(os.path.join(node.output_dir(), "_0x*.json"))
    assert len(outjson) == 1

    # check that multiple json's don't trigger rerun
    with open(os.path.join(node.output_dir(), "test.json"), "wt") as fp:
        fp.write("dummy file")
    w1.config["execution"].update(**{"stop_on_first_rerun": True})
    w1.run()