def create_connectome(name='connectome'):
    wf = pe.Workflow(name=name)

    inputspec = pe.Node(
        util.IdentityInterface(fields=['time_series', 'method']),
        name='inputspec')

    outputspec = pe.Node(
        util.IdentityInterface(fields=['connectome']),
        name='outputspec')

    node = pe.Node(Function(input_names=['time_series', 'method'],
                            output_names=['connectome'],
                            function=compute_correlation,
                            as_module=True),
                   name='connectome')

    wf.connect([
        (inputspec, node, [('time_series', 'time_series'),
                           ('method', 'method')]),
        (node, outputspec, [('connectome', 'connectome')]),
    ])

    return wf
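# A minimal usage sketch for the factory above. The input values are
# placeholders, and 'PearsonCorr' stands in for whatever method names
# compute_correlation actually accepts.
def _connectome_usage_sketch():
    wf = create_connectome('connectome_example')
    wf.inputs.inputspec.time_series = '/path/to/timeseries.csv'  # placeholder
    wf.inputs.inputspec.method = 'PearsonCorr'  # hypothetical method name
    # wf.run()  # uncomment once real inputs are wired in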
def create_qc_skullstrip(wf_name='qc_skullstrip'):
    wf = pe.Workflow(name=wf_name)

    input_node = pe.Node(util.IdentityInterface(
        fields=['anatomical_brain', 'anatomical_reorient']),
        name='inputspec')

    output_node = pe.Node(
        util.IdentityInterface(fields=['axial_image', 'sagittal_image']),
        name='outputspec')

    skull_edge = pe.Node(Function(input_names=['in_file'],
                                  output_names=['out_file'],
                                  function=afni_Edge3,
                                  as_module=True),
                         name='skull_edge')

    montage_skull = create_montage('montage_skull', 'red', 'skull_vis')

    wf.connect(input_node, 'anatomical_reorient', skull_edge, 'in_file')
    wf.connect(input_node, 'anatomical_brain',
               montage_skull, 'inputspec.underlay')
    wf.connect(skull_edge, 'out_file', montage_skull, 'inputspec.overlay')

    wf.connect(montage_skull, 'outputspec.axial_png',
               output_node, 'axial_image')
    wf.connect(montage_skull, 'outputspec.sagittal_png',
               output_node, 'sagittal_image')

    return wf
def test_function_str():
    f = pe.Node(Function(input_names=['scan', 'rest_dict', 'resource'],
                         output_names=['file_path'],
                         function=get_rest),
                name='get_rest')
    f.inputs.set(resource=resource, rest_dict=rest_dict, scan=scan)

    results = f.run()
    assert rest_dict['rest_acq-1_run-1']['scan'] == results.outputs.file_path
def qc_T1w_standard(wf, cfg, strat_pool, pipe_num, opt=None):
    '''
    {"name": "qc_T1w_standard",
     "config": ["pipeline_setup", "output_directory"],
     "switch": ["generate_quality_control_images"],
     "option_key": "None",
     "option_val": "None",
     "inputs": ["space-template_desc-brain_T1w",
                "T1w_brain_template"],
     "outputs": ["space-template_desc-brain_T1w-axial-qc",
                 "space-template_desc-brain_T1w-sagittal-qc"]}
    '''
    # make QC montages for the MNI-normalized anatomical image
    montage_mni_anat = create_montage(f'montage_mni_anat_{pipe_num}',
                                      'red', 'mni_anat', mapnode=False)

    node, out = strat_pool.get_data('space-template_desc-brain_T1w')
    wf.connect(node, out, montage_mni_anat, 'inputspec.underlay')

    anat_template_edge = pe.Node(Function(input_names=['in_file'],
                                          output_names=['out_file'],
                                          function=afni_Edge3,
                                          as_module=True),
                                 name=f'anat_template_edge_{pipe_num}')

    node, out = strat_pool.get_data('T1w_brain_template')
    wf.connect(node, out, anat_template_edge, 'in_file')
    wf.connect(anat_template_edge, 'out_file',
               montage_mni_anat, 'inputspec.overlay')

    outputs = {
        'space-template_desc-brain_T1w-axial-qc':
            (montage_mni_anat, 'outputspec.axial_png'),
        'space-template_desc-brain_T1w-sagittal-qc':
            (montage_mni_anat, 'outputspec.sagittal_png')
    }

    return (wf, outputs)
def qc_bold_registration(wf, cfg, strat_pool, pipe_num, opt=None):
    '''
    {"name": "qc_bold_registration",
     "config": ["pipeline_setup", "output_directory"],
     "switch": ["generate_quality_control_images"],
     "option_key": "None",
     "option_val": "None",
     "inputs": ["space-template_desc-mean_bold",
                "T1w_brain_template_funcreg"],
     "outputs": ["space-template_desc-mean_bold-axial-qc",
                 "space-template_desc-mean_bold-sagittal-qc"]}
    '''
    # make QC montage for Mean Functional in MNI with MNI edge
    montage_mfi = create_montage(f'montage_mfi_{pipe_num}', 'red',
                                 'MNI_edge_on_mean_func_mni', mapnode=False)

    node, out = strat_pool.get_data('space-template_desc-mean_bold')
    wf.connect(node, out, montage_mfi, 'inputspec.underlay')

    func_template_edge = pe.Node(Function(input_names=['in_file'],
                                          output_names=['out_file'],
                                          function=afni_Edge3,
                                          as_module=True),
                                 name=f'func_template_edge_{pipe_num}')

    node, out = strat_pool.get_data("T1w_brain_template_funcreg")
    wf.connect(node, out, func_template_edge, 'in_file')
    wf.connect(func_template_edge, 'out_file',
               montage_mfi, 'inputspec.overlay')

    outputs = {
        'space-template_desc-mean_bold-axial-qc':
            (montage_mfi, 'outputspec.axial_png'),
        'space-template_desc-mean_bold-sagittal-qc':
            (montage_mfi, 'outputspec.sagittal_png')
    }

    return (wf, outputs)
def qc_coregistration(wf, cfg, strat_pool, pipe_num, opt=None):
    '''
    {"name": "qc_coregistration",
     "config": ["pipeline_setup", "output_directory"],
     "switch": ["generate_quality_control_images"],
     "option_key": "None",
     "option_val": "None",
     "inputs": [("desc-brain_T1w",
                 "space-T1w_desc-mean_bold")],
     "outputs": ["space-T1w_desc-mean_bold-axial-qc",
                 "space-T1w_desc-mean_bold-sagittal-qc"]}
    '''
    # make QC montage for Mean Functional in T1 with T1 edge
    anat_edge = pe.Node(Function(input_names=['in_file'],
                                 output_names=['out_file'],
                                 function=afni_Edge3,
                                 as_module=True),
                        name=f'anat_edge_{pipe_num}')

    node, out = strat_pool.get_data('desc-brain_T1w')
    wf.connect(node, out, anat_edge, 'in_file')

    montage_anat = create_montage(f'montage_anat_{pipe_num}', 'red',
                                  't1_edge_on_mean_func_in_t1',
                                  mapnode=False)

    wf.connect(anat_edge, 'out_file', montage_anat, 'inputspec.overlay')

    node, out = strat_pool.get_data('space-T1w_desc-mean_bold')
    wf.connect(node, out, montage_anat, 'inputspec.underlay')

    outputs = {
        'space-T1w_desc-mean_bold-axial-qc':
            (montage_anat, 'outputspec.axial_png'),
        'space-T1w_desc-mean_bold-sagittal-qc':
            (montage_anat, 'outputspec.sagittal_png')
    }

    return (wf, outputs)
def test_iterable_selector():
    import shutil

    selector_test = yaml.safe_load(selector)['Regressors']

    nuisance_wf = pe.Workflow(name='iterable_selector')
    nuisance_wf.base_dir = '/tmp/iter_working_dir'

    # start from a clean working directory
    shutil.rmtree(nuisance_wf.base_dir, ignore_errors=True)

    inputspec = pe.Node(util.IdentityInterface(fields=['selector']),
                        name='inputspec')

    summarize_timeseries_node = pe.Node(Function(
        input_names=['selector'],
        output_names=['residual_file_path', 'regressors_file_path'],
        function=summarize_timeseries,
        as_module=True,
    ), name='summarize_timeseries')

    outputspec = pe.Node(util.IdentityInterface(
        fields=['residual_file_path', 'regressors_file_path']),
        name='outputspec')

    nuisance_wf.connect(inputspec, 'selector',
                        summarize_timeseries_node, 'selector')
    nuisance_wf.connect(summarize_timeseries_node, 'residual_file_path',
                        outputspec, 'residual_file_path')
    nuisance_wf.connect(summarize_timeseries_node, 'regressors_file_path',
                        outputspec, 'regressors_file_path')

    nuisance_wf.get_node('inputspec').iterables = (
        'selector', [NuisanceRegressor(s) for s in selector_test]
    )
    nuisance_wf.run()
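# A minimal standalone sketch of the nipype `iterables` mechanism the test
# above relies on: setting iterables on a node forks the workflow into one
# branch per value, so `summarize_timeseries` runs once per selector.
def _iterables_sketch():
    import nipype.pipeline.engine as pe
    import nipype.interfaces.utility as util

    demo = pe.Workflow(name='iterables_demo', base_dir='/tmp')
    params = pe.Node(util.IdentityInterface(fields=['x']), name='params')
    params.iterables = ('x', [1, 2, 3])  # forks into three branches

    consumer = pe.Node(util.IdentityInterface(fields=['x']),
                       name='consumer')
    demo.connect(params, 'x', consumer, 'x')
    # demo.run() would execute 'consumer' three times, once per value of x,
    # exactly as the 'inputspec' iterables above fan out NuisanceRegressors.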
def create_qc_fd(wf_name='qc_fd'):
    wf = pe.Workflow(name=wf_name)

    input_node = pe.Node(
        util.IdentityInterface(fields=['fd', 'excluded_volumes']),
        name='inputspec')

    output_node = pe.Node(
        util.IdentityInterface(fields=['fd_histogram_plot']),
        name='outputspec')

    fd_plot = pe.Node(Function(input_names=['arr', 'measure', 'ex_vol'],
                               output_names=['hist_path'],
                               function=gen_plot_png,
                               as_module=True),
                      name='fd_plot')
    fd_plot.inputs.measure = 'FD'

    wf.connect(input_node, 'fd', fd_plot, 'arr')
    wf.connect(input_node, 'excluded_volumes', fd_plot, 'ex_vol')
    wf.connect(fd_plot, 'hist_path', output_node, 'fd_histogram_plot')

    return wf
def create_qc_motion(wf_name='qc_motion'):
    wf = pe.Workflow(name=wf_name)

    input_node = pe.Node(util.IdentityInterface(fields=['motion_parameters']),
                         name='inputspec')

    output_node = pe.Node(util.IdentityInterface(
        fields=['motion_translation_plot', 'motion_rotation_plot']),
        name='outputspec')

    mov_plot = pe.Node(Function(
        input_names=['motion_parameters'],
        output_names=['translation_plot', 'rotation_plot'],
        function=gen_motion_plt,
        as_module=True), name='motion_plot')

    wf.connect(input_node, 'motion_parameters', mov_plot, 'motion_parameters')
    wf.connect(mov_plot, 'translation_plot',
               output_node, 'motion_translation_plot')
    wf.connect(mov_plot, 'rotation_plot',
               output_node, 'motion_rotation_plot')

    return wf
def create_qc_snr(wf_name='qc_snr'):
    wf = pe.Workflow(name=wf_name)

    input_node = pe.Node(util.IdentityInterface(fields=[
        'functional_preprocessed', 'functional_brain_mask',
        'functional_to_anat_linear_xfm', 'anatomical_brain',
        'mean_functional_in_anat'
    ]), name='inputspec')

    output_node = pe.Node(util.IdentityInterface(fields=[
        'snr_axial_image', 'snr_sagittal_image', 'snr_histogram_image',
        'snr_mean'
    ]), name='outputspec')

    # temporal standard deviation of the preprocessed functional, within mask
    std_dev = pe.Node(afni.TStat(args='-stdev'), name='std_dev')
    std_dev.inputs.outputtype = 'NIFTI_GZ'
    wf.connect(input_node, 'functional_preprocessed', std_dev, 'in_file')
    wf.connect(input_node, 'functional_brain_mask', std_dev, 'mask')

    # resample the standard-deviation map into anatomical space
    std_dev_anat = pe.Node(fsl.ApplyWarp(interp='trilinear'),
                           name='std_dev_anat')
    wf.connect(input_node, 'functional_to_anat_linear_xfm',
               std_dev_anat, 'premat')
    wf.connect(std_dev, 'out_file', std_dev_anat, 'in_file')
    wf.connect(input_node, 'anatomical_brain', std_dev_anat, 'ref_file')

    # voxelwise SNR = temporal mean / temporal standard deviation
    snr = pe.Node(afni.Calc(expr='b/a'), name='snr')
    snr.inputs.outputtype = 'NIFTI_GZ'
    wf.connect(input_node, 'mean_functional_in_anat', snr, 'in_file_b')
    wf.connect(std_dev_anat, 'out_file', snr, 'in_file_a')

    snr_val = pe.Node(Function(input_names=['measure_file'],
                               output_names=['snr_storefl'],
                               function=cal_snr_val,
                               as_module=True),
                      name='snr_val')
    wf.connect(snr, 'out_file', snr_val, 'measure_file')

    hist_snr = pe.Node(Function(input_names=['measure_file', 'measure'],
                                output_names=['hist_path'],
                                function=gen_histogram,
                                as_module=True),
                       name='hist_snr')
    hist_snr.inputs.measure = 'snr'
    wf.connect(snr, 'out_file', hist_snr, 'measure_file')

    # clip the top 1% of values so outliers do not wash out the montage
    snr_drop_percent = pe.Node(Function(
        input_names=['measure_file', 'percent'],
        output_names=['modified_measure_file'],
        function=drop_percent,
        as_module=True), name='dp_snr')
    snr_drop_percent.inputs.percent = 99
    wf.connect(snr, 'out_file', snr_drop_percent, 'measure_file')

    montage_snr = create_montage('montage_snr', 'red_to_blue', 'snr')
    wf.connect(snr_drop_percent, 'modified_measure_file',
               montage_snr, 'inputspec.overlay')
    wf.connect(input_node, 'anatomical_brain',
               montage_snr, 'inputspec.underlay')

    wf.connect(montage_snr, 'outputspec.axial_png',
               output_node, 'snr_axial_image')
    wf.connect(montage_snr, 'outputspec.sagittal_png',
               output_node, 'snr_sagittal_image')
    wf.connect(hist_snr, 'hist_path', output_node, 'snr_histogram_image')
    wf.connect(snr_val, 'snr_storefl', output_node, 'snr_mean')

    return wf
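# Conceptual check of what the 'snr' node above computes, as a numpy sketch
# on toy data (not part of the workflow): voxelwise temporal SNR is the
# temporal mean divided by the temporal standard deviation, i.e. the Calc
# expression 'b/a' with b = mean image and a = stdev image.
def _tsnr_sketch():
    import numpy as np

    bold = np.random.rand(4, 4, 4, 50) + 100.0  # toy 4D time-series
    tsnr = bold.mean(axis=-1) / bold.std(axis=-1)
    return tsnr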
def create_montage(wf_name, cbar_name, png_name, mapnode=True):
    wf = pe.Workflow(name=wf_name)

    inputnode = pe.Node(util.IdentityInterface(fields=['underlay',
                                                       'overlay']),
                        name='inputspec')

    outputnode = pe.Node(util.IdentityInterface(fields=[
        'axial_png', 'sagittal_png', 'resampled_underlay',
        'resampled_overlay'
    ]), name='outputspec')

    # node for resampling create_montage images to 1mm for QC pages
    resample_u = pe.Node(Function(input_names=['file_'],
                                  output_names=['new_fname'],
                                  function=resample_1mm,
                                  as_module=True),
                         name='resample_u')

    wf.connect(inputnode, 'underlay', resample_u, 'file_')
    wf.connect(resample_u, 'new_fname', outputnode, 'resampled_underlay')

    # same for overlays (resampling to 1mm)
    resample_o = pe.Node(Function(input_names=['file_'],
                                  output_names=['new_fname'],
                                  function=resample_1mm,
                                  as_module=True),
                         name='resample_o')

    wf.connect(inputnode, 'overlay', resample_o, 'file_')
    wf.connect(resample_o, 'new_fname', outputnode, 'resampled_overlay')

    # callers pass mapnode=False when the overlay is a single file rather
    # than a list of files (cf. qc_T1w_standard, qc_bold_registration)
    node_type = pe.MapNode if mapnode else pe.Node
    map_kwargs = {'iterfield': ['overlay']} if mapnode else {}

    # node for axial montages
    montage_a = node_type(Function(
        input_names=['overlay', 'underlay', 'png_name', 'cbar_name'],
        output_names=['png_name'],
        function=montage_axial,
        as_module=True), name='montage_a', **map_kwargs)
    montage_a.inputs.cbar_name = cbar_name
    montage_a.inputs.png_name = png_name + '_a.png'

    wf.connect(resample_u, 'new_fname', montage_a, 'underlay')
    wf.connect(resample_o, 'new_fname', montage_a, 'overlay')

    # node for sagittal montages
    montage_s = node_type(Function(
        input_names=['overlay', 'underlay', 'png_name', 'cbar_name'],
        output_names=['png_name'],
        function=montage_sagittal,
        as_module=True), name='montage_s', **map_kwargs)
    montage_s.inputs.cbar_name = cbar_name
    montage_s.inputs.png_name = png_name + '_s.png'

    wf.connect(resample_u, 'new_fname', montage_s, 'underlay')
    wf.connect(resample_o, 'new_fname', montage_s, 'overlay')

    wf.connect(montage_a, 'png_name', outputnode, 'axial_png')
    wf.connect(montage_s, 'png_name', outputnode, 'sagittal_png')

    return wf
def qa_montages(workflow, c, strat, num_strat, qc_montage_id_a,
                qc_montage_id_s, qc_hist_id, measure, idx):
    try:
        overlay, out_file = strat[measure]

        overlay_drop_percent = pe.MapNode(
            Function(input_names=['measure_file', 'percent'],
                     output_names=['modified_measure_file'],
                     function=drop_percent,
                     as_module=True),
            name='dp_%s_%d' % (measure, num_strat),
            iterfield=['measure_file'])
        overlay_drop_percent.inputs.percent = 99.999

        workflow.connect(overlay, out_file,
                         overlay_drop_percent, 'measure_file')

        montage = create_montage('montage_%s_%d' % (measure, num_strat),
                                 'cyan_to_yellow', measure)
        montage.inputs.inputspec.underlay = c.template_brain_only_for_func

        workflow.connect(overlay_drop_percent, 'modified_measure_file',
                         montage, 'inputspec.overlay')

        if 'centrality' in measure:
            histogram = pe.MapNode(
                Function(input_names=['measure_file', 'measure'],
                         output_names=['hist_path'],
                         function=gen_histogram,
                         as_module=True),
                name='hist_{0}_{1}'.format(measure, num_strat),
                iterfield=['measure_file'])
        else:
            histogram = pe.Node(Function(
                input_names=['measure_file', 'measure'],
                output_names=['hist_path'],
                function=gen_histogram,
                as_module=True),
                name='hist_{0}_{1}'.format(measure, num_strat))

        histogram.inputs.measure = measure

        workflow.connect(overlay, out_file, histogram, 'measure_file')

        strat.update_resource_pool({
            'qc___%s_a' % measure: (montage, 'outputspec.axial_png'),
            'qc___%s_s' % measure: (montage, 'outputspec.sagittal_png'),
            'qc___%s_hist' % measure: (histogram, 'hist_path')
        })

        if idx not in qc_montage_id_a:
            qc_montage_id_a[idx] = '%s_a' % measure
            qc_montage_id_s[idx] = '%s_s' % measure
            qc_hist_id[idx] = '%s_hist' % measure

    except Exception as e:
        print("[!] Connection of QA montages workflow for %s "
              "has failed.\n" % measure)
        print("Error: %s" % e)
def create_qc_workflow(workflow, c, strategies, qc_outputs):
    qc_montage_id_a = {}
    qc_montage_id_s = {}
    qc_plot_id = {}
    qc_hist_id = {}

    for num_strat, strat in enumerate(strategies):

        nodes = strat.get_nodes_names()

        if 'functional_preprocessed' in strat:
            preproc, out_file = strat['functional_preprocessed']
            brain_mask, mask_file = strat['functional_brain_mask']
            func_to_anat_xfm, xfm_file = \
                strat['functional_to_anat_linear_xfm']
            anat_ref, ref_file = strat['anatomical_brain']
            mfa, mfa_file = strat['mean_functional_in_anat']

            # make SNR plot
            qc_workflow = create_qc_snr('qc_snr_{0}'.format(num_strat))
            workflow.connect(preproc, out_file, qc_workflow,
                             'inputspec.functional_preprocessed')
            workflow.connect(brain_mask, mask_file, qc_workflow,
                             'inputspec.functional_brain_mask')
            workflow.connect(func_to_anat_xfm, xfm_file, qc_workflow,
                             'inputspec.functional_to_anat_linear_xfm')
            workflow.connect(anat_ref, ref_file, qc_workflow,
                             'inputspec.anatomical_brain')
            workflow.connect(mfa, mfa_file, qc_workflow,
                             'inputspec.mean_functional_in_anat')

            strat.update_resource_pool({
                'qc___snr_a': (qc_workflow, 'outputspec.snr_axial_image'),
                'qc___snr_s': (qc_workflow,
                               'outputspec.snr_sagittal_image'),
                'qc___snr_hist': (qc_workflow,
                                  'outputspec.snr_histogram_image'),
                'qc___snr_val': (qc_workflow, 'outputspec.snr_mean')
            })
            if 0 not in qc_montage_id_a:
                qc_montage_id_a[0] = 'snr_a'
                qc_montage_id_s[0] = 'snr_s'
                qc_hist_id[0] = 'snr_hist'

            # make motion parameters plot
            mov_param, out_file = strat['movement_parameters']

            qc_workflow = create_qc_motion('qc_motion_{0}'.format(num_strat))
            workflow.connect(mov_param, out_file, qc_workflow,
                             'inputspec.motion_parameters')

            strat.update_resource_pool({
                'qc___movement_trans_plot':
                    (qc_workflow, 'outputspec.motion_translation_plot'),
                'qc___movement_rot_plot':
                    (qc_workflow, 'outputspec.motion_rotation_plot')
            })
            if 1 not in qc_plot_id:
                qc_plot_id[1] = 'movement_trans_plot'
            if 2 not in qc_plot_id:
                qc_plot_id[2] = 'movement_rot_plot'

            # make FD plot and volumes removed
            if ('gen_motion_stats' in nodes or
                    'gen_motion_stats_before_stc' in nodes) and \
                    1 in c.runNuisance:
                fd, out_file = strat['frame_wise_displacement_jenkinson']

                qc_workflow = create_qc_fd('qc_fd_{0}'.format(num_strat))
                workflow.connect(fd, out_file, qc_workflow, 'inputspec.fd')

                strat.update_resource_pool({
                    'qc___fd_plot':
                        (qc_workflow, 'outputspec.fd_histogram_plot')
                })
                if 3 not in qc_plot_id:
                    qc_plot_id[3] = 'fd_plot'

        # make QC montages for Skull Stripping Visualization
        anat_underlay, out_file = strat['anatomical_brain']
        skull, out_file_s = strat['anatomical_reorient']

        qc_workflow = create_qc_skullstrip(
            'qc_skullstrip_{0}'.format(num_strat))

        workflow.connect(anat_underlay, out_file, qc_workflow,
                         'inputspec.anatomical_brain')
        workflow.connect(skull, out_file_s, qc_workflow,
                         'inputspec.anatomical_reorient')

        strat.update_resource_pool({
            'qc___skullstrip_vis_a':
                (qc_workflow, 'outputspec.axial_image'),
            'qc___skullstrip_vis_s':
                (qc_workflow, 'outputspec.sagittal_image')
        })
        if 4 not in qc_montage_id_a:
            qc_montage_id_a[4] = 'skullstrip_vis_a'
            qc_montage_id_s[4] = 'skullstrip_vis_s'

        if 'anatomical_to_standard' in strat:
            # make QC montages for mni normalized anatomical image
            mni_anat_underlay, out_file = strat['anatomical_to_standard']

            montage_mni_anat = create_montage(
                'montage_mni_anat_{0}'.format(num_strat), 'red', 'mni_anat')

            workflow.connect(mni_anat_underlay, out_file,
                             montage_mni_anat, 'inputspec.underlay')

            template_brain_for_anat, out_file = \
                strat['template_brain_for_anat']

            anat_template_edge = pe.Node(
                Function(input_names=['in_file'],
                         output_names=['out_file'],
                         function=afni_Edge3,
                         as_module=True),
                name='anat_template_edge_{0}'.format(num_strat))

            workflow.connect(template_brain_for_anat, out_file,
                             anat_template_edge, 'in_file')
            workflow.connect(anat_template_edge, 'out_file',
                             montage_mni_anat, 'inputspec.overlay')

            strat.update_resource_pool({
                'qc___mni_normalized_anatomical_a':
                    (montage_mni_anat, 'outputspec.axial_png'),
                'qc___mni_normalized_anatomical_s':
                    (montage_mni_anat, 'outputspec.sagittal_png')
            })
            if 5 not in qc_montage_id_a:
                qc_montage_id_a[5] = 'mni_normalized_anatomical_a'
                qc_montage_id_s[5] = 'mni_normalized_anatomical_s'

        # make QC montages for CSF WM GM
        if 'seg_preproc' in nodes:
            anat_underlay, out_file_anat = strat['anatomical_brain']
            csf_overlay, out_file_csf = strat['anatomical_csf_mask']
            wm_overlay, out_file_wm = strat['anatomical_wm_mask']
            gm_overlay, out_file_gm = strat['anatomical_gm_mask']

            montage_csf_gm_wm = create_montage_gm_wm_csf(
                'montage_csf_gm_wm_%d' % num_strat, 'montage_csf_gm_wm')

            workflow.connect(anat_underlay, out_file_anat,
                             montage_csf_gm_wm, 'inputspec.underlay')
            workflow.connect(csf_overlay, out_file_csf,
                             montage_csf_gm_wm, 'inputspec.overlay_csf')
            workflow.connect(wm_overlay, out_file_wm,
                             montage_csf_gm_wm, 'inputspec.overlay_wm')
            workflow.connect(gm_overlay, out_file_gm,
                             montage_csf_gm_wm, 'inputspec.overlay_gm')

            strat.update_resource_pool({
                'qc___csf_gm_wm_a':
                    (montage_csf_gm_wm, 'outputspec.axial_png'),
                'qc___csf_gm_wm_s':
                    (montage_csf_gm_wm, 'outputspec.sagittal_png')
            })
            if 7 not in qc_montage_id_a:
                qc_montage_id_a[7] = 'csf_gm_wm_a'
                qc_montage_id_s[7] = 'csf_gm_wm_s'

        if 'functional_preprocessed' in strat:
            preproc, out_file_preproc = strat['functional_to_standard']
            mean_preproc, out_file_mean_preproc = \
                strat['mean_functional_to_standard']

            # make QC Carpet plot
            carpet_seg = create_qc_carpet('carpet_seg_%d' % num_strat,
                                          'carpet_seg')

            workflow.connect(preproc, out_file_preproc,
                             carpet_seg, 'inputspec.functional_to_standard')
            workflow.connect(mean_preproc, out_file_mean_preproc,
                             carpet_seg,
                             'inputspec.mean_functional_to_standard')

            workflow.connect(c.PRIORS_GRAY, 'local_path',
                             carpet_seg, 'inputspec.anatomical_gm_mask')
            workflow.connect(c.PRIORS_WHITE, 'local_path',
                             carpet_seg, 'inputspec.anatomical_wm_mask')
            workflow.connect(c.PRIORS_CSF, 'local_path',
                             carpet_seg, 'inputspec.anatomical_csf_mask')

            strat.update_resource_pool({
                'qc___carpet': (carpet_seg, 'outputspec.carpet_plot'),
            })
            if 8 not in qc_plot_id:
                qc_plot_id[8] = 'carpet'

        if 'functional_preprocessed' in strat:
            # make QC montage for Mean Functional in T1 with T1 edge
            anat, out_file = strat['anatomical_brain']
            m_f_a, out_file_mfa = strat['mean_functional_in_anat']

            anat_edge = pe.Node(Function(input_names=['in_file'],
                                         output_names=['out_file'],
                                         function=afni_Edge3,
                                         as_module=True),
                                name='anat_edge_%d' % num_strat)

            montage_anat = create_montage(
                'montage_anat_%d' % num_strat, 'red',
                't1_edge_on_mean_func_in_t1')

            workflow.connect(anat, out_file, anat_edge, 'in_file')
            workflow.connect(anat_edge, 'out_file',
                             montage_anat, 'inputspec.overlay')
            workflow.connect(m_f_a, out_file_mfa,
                             montage_anat, 'inputspec.underlay')

            strat.update_resource_pool({
                'qc___mean_func_with_t1_edge_a':
                    (montage_anat, 'outputspec.axial_png'),
                'qc___mean_func_with_t1_edge_s':
                    (montage_anat, 'outputspec.sagittal_png')
            })
            if 9 not in qc_montage_id_a:
                qc_montage_id_a[9] = 'mean_func_with_t1_edge_a'
                qc_montage_id_s[9] = 'mean_func_with_t1_edge_s'

            # make QC montage for Mean Functional in MNI with MNI edge
            m_f_i, out_file = strat['mean_functional_to_standard']

            montage_mfi = create_montage(
                'montage_mfi_%d' % num_strat, 'red',
                'MNI_edge_on_mean_func_mni')

            workflow.connect(m_f_i, out_file,
                             montage_mfi, 'inputspec.underlay')

            template_brain_for_func, out_file = \
                strat['template_brain_for_func_preproc']

            func_template_edge = pe.Node(
                Function(input_names=['in_file'],
                         output_names=['out_file'],
                         function=afni_Edge3,
                         as_module=True),
                name='func_template_edge_{0}'.format(num_strat))

            workflow.connect(template_brain_for_func, out_file,
                             func_template_edge, 'in_file')
            workflow.connect(func_template_edge, 'out_file',
                             montage_mfi, 'inputspec.overlay')

            strat.update_resource_pool({
                'qc___mean_func_with_mni_edge_a':
                    (montage_mfi, 'outputspec.axial_png'),
                'qc___mean_func_with_mni_edge_s':
                    (montage_mfi, 'outputspec.sagittal_png')
            })
            if 10 not in qc_montage_id_a:
                qc_montage_id_a[10] = 'mean_func_with_mni_edge_a'
                qc_montage_id_s[10] = 'mean_func_with_mni_edge_s'

        # Link all the derivatives to the QC pages
        idx = 11
        rp = strat.get_resource_pool()
        for key in sorted(rp.keys()):
            # qc_outputs is from the outputs CSV
            if key in qc_outputs:
                qa_montages(workflow, c, strat, num_strat,
                            qc_montage_id_a, qc_montage_id_s, qc_hist_id,
                            key, idx)
                idx += 1

    return qc_montage_id_a, qc_montage_id_s, qc_hist_id, qc_plot_id
def connect_func_ingress(workflow, strat_list, c, sub_dict, subject_id,
                         input_creds_path, unique_id=None):
    for num_strat, strat in enumerate(strat_list):

        if 'func' in sub_dict:
            func_paths_dict = sub_dict['func']
        else:
            func_paths_dict = sub_dict['rest']

        if unique_id is None:
            workflow_name = f'func_gather_{num_strat}'
        else:
            workflow_name = f'func_gather_{unique_id}_{num_strat}'

        func_wf = create_func_datasource(func_paths_dict, workflow_name)

        func_wf.inputs.inputnode.set(subject=subject_id,
                                     creds_path=input_creds_path,
                                     dl_dir=c.workingDirectory)
        func_wf.get_node('inputnode').iterables = \
            ("scan", list(func_paths_dict.keys()))

        strat.update_resource_pool({
            'subject': (func_wf, 'outputspec.subject'),
            'scan': (func_wf, 'outputspec.scan')
        })

        # Grab field maps
        diff = False
        blip = False
        fmap_rp_list = []
        fmap_TE_list = []

        if "fmap" in sub_dict:
            for key in sub_dict["fmap"]:
                gather_fmap = create_fmap_datasource(
                    sub_dict["fmap"], f"fmap_gather_{key}")
                gather_fmap.inputs.inputnode.set(
                    subject=subject_id,
                    creds_path=input_creds_path,
                    dl_dir=c.workingDirectory)
                gather_fmap.inputs.inputnode.scan = key

                strat.update_resource_pool({
                    key: (gather_fmap, 'outputspec.rest'),
                    "{0}_scan_params".format(key):
                        (gather_fmap, 'outputspec.scan_params')
                })

                fmap_rp_list.append(key)

                if key in ("diff_phase", "diff_mag_one", "diff_mag_two"):
                    diff = True

                    get_fmap_metadata_imports = ['import json']
                    get_fmap_metadata = pe.Node(
                        Function(input_names=['data_config_scan_params'],
                                 output_names=['echo_time',
                                               'dwell_time',
                                               'pe_direction'],
                                 function=get_fmap_phasediff_metadata,
                                 imports=get_fmap_metadata_imports),
                        name='{0}_get_metadata_{1}'.format(key, num_strat))

                    node, out_file = strat["{}_scan_params".format(key)]
                    workflow.connect(node, out_file, get_fmap_metadata,
                                     'data_config_scan_params')

                    strat.update_resource_pool({
                        "{}_TE".format(key):
                            (get_fmap_metadata, 'echo_time'),
                        "{}_dwell".format(key):
                            (get_fmap_metadata, 'dwell_time'),
                        "{}_pedir".format(key):
                            (get_fmap_metadata, 'pe_direction')
                    })
                    fmap_TE_list.append("{}_TE".format(key))

                if key in ("epi_AP", "epi_PA"):
                    blip = True

            if diff:
                calc_delta_ratio = pe.Node(
                    Function(input_names=['dwell_time',
                                          'echo_time_one',
                                          'echo_time_two',
                                          'echo_time_three'],
                             output_names=['deltaTE', 'dwell_asym_ratio'],
                             function=calc_deltaTE_and_asym_ratio),
                    name='diff_distcor_calc_delta_{}'.format(num_strat))

                node, out_file = strat['diff_phase_dwell']
                workflow.connect(node, out_file,
                                 calc_delta_ratio, 'dwell_time')

                node, out_file = strat[fmap_TE_list[0]]
                workflow.connect(node, out_file,
                                 calc_delta_ratio, 'echo_time_one')

                node, out_file = strat[fmap_TE_list[1]]
                workflow.connect(node, out_file,
                                 calc_delta_ratio, 'echo_time_two')

                if len(fmap_TE_list) > 2:
                    node, out_file = strat[fmap_TE_list[2]]
                    workflow.connect(node, out_file,
                                     calc_delta_ratio, 'echo_time_three')

                strat.update_resource_pool({
                    'deltaTE': (calc_delta_ratio, 'deltaTE'),
                    'dwell_asym_ratio':
                        (calc_delta_ratio, 'dwell_asym_ratio')
                })

        # Add in nodes to get parameters from configuration file
        # a node which checks if scan_parameters are present for each scan
        if unique_id is None:
            workflow_name = f'scan_params_{num_strat}'
        else:
            workflow_name = f'scan_params_{unique_id}_{num_strat}'

        scan_params = pe.Node(
            Function(input_names=['data_config_scan_params',
                                  'subject_id',
                                  'scan',
                                  'pipeconfig_tr',
                                  'pipeconfig_tpattern',
                                  'pipeconfig_start_indx',
                                  'pipeconfig_stop_indx'],
                     output_names=['tr',
                                   'tpattern',
                                   'ref_slice',
                                   'start_indx',
                                   'stop_indx',
                                   'pe_direction'],
                     function=get_scan_params,
                     as_module=True),
            name=workflow_name)

        if "Selected Functional Volume" in c.func_reg_input:
            get_func_volume = pe.Node(
                interface=afni.Calc(),
                name='get_func_volume_{0}'.format(num_strat))

            get_func_volume.inputs.set(expr='a',
                                       single_idx=c.func_reg_input_volume,
                                       outputtype='NIFTI_GZ')
            workflow.connect(func_wf, 'outputspec.rest',
                             get_func_volume, 'in_file_a')

        # wire in the scan parameter workflow
        workflow.connect(func_wf, 'outputspec.scan_params',
                         scan_params, 'data_config_scan_params')
        workflow.connect(func_wf, 'outputspec.subject',
                         scan_params, 'subject_id')
        workflow.connect(func_wf, 'outputspec.scan',
                         scan_params, 'scan')

        # connect in constants
        scan_params.inputs.set(pipeconfig_start_indx=c.startIdx,
                               pipeconfig_stop_indx=c.stopIdx)

        strat.update_resource_pool({
            'raw_functional': (func_wf, 'outputspec.rest'),
            'scan_id': (func_wf, 'outputspec.scan'),
            'tr': (scan_params, 'tr'),
            'tpattern': (scan_params, 'tpattern'),
            'start_idx': (scan_params, 'start_indx'),
            'stop_idx': (scan_params, 'stop_indx'),
            'pe_direction': (scan_params, 'pe_direction'),
        })

        strat.set_leaf_properties(func_wf, 'outputspec.rest')

        if "Selected Functional Volume" in c.func_reg_input:
            strat.update_resource_pool(
                {'selected_func_volume': (get_func_volume, 'out_file')})

    return (workflow, diff, blip, fmap_rp_list)
def configuration_strategy_mock(method='FSL'):

    # mock the config dictionary
    c = Configuration({
        "num_ants_threads": 4,
        "workingDirectory": "/scratch/pipeline_tests",
        "crashLogDirectory": "/scratch",
        "outputDirectory": "/output/output/pipeline_analysis_nuisance/sub-M10978008_ses-NFB3",
        "resolution_for_func_preproc": "3mm",
        "resolution_for_func_derivative": "3mm",
        "template_for_resample": "/usr/share/fsl/5.0/data/standard/MNI152_T1_1mm_brain.nii.gz",
        "template_brain_only_for_func": "/usr/share/fsl/5.0/data/standard/MNI152_T1_${func_resolution}_brain.nii.gz",
        "template_skull_for_func": "/usr/share/fsl/5.0/data/standard/MNI152_T1_${func_resolution}.nii.gz",
        "identityMatrix": "/usr/share/fsl/5.0/etc/flirtsch/ident.mat",
        "funcRegFSLinterpolation": "sinc",
        "funcRegANTSinterpolation": "LanczosWindowedSinc"
    })

    if method == 'ANTS':
        c.update('regOption', 'ANTS')
    else:
        c.update('regOption', 'FSL')

    # mock the strategy
    strat = Strategy()

    resource_dict = {
        "mean_functional": os.path.join(
            c.outputDirectory,
            "mean_functional/sub-M10978008_ses-NFB3_task-test_bold_calc_tshift_resample_volreg_calc_tstat.nii.gz"),
        "motion_correct": os.path.join(
            c.outputDirectory,
            "motion_correct/_scan_test/sub-M10978008_ses-NFB3_task-test_bold_calc_tshift_resample_volreg.nii.gz"),
        "anatomical_brain": os.path.join(
            c.outputDirectory,
            "anatomical_brain/sub-M10978008_ses-NFB3_acq-ao_brain_resample.nii.gz"),
        "ants_initial_xfm": os.path.join(
            c.outputDirectory,
            "ants_initial_xfm/transform0DerivedInitialMovingTranslation.mat"),
        "ants_affine_xfm": os.path.join(
            c.outputDirectory, "ants_affine_xfm/transform2Affine.mat"),
        "ants_rigid_xfm": os.path.join(
            c.outputDirectory, "ants_rigid_xfm/transform1Rigid.mat"),
        "anatomical_to_mni_linear_xfm": os.path.join(
            c.outputDirectory,
            "anatomical_to_mni_linear_xfm/sub-M10978008_ses-NFB3_T1w_resample_calc_flirt.mat"),
        "functional_to_anat_linear_xfm": os.path.join(
            c.outputDirectory,
            "functional_to_anat_linear_xfm/_scan_test/sub-M10978008_ses-NFB3_task-test_bold_calc_tshift_resample_volreg_calc_tstat_flirt.mat"),
        'ants_symm_warp_field': os.path.join(
            c.outputDirectory,
            "anatomical_to_symmetric_mni_nonlinear_xfm/transform3Warp.nii.gz"),
        'ants_symm_affine_xfm': os.path.join(
            c.outputDirectory,
            "ants_symmetric_affine_xfm/transform2Affine.mat"),
        'ants_symm_rigid_xfm': os.path.join(
            c.outputDirectory,
            "ants_symmetric_rigid_xfm/transform1Rigid.mat"),
        'ants_symm_initial_xfm': os.path.join(
            c.outputDirectory,
            "ants_symmetric_initial_xfm/transform0DerivedInitialMovingTranslation.mat"),
        "dr_tempreg_maps_files": [os.path.join(
            '/scratch',
            'resting_preproc_sub-M10978008_ses-NFB3_cpac105',
            'temporal_dual_regression_0/_scan_test/'
            '_selector_CSF-2mmE-M_aC-WM-2mmE-DPC5_G-M_M-SDB_P-2/'
            '_spatial_map_PNAS_Smith09_rsn10_spatial_map_file_..cpac_templates..PNAS_Smith09_rsn10.nii.gz/'
            'split_raw_volumes/temp_reg_map_000{0}.nii.gz'.format(n))
            for n in range(10)]
    }

    if method == 'ANTS':
        resource_dict["anatomical_to_mni_nonlinear_xfm"] = os.path.join(
            c.outputDirectory,
            "anatomical_to_mni_nonlinear_xfm/transform3Warp.nii.gz")
    else:
        resource_dict["anatomical_to_mni_nonlinear_xfm"] = os.path.join(
            c.outputDirectory,
            "anatomical_to_mni_nonlinear_xfm/sub-M10978008_ses-NFB3_T1w_resample_fieldwarp.nii.gz")

    file_node_num = 0
    for resource, filepath in resource_dict.items():
        strat.update_resource_pool({
            resource: file_node(filepath, file_node_num)
        })
        strat.append_name(resource + '_0')
        file_node_num += 1

    templates_for_resampling = [
        (c.resolution_for_func_preproc, c.template_brain_only_for_func,
         'template_brain_for_func_preproc', 'resolution_for_func_preproc'),
        (c.resolution_for_func_preproc, c.template_skull_for_func,
         'template_skull_for_func_preproc', 'resolution_for_func_preproc')
    ]

    for resolution, template, template_name, tag in templates_for_resampling:
        resampled_template = pe.Node(Function(
            input_names=['resolution', 'template', 'template_name', 'tag'],
            output_names=['resampled_template'],
            function=resolve_resolution,
            as_module=True),
            name='resampled_' + template_name)

        resampled_template.inputs.resolution = resolution
        resampled_template.inputs.template = template
        resampled_template.inputs.template_name = template_name
        resampled_template.inputs.tag = tag

        strat.update_resource_pool(
            {template_name: (resampled_template, 'resampled_template')})
        strat.append_name('resampled_template_0')

    return c, strat
def temporal_variance_mask(threshold, by_slice=False, erosion=False,
                           degree=1):
    threshold_method = "VAR"
    # fall back to treating the threshold as a plain number; the string
    # branch below overrides this when an 'SD' or 'PCT' suffix is given
    threshold_value = threshold

    if isinstance(threshold, str):
        regex_match = {
            "SD": r"([0-9]+(\.[0-9]+)?)\s*SD",
            "PCT": r"([0-9]+(\.[0-9]+)?)\s*PCT",
        }

        for method, regex in regex_match.items():
            matched = re.match(regex, threshold)
            if matched:
                threshold_method = method
                threshold_value = matched.groups()[0]

    try:
        threshold_value = float(threshold_value)
    except (TypeError, ValueError):
        raise ValueError(
            "Error converting threshold value {0} from {1} to a "
            "floating point number. The threshold value can "
            "contain SD or PCT for selecting a threshold based on "
            "the variance distribution, otherwise it should be a "
            "floating point number.".format(threshold_value, threshold))

    if threshold_value < 0:
        raise ValueError(
            "Threshold value should be positive, instead of {0}.".format(
                threshold_value))

    if threshold_method == "PCT" and threshold_value >= 100.0:
        raise ValueError(
            "Percentile should be less than 100, received {0}.".format(
                threshold_value))

    threshold = threshold_value

    wf = pe.Workflow(name='tcompcor')

    input_node = pe.Node(util.IdentityInterface(
        fields=['functional_file_path', 'mask_file_path']),
        name='inputspec')
    output_node = pe.Node(util.IdentityInterface(fields=['mask']),
                          name='outputspec')

    # C-PAC default performs linear regression while nipype performs
    # quadratic regression
    detrend = pe.Node(afni.Detrend(args='-polort {0}'.format(degree),
                                   outputtype='NIFTI'),
                      name='detrend')
    wf.connect(input_node, 'functional_file_path', detrend, 'in_file')

    std = pe.Node(afni.TStat(args='-nzstdev', outputtype='NIFTI'),
                  name='std')
    wf.connect(input_node, 'mask_file_path', std, 'mask')
    wf.connect(detrend, 'out_file', std, 'in_file')

    var = pe.Node(afni.Calc(expr='a*a', outputtype='NIFTI'), name='var')
    wf.connect(std, 'out_file', var, 'in_file_a')

    if by_slice:
        slices = pe.Node(fsl.Slice(), name='slicer')
        wf.connect(var, 'out_file', slices, 'in_file')

        mask_slices = pe.Node(fsl.Slice(), name='mask_slicer')
        wf.connect(input_node, 'mask_file_path', mask_slices, 'in_file')

        mapper = pe.MapNode(
            util.IdentityInterface(fields=['out_file', 'mask_file']),
            name='slice_mapper', iterfield=['out_file', 'mask_file'])
        wf.connect(slices, 'out_files', mapper, 'out_file')
        wf.connect(mask_slices, 'out_files', mapper, 'mask_file')

    else:
        mapper_list = pe.Node(util.Merge(1), name='slice_mapper_list')
        wf.connect(var, 'out_file', mapper_list, 'in1')

        mask_mapper_list = pe.Node(util.Merge(1),
                                   name='slice_mask_mapper_list')
        wf.connect(input_node, 'mask_file_path', mask_mapper_list, 'in1')

        mapper = pe.Node(
            util.IdentityInterface(fields=['out_file', 'mask_file']),
            name='slice_mapper')
        wf.connect(mapper_list, 'out', mapper, 'out_file')
        wf.connect(mask_mapper_list, 'out', mapper, 'mask_file')

    if threshold_method == "PCT":
        threshold_node = pe.MapNode(Function(
            input_names=['in_file', 'mask', 'threshold_pct'],
            output_names=['threshold'],
            function=compute_pct_threshold, as_module=True),
            name='threshold_value', iterfield=['in_file', 'mask'])
        threshold_node.inputs.threshold_pct = threshold_value
        wf.connect(mapper, 'out_file', threshold_node, 'in_file')
        wf.connect(mapper, 'mask_file', threshold_node, 'mask')

    elif threshold_method == "SD":
        threshold_node = pe.MapNode(Function(
            input_names=['in_file', 'mask', 'threshold_sd'],
            output_names=['threshold'],
            function=compute_sd_threshold, as_module=True),
            name='threshold_value', iterfield=['in_file', 'mask'])
        threshold_node.inputs.threshold_sd = threshold_value
        wf.connect(mapper, 'out_file', threshold_node, 'in_file')
        wf.connect(mapper, 'mask_file', threshold_node, 'mask')

    else:
        threshold_node = pe.MapNode(Function(
            input_names=['in_file', 'mask', 'threshold'],
            output_names=['threshold'],
            function=compute_threshold, as_module=True),
            name='threshold_value', iterfield=['in_file', 'mask'])
        threshold_node.inputs.threshold = threshold_value
        wf.connect(mapper, 'out_file', threshold_node, 'in_file')
        wf.connect(mapper, 'mask_file', threshold_node, 'mask')

    threshold_mask = pe.MapNode(interface=fsl.maths.Threshold(),
                                name='threshold',
                                iterfield=['in_file', 'thresh'])
    threshold_mask.inputs.args = '-bin'
    wf.connect(mapper, 'out_file', threshold_mask, 'in_file')
    wf.connect(threshold_node, 'threshold', threshold_mask, 'thresh')

    merge_slice_masks = pe.Node(interface=fsl.Merge(),
                                name='merge_slice_masks')
    merge_slice_masks.inputs.dimension = 'z'
    wf.connect(threshold_mask, 'out_file', merge_slice_masks, 'in_files')

    wf.connect(merge_slice_masks, 'merged_file', output_node, 'mask')

    return wf
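# Standalone check of the threshold-string convention parsed above, with
# the regexes copied verbatim from temporal_variance_mask:
def _threshold_spec_examples():
    import re

    regex_match = {
        "SD": r"([0-9]+(\.[0-9]+)?)\s*SD",
        "PCT": r"([0-9]+(\.[0-9]+)?)\s*PCT",
    }

    for spec in ("2.5 SD", "98PCT"):
        for method, regex in regex_match.items():
            matched = re.match(regex, spec)
            if matched:
                print(spec, '->', method, float(matched.groups()[0]))
    # "2.5 SD" -> SD 2.5 (threshold at 2.5 standard deviations)
    # "98PCT"  -> PCT 98.0 (threshold at the 98th percentile)
    # anything matching neither pattern is treated as a plain VAR threshold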
def create_network_centrality_workflow(workflow, c, strategies):
    if not any((True in c.degWeightOptions,
                True in c.eigWeightOptions,
                True in c.lfcdWeightOptions)):
        return strategies

    for num_strat, strat in enumerate(strategies[:]):

        # Resample the functional mni to the centrality mask resolution
        resample_functional_to_template = pe.Node(
            interface=fsl.FLIRT(),
            name='resample_functional_to_template_%d' % num_strat)
        resample_functional_to_template.inputs.set(
            interp='trilinear',
            in_matrix_file=c.identityMatrix,
            apply_xfm=True)

        node, out_file = strat['functional_to_standard']
        workflow.connect(node, out_file,
                         resample_functional_to_template, 'in_file')
        workflow.connect(c.templateSpecificationFile, 'local_path',
                         resample_functional_to_template, 'reference')

        merge_node = pe.Node(Function(input_names=['deg_list',
                                                   'eig_list',
                                                   'lfcd_list'],
                                      output_names=['merged_list'],
                                      function=merge_lists,
                                      as_module=True),
                             name='merge_node_%d' % num_strat)

        if True in c.degWeightOptions:
            connect_centrality_workflow(workflow, c, strat, num_strat,
                                        resample_functional_to_template,
                                        c.templateSpecificationFile,
                                        merge_node, 'degree',
                                        c.degCorrelationThresholdOption,
                                        c.degCorrelationThreshold)

        if True in c.eigWeightOptions:
            connect_centrality_workflow(workflow, c, strat, num_strat,
                                        resample_functional_to_template,
                                        c.templateSpecificationFile,
                                        merge_node, 'eigenvector',
                                        c.eigCorrelationThresholdOption,
                                        c.eigCorrelationThreshold)

        if True in c.lfcdWeightOptions:
            connect_centrality_workflow(workflow, c, strat, num_strat,
                                        resample_functional_to_template,
                                        c.templateSpecificationFile,
                                        merge_node, 'lfcd',
                                        c.lfcdCorrelationThresholdOption,
                                        c.lfcdCorrelationThreshold)

        if 0 in c.runNetworkCentrality:
            strategies += [strat.fork()]

        strat.update_resource_pool({
            'centrality': (merge_node, 'merged_list')
        })

    return strategies
def anat_longitudinal_wf(subject_id, sub_list, config):
    """
    Parameters
    ----------
    subject_id : str
        the id of the subject
    sub_list : list of dict
        this is a list of sessions for one subject and each session is the
        same dictionary as the one given to prep_workflow
    config : configuration
        a configuration object containing the information of the pipeline
        config. (Same as for prep_workflow)

    Returns
    -------
    None
    """
    # list of lists for every strategy
    session_id_list = []
    session_wfs = {}

    cpac_dirs = []
    out_dir = config.pipeline_setup['output_directory']['path']

    orig_pipe_name = config.pipeline_setup['pipeline_name']

    # Loop over the sessions to create the input for the longitudinal
    # algorithm
    for session in sub_list:
        unique_id = session['unique_id']
        session_id_list.append(unique_id)

        try:
            creds_path = session['creds_path']
            if creds_path and 'none' not in creds_path.lower():
                if os.path.exists(creds_path):
                    input_creds_path = os.path.abspath(creds_path)
                else:
                    err_msg = 'Credentials path: "%s" for subject "%s" ' \
                              'session "%s" was not found. Check this ' \
                              'path and try again.' % (creds_path,
                                                       subject_id,
                                                       unique_id)
                    raise Exception(err_msg)
            else:
                input_creds_path = None
        except KeyError:
            input_creds_path = None

        workflow = initialize_nipype_wf(
            config, sub_list[0],  # just grab the first one for the name
            name="anat_longitudinal_pre-preproc")

        workflow, rpool = initiate_rpool(workflow, config, session)
        pipeline_blocks = build_anat_preproc_stack(rpool, config)
        workflow = connect_pipeline(workflow, config, rpool,
                                    pipeline_blocks)

        session_wfs[unique_id] = rpool

        rpool.gather_pipes(workflow, config)

        workflow.run()

        cpac_dir = os.path.join(out_dir, f'cpac_{orig_pipe_name}',
                                f'{subject_id}_{unique_id}')
        cpac_dirs.append(os.path.join(cpac_dir, 'anat'))

    # Now we have all the anat_preproc set up for every session
    # loop over the different anat preproc strategies
    strats_brain_dct = {}
    strats_head_dct = {}

    for cpac_dir in cpac_dirs:
        if os.path.isdir(cpac_dir):
            for filename in os.listdir(cpac_dir):
                if 'T1w.nii' in filename:
                    for tag in filename.split('_'):
                        if 'desc-' in tag and 'brain' in tag:
                            if tag not in strats_brain_dct:
                                strats_brain_dct[tag] = []
                            strats_brain_dct[tag].append(
                                os.path.join(cpac_dir, filename))
                            if tag not in strats_head_dct:
                                strats_head_dct[tag] = []
                            head_file = filename.replace(tag,
                                                         'desc-reorient')
                            strats_head_dct[tag].append(
                                os.path.join(cpac_dir, head_file))

    for strat in strats_brain_dct.keys():

        wf = initialize_nipype_wf(
            config, sub_list[0],  # just grab the first one for the name
            name=f"template_node_{strat}")

        config.pipeline_setup['pipeline_name'] = \
            f'longitudinal_{orig_pipe_name}'

        template_node_name = f'longitudinal_anat_template_{strat}'

        # This node will generate the longitudinal template (the functions
        # are in longitudinal_preproc)
        # Later other algorithms could be added to calculate it, like the
        # multivariate template from ANTS
        # It would just require to change it here.
        template_node = subject_specific_template(
            workflow_name=template_node_name)

        template_node.inputs.set(
            avg_method=config.longitudinal_template_generation[
                'average_method'],
            dof=config.longitudinal_template_generation['dof'],
            interp=config.longitudinal_template_generation['interp'],
            cost=config.longitudinal_template_generation['cost'],
            convergence_threshold=config.longitudinal_template_generation[
                'convergence_threshold'],
            thread_pool=config.longitudinal_template_generation[
                'thread_pool'],
            unique_id_list=list(session_wfs.keys()))

        template_node.inputs.input_brain_list = strats_brain_dct[strat]
        template_node.inputs.input_skull_list = strats_head_dct[strat]

        long_id = f'longitudinal_{subject_id}_strat-{strat}'

        wf, rpool = initiate_rpool(wf, config, part_id=long_id)

        rpool.set_data("space-longitudinal_desc-brain_T1w",
                       template_node, 'brain_template', {}, "",
                       template_node_name)
        rpool.set_data("space-longitudinal_desc-brain_T1w-template",
                       template_node, 'brain_template', {}, "",
                       template_node_name)
        rpool.set_data("space-longitudinal_desc-reorient_T1w",
                       template_node, 'skull_template', {}, "",
                       template_node_name)
        rpool.set_data("space-longitudinal_desc-reorient_T1w-template",
                       template_node, 'skull_template', {}, "",
                       template_node_name)

        pipeline_blocks = [mask_longitudinal_T1w_brain]

        pipeline_blocks = build_T1w_registration_stack(rpool, config,
                                                       pipeline_blocks)

        pipeline_blocks = build_segmentation_stack(rpool, config,
                                                   pipeline_blocks)

        wf = connect_pipeline(wf, config, rpool, pipeline_blocks)

        excl = ['space-longitudinal_desc-brain_T1w',
                'space-longitudinal_desc-reorient_T1w',
                'space-longitudinal_desc-brain_mask']
        rpool.gather_pipes(wf, config, add_excl=excl)

        # this is going to run multiple times!
        # once for every strategy!
        wf.run()

        # now, just write out a copy of the above to each session
        config.pipeline_setup['pipeline_name'] = orig_pipe_name
        for session in sub_list:

            unique_id = session['unique_id']

            try:
                creds_path = session['creds_path']
                if creds_path and 'none' not in creds_path.lower():
                    if os.path.exists(creds_path):
                        input_creds_path = os.path.abspath(creds_path)
                    else:
                        err_msg = 'Credentials path: "%s" for subject ' \
                                  '"%s" session "%s" was not found. ' \
                                  'Check this path and try again.' % (
                                      creds_path, subject_id, unique_id)
                        raise Exception(err_msg)
                else:
                    input_creds_path = None
            except KeyError:
                input_creds_path = None

            wf = initialize_nipype_wf(config, sub_list[0])

            wf, rpool = initiate_rpool(wf, config, session)

            config.pipeline_setup['pipeline_name'] = \
                f'longitudinal_{orig_pipe_name}'
            rpool = ingress_output_dir(config, rpool, long_id,
                                       creds_path=input_creds_path)

            select_node_name = f'select_{unique_id}'
            select_sess = pe.Node(Function(input_names=['session',
                                                        'output_brains',
                                                        'warps'],
                                           output_names=['brain_path',
                                                         'warp_path'],
                                           function=select_session),
                                  name=select_node_name)
            select_sess.inputs.session = unique_id

            wf.connect(template_node, 'output_brain_list',
                       select_sess, 'output_brains')
            wf.connect(template_node, 'warp_list', select_sess, 'warps')

            rpool.set_data("space-longitudinal_desc-brain_T1w",
                           select_sess, 'brain_path', {}, "",
                           select_node_name)
            rpool.set_data("from-T1w_to-longitudinal_mode-image_"
                           "desc-linear_xfm",
                           select_sess, 'warp_path', {}, "",
                           select_node_name)

            config.pipeline_setup['pipeline_name'] = orig_pipe_name

            excl = ['space-template_desc-brain_T1w',
                    'space-T1w_desc-brain_mask']
            rpool.gather_pipes(wf, config, add_excl=excl)
            wf.run()

    # begin single-session stuff again
    for session in sub_list:

        unique_id = session['unique_id']

        try:
            creds_path = session['creds_path']
            if creds_path and 'none' not in creds_path.lower():
                if os.path.exists(creds_path):
                    input_creds_path = os.path.abspath(creds_path)
                else:
                    err_msg = 'Credentials path: "%s" for subject "%s" ' \
                              'session "%s" was not found. Check this ' \
                              'path and try again.' % (creds_path,
                                                       subject_id,
                                                       unique_id)
                    raise Exception(err_msg)
            else:
                input_creds_path = None
        except KeyError:
            input_creds_path = None

        wf = initialize_nipype_wf(config, sub_list[0])

        wf, rpool = initiate_rpool(wf, config, session)

        pipeline_blocks = [warp_longitudinal_T1w_to_template,
                           warp_longitudinal_seg_to_T1w]

        wf = connect_pipeline(wf, config, rpool, pipeline_blocks)

        rpool.gather_pipes(wf, config)

        # this is going to run multiple times!
        # once for every strategy!
        wf.run()
def create_qc_carpet(wf_name='qc_carpet', output_image='qc_carpet'):
    wf = pe.Workflow(name=wf_name)

    input_node = pe.Node(util.IdentityInterface(fields=[
        'functional_to_standard', 'mean_functional_to_standard',
        'anatomical_gm_mask', 'anatomical_wm_mask', 'anatomical_csf_mask'
    ]), name='inputspec')

    output_node = pe.Node(util.IdentityInterface(fields=['carpet_plot']),
                          name='outputspec')

    gm_resample = pe.Node(afni.Resample(), name='gm_resample')
    gm_resample.inputs.outputtype = 'NIFTI'
    wf.connect(input_node, 'anatomical_gm_mask', gm_resample, 'in_file')
    wf.connect(input_node, 'mean_functional_to_standard',
               gm_resample, 'master')

    gm_mask = pe.Node(afni.Calc(), name="gm_mask")
    gm_mask.inputs.expr = 'astep(a, 0.5)'
    gm_mask.inputs.outputtype = 'NIFTI'
    wf.connect(gm_resample, 'out_file', gm_mask, 'in_file_a')

    wm_resample = pe.Node(afni.Resample(), name='wm_resample')
    wm_resample.inputs.outputtype = 'NIFTI'
    wf.connect(input_node, 'anatomical_wm_mask', wm_resample, 'in_file')
    wf.connect(input_node, 'mean_functional_to_standard',
               wm_resample, 'master')

    wm_mask = pe.Node(afni.Calc(), name="wm_mask")
    wm_mask.inputs.expr = 'astep(a, 0.5)'
    wm_mask.inputs.outputtype = 'NIFTI'
    wf.connect(wm_resample, 'out_file', wm_mask, 'in_file_a')

    csf_resample = pe.Node(afni.Resample(), name='csf_resample')
    csf_resample.inputs.outputtype = 'NIFTI'
    wf.connect(input_node, 'anatomical_csf_mask', csf_resample, 'in_file')
    wf.connect(input_node, 'mean_functional_to_standard',
               csf_resample, 'master')

    csf_mask = pe.Node(afni.Calc(), name="csf_mask")
    csf_mask.inputs.expr = 'astep(a, 0.5)'
    csf_mask.inputs.outputtype = 'NIFTI'
    wf.connect(csf_resample, 'out_file', csf_mask, 'in_file_a')

    carpet_plot = pe.Node(Function(input_names=['gm_mask',
                                                'wm_mask',
                                                'csf_mask',
                                                'functional_to_standard',
                                                'output'],
                                   output_names=['carpet_plot'],
                                   function=gen_carpet_plt,
                                   as_module=True),
                          name='carpet_plot')
    carpet_plot.inputs.output = output_image

    wf.connect(gm_mask, 'out_file', carpet_plot, 'gm_mask')
    wf.connect(wm_mask, 'out_file', carpet_plot, 'wm_mask')
    wf.connect(csf_mask, 'out_file', carpet_plot, 'csf_mask')
    wf.connect(input_node, 'functional_to_standard',
               carpet_plot, 'functional_to_standard')
    wf.connect(carpet_plot, 'carpet_plot', output_node, 'carpet_plot')

    return wf
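# What the AFNI 3dcalc expression 'astep(a, 0.5)' above does, in numpy
# terms (a sketch on toy data): binarize the resampled, hence fractional,
# tissue mask, keeping voxels whose absolute value exceeds 0.5.
def _astep_sketch():
    import numpy as np

    prob = np.array([0.1, 0.49, 0.5, 0.8])
    mask = (np.abs(prob) > 0.5).astype(int)  # -> [0, 0, 0, 1]
    return mask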
def ingress_func_metadata(wf, cfg, rpool, sub_dict, subject_id,
                          input_creds_path, unique_id=None):
    # Grab field maps
    diff = False
    blip = False
    fmap_rp_list = []
    fmap_TE_list = []

    if "fmap" in sub_dict:
        second = False
        for key in sub_dict["fmap"]:
            gather_fmap = create_fmap_datasource(
                sub_dict["fmap"], f"fmap_gather_{key}_{subject_id}")
            gather_fmap.inputs.inputnode.set(
                subject=subject_id,
                creds_path=input_creds_path,
                dl_dir=cfg.pipeline_setup['working_directory']['path'])
            gather_fmap.inputs.inputnode.scan = key

            # label the first EPI field map 'epi_1' and the second 'epi_2'
            if 'epi' in key and not second:
                key = 'epi_1'
                second = True
            elif 'epi' in key and second:
                key = 'epi_2'

            rpool.set_data(key, gather_fmap, 'outputspec.rest',
                           {}, "", "fmap_ingress")
            rpool.set_data(f'{key}_scan_params', gather_fmap,
                           'outputspec.scan_params', {}, "",
                           "fmap_params_ingress")

            fmap_rp_list.append(key)

            if key in ("diff_phase", "diff_mag_one", "diff_mag_two"):
                diff = True

                get_fmap_metadata_imports = ['import json']
                get_fmap_metadata = pe.Node(Function(
                    input_names=['data_config_scan_params'],
                    output_names=['echo_time', 'dwell_time',
                                  'pe_direction'],
                    function=get_fmap_phasediff_metadata,
                    imports=get_fmap_metadata_imports),
                    name=f'{key}_get_metadata')

                wf.connect(gather_fmap, 'outputspec.scan_params',
                           get_fmap_metadata, 'data_config_scan_params')

                rpool.set_data(f'{key}_TE', get_fmap_metadata,
                               'echo_time', {}, "", "fmap_TE_ingress")
                rpool.set_data(f'{key}_dwell', get_fmap_metadata,
                               'dwell_time', {}, "", "fmap_dwell_ingress")
                rpool.set_data(f'{key}_pedir', get_fmap_metadata,
                               'pe_direction', {}, "",
                               "fmap_pedir_ingress")

                fmap_TE_list.append(f"{key}_TE")

            if key in ("epi_AP", "epi_PA"):
                blip = True

        if diff:
            calc_delta_ratio = pe.Node(Function(
                input_names=['dwell_time', 'echo_time_one',
                             'echo_time_two', 'echo_time_three'],
                output_names=['deltaTE', 'dwell_asym_ratio'],
                function=calc_deltaTE_and_asym_ratio),
                name='diff_distcor_calc_delta')

            # <--- there will only be one pipe_idx
            node, out_file = rpool.get('diff_phase_dwell')[
                "['diff_phase_dwell:fmap_dwell_ingress']"]['data']
            wf.connect(node, out_file, calc_delta_ratio, 'dwell_time')

            node, out_file = rpool.get(fmap_TE_list[0])[
                f"['{fmap_TE_list[0]}:fmap_TE_ingress']"]['data']
            wf.connect(node, out_file, calc_delta_ratio, 'echo_time_one')

            node, out_file = rpool.get(fmap_TE_list[1])[
                f"['{fmap_TE_list[1]}:fmap_TE_ingress']"]['data']
            wf.connect(node, out_file, calc_delta_ratio, 'echo_time_two')

            if len(fmap_TE_list) > 2:
                node, out_file = rpool.get(fmap_TE_list[2])[
                    f"['{fmap_TE_list[2]}:fmap_TE_ingress']"]['data']
                wf.connect(node, out_file,
                           calc_delta_ratio, 'echo_time_three')

            rpool.set_data('deltaTE', calc_delta_ratio, 'deltaTE',
                           {}, "", "deltaTE_ingress")
            rpool.set_data('dwell_asym_ratio', calc_delta_ratio,
                           'dwell_asym_ratio', {}, "",
                           "dwell_asym_ratio_ingress")

    # Add in nodes to get parameters from configuration file
    # a node which checks if scan_parameters are present for each scan
    scan_params_imports = ['from CPAC.utils.utils import check, '
                           'try_fetch_parameter']
    scan_params = pe.Node(
        Function(input_names=['data_config_scan_params',
                              'subject_id',
                              'scan',
                              'pipeconfig_tr',
                              'pipeconfig_tpattern',
                              'pipeconfig_start_indx',
                              'pipeconfig_stop_indx'],
                 output_names=['tr',
                               'tpattern',
                               'ref_slice',
                               'start_indx',
                               'stop_indx',
                               'pe_direction'],
                 function=get_scan_params,
                 imports=scan_params_imports),
        name=f"bold_scan_params_{subject_id}_{unique_id}")
    scan_params.inputs.subject_id = subject_id
    scan_params.inputs.set(
        pipeconfig_start_indx=cfg.functional_preproc['truncation'][
            'start_tr'],
        pipeconfig_stop_indx=cfg.functional_preproc['truncation']['stop_tr'])

    # wire in the scan parameter workflow
    node, out = rpool.get(
        'scan_params')["['scan_params:scan_params_ingress']"]['data']
    wf.connect(node, out, scan_params, 'data_config_scan_params')

    node, out = rpool.get('scan')["['scan:func_ingress']"]['data']
    wf.connect(node, out, scan_params, 'scan')

    rpool.set_data('TR', scan_params, 'tr', {}, "", "func_metadata_ingress")
    rpool.set_data('tpattern', scan_params, 'tpattern', {}, "",
                   "func_metadata_ingress")
    rpool.set_data('start_tr', scan_params, 'start_indx', {}, "",
                   "func_metadata_ingress")
    rpool.set_data('stop_tr', scan_params, 'stop_indx', {}, "",
                   "func_metadata_ingress")
    rpool.set_data('pe_direction', scan_params, 'pe_direction', {}, "",
                   "func_metadata_ingress")

    return (wf, rpool, diff, blip, fmap_rp_list)
def create_qpp(name='qpp', working_dir=None, crash_dir=None):
    if not working_dir:
        working_dir = os.path.join(os.getcwd(), 'QPP_work_dir')
    if not crash_dir:
        crash_dir = os.path.join(os.getcwd(), 'QPP_crash_dir')

    workflow = pe.Workflow(name=name)
    workflow.base_dir = working_dir
    workflow.config['execution'] = {
        'hash_method': 'timestamp',
        'crashdump_dir': os.path.abspath(crash_dir)
    }

    inputspec = pe.Node(util.IdentityInterface(fields=[
        'datasets',
        'window_length', 'permutations',
        'lower_correlation_threshold', 'higher_correlation_threshold',
        'correlation_threshold_iteration',
        'iterations', 'convergence_iterations',
    ]), name='inputspec')

    outputspec = pe.Node(util.IdentityInterface(fields=['qpp']),
                         name='outputspec')

    merge = pe.Node(fsl.Merge(), name='joint_datasets')
    merge.inputs.dimension = 't'
    merge.inputs.output_type = 'NIFTI_GZ'

    mask = pe.Node(interface=fsl.ImageMaths(), name='joint_mask')
    mask.inputs.op_string = '-abs -Tmin -bin'

    detect = pe.Node(Function(
        input_names=['datasets',
                     'joint_datasets',
                     'joint_mask',
                     'window_length',
                     'permutations',
                     'lower_correlation_threshold',
                     'higher_correlation_threshold',
                     'correlation_threshold_iteration',
                     'iterations',
                     'convergence_iterations'],
        output_names=['qpp'],
        function=detect_qpp,
        as_module=True), name='detect_qpp')

    workflow.connect([
        (inputspec, merge, [('datasets', 'in_files')]),
        (merge, mask, [('merged_file', 'in_file')]),
        (merge, detect, [('merged_file', 'joint_datasets')]),
        (mask, detect, [('out_file', 'joint_mask')]),
        (inputspec, detect, [
            (('datasets', length), 'datasets'),
            ('window_length', 'window_length'),
            ('permutations', 'permutations'),
            ('lower_correlation_threshold', 'lower_correlation_threshold'),
            ('higher_correlation_threshold',
             'higher_correlation_threshold'),
            ('correlation_threshold_iteration',
             'correlation_threshold_iteration'),
            ('iterations', 'iterations'),
            ('convergence_iterations', 'convergence_iterations'),
        ]),
        (detect, outputspec, [('qpp', 'qpp')]),
    ])

    return workflow
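# A hedged usage sketch for the QPP workflow above; the file paths and
# parameter values are placeholders, not recommended defaults.
def _qpp_usage_sketch():
    qpp_wf = create_qpp(name='qpp_demo')
    qpp_wf.inputs.inputspec.datasets = ['/path/sub-01_bold.nii.gz',
                                        '/path/sub-02_bold.nii.gz']
    qpp_wf.inputs.inputspec.window_length = 30
    qpp_wf.inputs.inputspec.permutations = 100
    qpp_wf.inputs.inputspec.lower_correlation_threshold = 0.1
    qpp_wf.inputs.inputspec.higher_correlation_threshold = 0.2
    qpp_wf.inputs.inputspec.correlation_threshold_iteration = 3
    qpp_wf.inputs.inputspec.iterations = 15
    qpp_wf.inputs.inputspec.convergence_iterations = 1
    # qpp_wf.run()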
def ants_apply_warps_func_mni(workflow,
                              output_name,
                              func_key,
                              ref_key,
                              num_strat,
                              strat,
                              interpolation_method='LanczosWindowedSinc',
                              distcor=False,
                              map_node=False,
                              inverse=False,
                              symmetry='asymmetric',
                              input_image_type=0,
                              num_ants_cores=1,
                              registration_template='t1',
                              func_type='non-ica-aroma',
                              num_cpus=1):
    """
    Apply previously calculated ANTS registration transforms to input
    images. This workflow employs the antsApplyTransforms tool:

    http://stnava.github.io/ANTs/

    Parameters
    ----------
    name : string, optional
        Name of the workflow.

    Returns
    -------
    apply_ants_warp_wf : nipype.pipeline.engine.Workflow

    Notes
    -----

    Workflow Inputs::

        workflow: Nipype workflow object
            the workflow containing the resources involved
        output_name: str
            what the name of the warped functional should be when written
            to the resource pool
        func_key: string
            resource pool key corresponding to the node containing the 3D
            or 4D functional file to be written into MNI space, use 'leaf'
            for a leaf node
        ref_key: string
            resource pool key corresponding to the file path to the
            template brain used for functional-to-template registration
        num_strat: int
            the number of strategy objects
        strat: C-PAC Strategy object
            a strategy with one or more resource pools
        interpolation_method: str
            which interpolation to use when applying the warps, commonly
            used options are 'Linear', 'Bspline', 'LanczosWindowedSinc'
            (default) for derivatives and image data, 'NearestNeighbor'
            for masks
        distcor: boolean
            indicates whether a distortion correction transformation should
            be added to the transforms, this of course requires that a
            distortion correction map exist in the resource pool
        map_node: boolean
            indicates whether a mapnode should be used, if TRUE func_key is
            expected to correspond to a list of resources that should each
            be written into standard space with the other parameters
        inverse: boolean
            writes the inverse of the transform, i.e. MNI->EPI instead of
            EPI->MNI
        input_image_type: int
            argument taken by the ANTs apply warp tool; in this case,
            should be 0 for scalars (default) and 3 for 4D functional
            time-series
        num_ants_cores: int
            the number of CPU cores dedicated to ANTS
            anatomical-to-standard registration
        registration_template: str
            which template to use as a target for the apply warps
            ('t1' or 'epi'), should be the same as the target used in the
            warp calculation (registration)
        func_type: str
            'non-ica-aroma' or 'ica-aroma' - how to handle the functional
            time series based on the particular demands of ICA-AROMA
            processed time series
        num_cpus: int
            the number of CPUs dedicated to each participant workflow -
            this is used to determine how to parallelize the warp
            application step

    Workflow Outputs::

        outputspec.output_image : string (nifti file)
            Normalized output file

    Workflow Graph:

    .. image::
        :width: 500

    Detailed Workflow Graph:

    .. image::
        :width: 500

    Apply the functional-to-structural and structural-to-template warps to
    the 4D functional time-series to warp it to template space.
    """
    # if the input is a string, assume that it is resource pool key,
    # if it is a tuple, assume that it is a node, outfile pair,
    # otherwise, something funky is going on
    if isinstance(func_key, str):
        if func_key == "leaf":
            input_node, input_out = strat.get_leaf_properties()
        else:
            input_node, input_out = strat[func_key]
    elif isinstance(func_key, tuple):
        input_node, input_out = func_key

    if isinstance(ref_key, str):
        ref_node, ref_out = strat[ref_key]
    elif isinstance(ref_key, tuple):
        ref_node, ref_out = ref_key

    # when inverse is enabled, we want to update the name of various
    # nodes so that we know they were inverted
    inverse_string = ''
    if inverse is True:
        inverse_string = '_inverse'

    # make sure that resource pool has some required resources before
    # proceeding
    if 'fsl_mat_as_itk' not in strat and registration_template == 't1':

        fsl_reg_2_itk = pe.Node(c3.C3dAffineTool(),
                                name='fsl_reg_2_itk_{0}'.format(num_strat))
        fsl_reg_2_itk.inputs.itk_transform = True
        fsl_reg_2_itk.inputs.fsl2ras = True

        # convert the .mat from linear Func->Anat to
        # ANTS format
        node, out_file = strat['functional_to_anat_linear_xfm']
        workflow.connect(node, out_file, fsl_reg_2_itk, 'transform_file')

        node, out_file = strat['anatomical_brain']
        workflow.connect(node, out_file, fsl_reg_2_itk, 'reference_file')

        ref_node, ref_out = strat['mean_functional']
        workflow.connect(ref_node, ref_out, fsl_reg_2_itk, 'source_file')

        itk_imports = ['import os']
        change_transform = pe.Node(
            util.Function(input_names=['input_affine_file'],
                          output_names=['updated_affine_file'],
                          function=change_itk_transform_type,
                          imports=itk_imports),
            name='change_transform_type_{0}'.format(num_strat))

        workflow.connect(fsl_reg_2_itk, 'itk_transform',
                         change_transform, 'input_affine_file')

        strat.update_resource_pool({
            'fsl_mat_as_itk': (change_transform, 'updated_affine_file')
        })
        strat.append_name(fsl_reg_2_itk.name)

    # stack of transforms to be combined to achieve the desired
    # transformation
    num_transforms = 5
    collect_transforms_key = \
        'collect_transforms{0}'.format(inverse_string)

    if distcor is True and func_type != 'ica-aroma':
        num_transforms = 6
        collect_transforms_key = \
            'collect_transforms{0}{1}'.format('_distcor', inverse_string)

    if collect_transforms_key not in strat:
        if registration_template == 't1':
            # handle both symmetric and asymmetric transforms
            ants_transformation_dict = {
                'asymmetric': {
                    'anatomical_to_mni_nonlinear_xfm':
                        'anatomical_to_mni_nonlinear_xfm',
                    'mni_to_anatomical_nonlinear_xfm':
                        'mni_to_anatomical_nonlinear_xfm',
                    'ants_affine_xfm': 'ants_affine_xfm',
                    'ants_rigid_xfm': 'ants_rigid_xfm',
                    'ants_initial_xfm': 'ants_initial_xfm',
                    'blip_warp': 'blip_warp',
                    'blip_warp_inverse': 'blip_warp_inverse',
                    'fsl_mat_as_itk': 'fsl_mat_as_itk',
                },
                'symmetric': {
                    'anatomical_to_mni_nonlinear_xfm':
                        'anatomical_to_symmetric_mni_nonlinear_xfm',
                    'mni_to_anatomical_nonlinear_xfm':
                        'symmetric_mni_to_anatomical_nonlinear_xfm',
                    'ants_affine_xfm': 'ants_symmetric_affine_xfm',
                    'ants_rigid_xfm': 'ants_symmetric_rigid_xfm',
                    'ants_initial_xfm': 'ants_symmetric_initial_xfm',
                    'blip_warp': 'blip_warp',
                    'blip_warp_inverse': 'blip_warp_inverse',
                    'fsl_mat_as_itk': 'fsl_mat_as_itk',
                }
            }

            # transforms to be concatenated, the first element of each
            # tuple is the resource pool key related to the resource that
            # should be connected in, and the second element is the input
            # to which it should be connected
            if inverse is True:
                if distcor is True and func_type != 'ica-aroma':
                    # Field file from anatomical nonlinear registration
                    transforms_to_combine = [
                        ('mni_to_anatomical_nonlinear_xfm', 'in6'),
                        ('ants_affine_xfm', 'in5'),
                        ('ants_rigid_xfm', 'in4'),
                        ('ants_initial_xfm', 'in3'),
                        ('fsl_mat_as_itk', 'in2'),
                        ('blip_warp_inverse', 'in1')]
                else:
                    transforms_to_combine = [
                        ('mni_to_anatomical_nonlinear_xfm', 'in5'),
                        ('ants_affine_xfm', 'in4'),
                        ('ants_rigid_xfm', 'in3'),
                        ('ants_initial_xfm', 'in2'),
                        ('fsl_mat_as_itk', 'in1')]
            else:
                transforms_to_combine = [
                    ('anatomical_to_mni_nonlinear_xfm', 'in1'),
                    ('ants_affine_xfm', 'in2'),
                    ('ants_rigid_xfm', 'in3'),
                    ('ants_initial_xfm', 'in4'),
                    ('fsl_mat_as_itk', 'in5')]

                if distcor is True and func_type != 'ica-aroma':
                    transforms_to_combine.append(('blip_warp', 'in6'))

        if registration_template == 'epi':
            # handle both symmetric and asymmetric transforms
            ants_transformation_dict = {
                'asymmetric': {
                    'func_to_epi_nonlinear_xfm':
                        'func_to_epi_nonlinear_xfm',
                    'epi_to_func_nonlinear_xfm':
                        'epi_to_func_nonlinear_xfm',
                    'func_to_epi_ants_affine_xfm':
                        'func_to_epi_ants_affine_xfm',
                    'func_to_epi_ants_rigid_xfm':
                        'func_to_epi_ants_rigid_xfm',
                    'func_to_epi_ants_initial_xfm':
                        'func_to_epi_ants_initial_xfm',
                    # 'blip_warp': 'blip_warp',
                    # 'blip_warp_inverse': 'blip_warp_inverse',
                    # 'fsl_mat_as_itk': 'fsl_mat_as_itk',
                },
                # 'symmetric': {
                #     'func_to_epi_nonlinear_xfm':
                #         'anatomical_to_mni_nonlinear_xfm',
                #     'func_to_epi_ants_affine_xfm':
                #         'func_to_epi_ants_affine_xfm',
                #     'func_to_epi_ants_rigid_xfm':
                #         'func_to_epi_ants_rigid_xfm',
                #     'func_to_epi_ants_initial_xfm': 'ants_initial_xfm',
                #     'blip_warp': 'blip_warp',
                #     'blip_warp_inverse': 'blip_warp_inverse',
                #     'fsl_mat_as_itk': 'fsl_mat_as_itk',
                # }
            }

            # transforms to be concatenated, the first element of each
            # tuple is the resource pool key related to the resource that
            # should be connected in, and the second element is the input
            # to which it should be connected
            if inverse is True:
                if distcor is True and func_type != 'ica-aroma':
                    # Field file from anatomical nonlinear registration
                    transforms_to_combine = [
                        ('epi_to_func_nonlinear_xfm', 'in4'),
                        ('func_to_epi_ants_affine_xfm', 'in3'),
                        ('func_to_epi_ants_rigid_xfm', 'in2'),
                        ('func_to_epi_ants_initial_xfm', 'in1')]
                else:
                    transforms_to_combine = [
                        ('epi_to_func_nonlinear_xfm', 'in4'),
                        ('func_to_epi_ants_affine_xfm', 'in3'),
                        ('func_to_epi_ants_rigid_xfm', 'in2'),
                        ('func_to_epi_ants_initial_xfm', 'in1')]
            else:
                transforms_to_combine = [
                    ('func_to_epi_nonlinear_xfm', 'in1'),
                    ('func_to_epi_ants_affine_xfm', 'in2'),
                    ('func_to_epi_ants_rigid_xfm', 'in3'),
                    ('func_to_epi_ants_initial_xfm', 'in4')]

        # define the node
        collect_transforms = pe.Node(
            util.Merge(num_transforms),
            name='collect_transforms_{0}_{1}_{2}_{3}'.format(
                output_name, inverse_string, registration_template,
                num_strat))

        # wire in the various transformations
        for transform_key, input_port in transforms_to_combine:
            try:
                node, out_file = strat[
                    ants_transformation_dict[symmetry][transform_key]]
            except KeyError:
                raise Exception(locals())
            workflow.connect(node, out_file,
                             collect_transforms, input_port)

        # check transform list (if missing any init/rig/affine) and
        # exclude Nonetype
        check_transform = pe.Node(
            util.Function(
                input_names=['transform_list'],
                output_names=['checked_transform_list', 'list_length'],
                function=check_transforms),
            name='check_transforms{0}_{1}_{2}_{3}'.format(
                output_name, inverse_string, registration_template,
                num_strat))

        workflow.connect(collect_transforms, 'out',
                         check_transform, 'transform_list')

        # generate inverse transform flags, which depends on the
        # number of transforms
        inverse_transform_flags = pe.Node(
            util.Function(input_names=['transform_list'],
output_names=['inverse_transform_flags'], function=generate_inverse_transform_flags), name='inverse_transform_flags_{0}_{1}_{2}_{3}'.format( output_name, inverse_string, registration_template, num_strat)) workflow.connect(check_transform, 'checked_transform_list', inverse_transform_flags, 'transform_list') # set the output strat.update_resource_pool({ collect_transforms_key: (check_transform, 'checked_transform_list') }) strat.append_name(check_transform.name) strat.append_name(inverse_transform_flags.name) #### now we add in the apply ants warps node if int(num_cpus) > 1 and input_image_type == 3: # parallelize time series warp application map_node = True if map_node: apply_ants_warp = pe.MapNode( interface=ants.ApplyTransforms(), name='apply_ants_warp_{0}_mapnode_{1}_{2}_{3}'.format( output_name, inverse_string, registration_template, num_strat), iterfield=['input_image'], mem_gb=10.0) else: apply_ants_warp = pe.Node( interface=ants.ApplyTransforms(), name='apply_ants_warp_{0}_{1}_{2}_{3}'.format( output_name, inverse_string, registration_template, num_strat), mem_gb=10.0) apply_ants_warp.inputs.out_postfix = '_antswarp' apply_ants_warp.interface.num_threads = int(num_ants_cores) if inverse is True: workflow.connect(inverse_transform_flags, 'inverse_transform_flags', apply_ants_warp, 'invert_transform_flags') # input_image_type: # (0 or 1 or 2 or 3) # Option specifying the input image type of scalar # (default), vector, tensor, or time series. apply_ants_warp.inputs.input_image_type = input_image_type apply_ants_warp.inputs.dimension = 3 apply_ants_warp.inputs.interpolation = interpolation_method node, out_file = strat[ref_key] workflow.connect(node, out_file, apply_ants_warp, 'reference_image') collect_node, collect_out = strat[collect_transforms_key] workflow.connect(collect_node, collect_out, apply_ants_warp, 'transforms') if output_name == "functional_to_standard": # write out the composite functional to standard transforms write_composite_xfm = pe.Node( interface=ants.ApplyTransforms(), name='write_composite_xfm_{0}_{1}_{2}_{3}'.format( output_name, inverse_string, registration_template, num_strat), mem_gb=8.0) write_composite_xfm.inputs.print_out_composite_warp_file = True write_composite_xfm.inputs.output_image = "func_to_standard_xfm.nii.gz" workflow.connect(input_node, input_out, write_composite_xfm, 'input_image') write_composite_xfm.inputs.input_image_type = input_image_type write_composite_xfm.inputs.dimension = 3 write_composite_xfm.inputs.interpolation = interpolation_method node, out_file = strat[ref_key] workflow.connect(node, out_file, write_composite_xfm, 'reference_image') collect_node, collect_out = strat[collect_transforms_key] workflow.connect(collect_node, collect_out, write_composite_xfm, 'transforms') # write_composite_inv_xfm = pe.Node( # interface=ants.ApplyTransforms(), # name='write_composite_xfm_{0}_{1}_{2}_{3}'.format(output_name, # '_inverse', registration_template, num_strat), mem_gb=1.5) # write_composite_inv_xfm.inputs.print_out_composite_warp_file = True # write_composite_inv_xfm.inputs.output_image = "func_to_standard_inverse-xfm.nii.gz" # # workflow.connect(input_node, input_out, # write_composite_inv_xfm, 'input_image') # # workflow.connect(inverse_transform_flags, 'inverse_transform_flags', # write_composite_inv_xfm, 'invert_transform_flags') # # # write_composite_inv_xfm.inputs.input_image_type = input_image_type # write_composite_inv_xfm.inputs.dimension = 3 # write_composite_inv_xfm.inputs.interpolation = interpolation_method # # node, out_file 
= strat[ref_key] # workflow.connect(node, out_file, # write_composite_inv_xfm, 'reference_image') # # collect_node, collect_out = strat[collect_transforms_key] # workflow.connect(collect_node, collect_out, # write_composite_inv_xfm, 'transforms') strat.update_resource_pool({ "functional_to_standard_xfm": (write_composite_xfm, 'output_image') }) #"functional_to_standard_inverse-xfm": (write_composite_inv_xfm, 'output_image') #}) # parallelize the apply warp, if multiple CPUs, and it's a time series! if int(num_cpus) > 1 and input_image_type == 3: node_id = f'_{output_name}_{inverse_string}_{registration_template}_{num_strat}' chunk_imports = ['import nibabel as nb'] chunk = pe.Node(Function(input_names=['func_file', 'n_cpus'], output_names=['TR_ranges'], function=chunk_ts, imports=chunk_imports), name=f'chunk_{node_id}') chunk.inputs.n_cpus = int(num_cpus) workflow.connect(input_node, input_out, chunk, 'func_file') split_imports = ['import os', 'import subprocess'] split = pe.Node(Function(input_names=['func_file', 'tr_ranges'], output_names=['split_funcs'], function=split_ts_chunks, imports=split_imports), name=f'split_{node_id}') workflow.connect(input_node, input_out, split, 'func_file') workflow.connect(chunk, 'TR_ranges', split, 'tr_ranges') workflow.connect(split, 'split_funcs', apply_ants_warp, 'input_image') func_concat = pe.Node(interface=afni_utils.TCat(), name=f'func_concat_{node_id}') func_concat.inputs.outputtype = 'NIFTI_GZ' workflow.connect(apply_ants_warp, 'output_image', func_concat, 'in_files') strat.update_resource_pool({output_name: (func_concat, 'out_file')}) else: workflow.connect(input_node, input_out, apply_ants_warp, 'input_image') strat.update_resource_pool( {output_name: (apply_ants_warp, 'output_image')}) strat.append_name(apply_ants_warp.name) return workflow
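# `chunk_ts` and `split_ts_chunks` above are imported from C-PAC's utility
# modules and are not defined in this file. As a rough illustration of the
# contract the chunking step is assumed to satisfy, a minimal sketch follows
# (`_sketch_chunk_ts` is a hypothetical name, not pipeline code):
def _sketch_chunk_ts(func_file, n_cpus):
    """Divide a 4D time series into n_cpus contiguous TR ranges.

    Returns a list of [start, end] volume indices (inclusive), one per
    chunk, so each chunk can be warped on its own core and re-concatenated
    afterwards with AFNI 3dTCat, as done above.
    """
    import nibabel as nb
    n_trs = nb.load(func_file).shape[3]
    chunk_size = max(n_trs // n_cpus, 1)
    TR_ranges = []
    for i in range(n_cpus):
        start = i * chunk_size
        if start >= n_trs:
            break
        # the last chunk absorbs any remainder volumes
        end = n_trs - 1 if i == n_cpus - 1 else start + chunk_size - 1
        TR_ranges.append([start, end])
    return TR_ranges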
def fsl_apply_transform_func_to_mni(workflow, output_name, func_key, ref_key,
                                    num_strat, strat, interpolation_method,
                                    distcor=False, map_node=False,
                                    func_ts=False, num_cpus=1):
    """
    Applies previously calculated FSL registration transforms to input
    images. This workflow employs the FSL applywarp tool:

    https://fsl.fmrib.ox.ac.uk/fslcourse/lectures/practicals/registration/index.html

    Parameters
    ----------
    workflow : Nipype workflow object
        the workflow containing the resources involved
    output_name : str
        what the name of the warped functional should be when written to the
        resource pool
    func_key : string
        resource pool key corresponding to the node containing the 3D or 4D
        functional file to be written into MNI space, use 'leaf' for a leaf
        node
    ref_key : string
        resource pool key corresponding to the file path to the template
        brain used for functional-to-template registration
    num_strat : int
        the number of strategy objects
    strat : C-PAC Strategy object
        a strategy with one or more resource pools
    interpolation_method : str
        which interpolation to use when applying the warps
    distcor : boolean
        indicates whether a distortion correction transformation should be
        added to the transforms; this of course requires that a distortion
        correction map exist in the resource pool
    map_node : boolean
        indicates whether a mapnode should be used; if TRUE, func_key is
        expected to correspond to a list of resources that should each be
        written into standard space with the other parameters
    func_ts : boolean
        indicates whether the input image is a 4D time series
    num_cpus : int
        the number of CPUs dedicated to each participant workflow - this is
        used to determine how to parallelize the warp application step

    Returns
    -------
    workflow : nipype.pipeline.engine.Workflow
    """

    strat_nodes = strat.get_nodes_names()

    # if the input is a string, assume that it is resource pool key,
    # if it is a tuple, assume that it is a node, outfile pair,
    # otherwise, something funky is going on
    if isinstance(func_key, str):
        if func_key == "leaf":
            func_node, func_file = strat.get_leaf_properties()
        else:
            func_node, func_file = strat[func_key]
    elif isinstance(func_key, tuple):
        func_node, func_file = func_key

    if isinstance(ref_key, str):
        ref_node, ref_out_file = strat[ref_key]
    elif isinstance(ref_key, tuple):
        ref_node, ref_out_file = ref_key

    if int(num_cpus) > 1 and func_ts:
        # parallelize time series warp application
        map_node = True

    if map_node:
        # func_mni_warp
        func_mni_warp = pe.MapNode(
            interface=fsl.ApplyWarp(),
            name='func_mni_fsl_warp_{0}_{1:d}'.format(output_name,
                                                      num_strat),
            iterfield=['in_file'],
            mem_gb=1.5)
    else:
        # func_mni_warp
        func_mni_warp = pe.Node(
            interface=fsl.ApplyWarp(),
            name='func_mni_fsl_warp_{0}_{1:d}'.format(output_name,
                                                      num_strat))

    func_mni_warp.inputs.interp = interpolation_method

    # parallelize the apply warp, if multiple CPUs, and it's a time series!
    if int(num_cpus) > 1 and func_ts:

        node_id = '{0}_{1:d}'.format(output_name, num_strat)

        chunk_imports = ['import nibabel as nb']
        chunk = pe.Node(Function(input_names=['func_file', 'n_cpus'],
                                 output_names=['TR_ranges'],
                                 function=chunk_ts,
                                 imports=chunk_imports),
                        name=f'chunk_{node_id}')

        chunk.inputs.n_cpus = int(num_cpus)
        workflow.connect(func_node, func_file, chunk, 'func_file')

        split_imports = ['import os', 'import subprocess']
        split = pe.Node(Function(input_names=['func_file', 'tr_ranges'],
                                 output_names=['split_funcs'],
                                 function=split_ts_chunks,
                                 imports=split_imports),
                        name=f'split_{node_id}')

        workflow.connect(func_node, func_file, split, 'func_file')
        workflow.connect(chunk, 'TR_ranges', split, 'tr_ranges')

        workflow.connect(split, 'split_funcs', func_mni_warp, 'in_file')

        func_concat = pe.Node(interface=afni_utils.TCat(),
                              name=f'func_concat_{node_id}')
        func_concat.inputs.outputtype = 'NIFTI_GZ'

        workflow.connect(func_mni_warp, 'out_file', func_concat, 'in_files')

        strat.update_resource_pool({output_name: (func_concat, 'out_file')})

    else:
        workflow.connect(func_node, func_file, func_mni_warp, 'in_file')
        strat.update_resource_pool(
            {output_name: (func_mni_warp, 'out_file')})

    workflow.connect(ref_node, ref_out_file, func_mni_warp, 'ref_file')

    if 'anat_mni_fnirt_register' in strat_nodes:

        node, out_file = strat['functional_to_anat_linear_xfm']
        workflow.connect(node, out_file, func_mni_warp, 'premat')

        node, out_file = strat['anatomical_to_mni_nonlinear_xfm']
        workflow.connect(node, out_file, func_mni_warp, 'field_file')

        if output_name == "functional_to_standard":
            write_composite_xfm = pe.Node(
                interface=fsl.ConvertWarp(),
                name='combine_fsl_warps_{0}_{1:d}'.format(output_name,
                                                          num_strat))

            workflow.connect(ref_node, ref_out_file,
                             write_composite_xfm, 'reference')

            node, out_file = strat['functional_to_anat_linear_xfm']
            workflow.connect(node, out_file, write_composite_xfm, 'premat')

            node, out_file = strat['anatomical_to_mni_nonlinear_xfm']
            workflow.connect(node, out_file, write_composite_xfm, 'warp1')

            strat.update_resource_pool({
                "functional_to_standard_xfm": (write_composite_xfm,
                                               'out_file')
            })

    elif 'anat_mni_flirt_register' in strat_nodes:

        if 'functional_to_mni_linear_xfm' not in strat:
            combine_transforms = pe.Node(
                interface=fsl.ConvertXFM(),
                name='combine_fsl_xforms_{0}_{1:d}'.format(output_name,
                                                           num_strat))
            combine_transforms.inputs.concat_xfm = True

            node, out_file = strat['anatomical_to_mni_linear_xfm']
            workflow.connect(node, out_file, combine_transforms, 'in_file2')

            node, out_file = strat['functional_to_anat_linear_xfm']
            workflow.connect(node, out_file, combine_transforms, 'in_file')

            strat.update_resource_pool({
                'functional_to_mni_linear_xfm': (combine_transforms,
                                                 'out_file')
            })
            strat.append_name(combine_transforms.name)

        combine_transforms, outfile = strat['functional_to_mni_linear_xfm']
        workflow.connect(combine_transforms, outfile,
                         func_mni_warp, 'premat')

    else:
        raise ValueError('Could not find flirt or fnirt registration in '
                         'nodes')

    strat.append_name(func_mni_warp.name)

    return workflow
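# For reference, the ConvertWarp wiring above composes the linear
# func-to-anat matrix (applied first, via `premat`) with the FNIRT
# anat-to-MNI field (`warp1`) into a single resampling step. A standalone
# sketch with hypothetical file names, equivalent to the CLI call
# `convertwarp --ref=... --premat=... --warp1=... --out=...`:
def _sketch_combine_fsl_warps():
    from nipype.interfaces import fsl
    combine = fsl.ConvertWarp()
    combine.inputs.reference = 'MNI152_T1_2mm_brain.nii.gz'  # target space
    combine.inputs.premat = 'func_to_anat.mat'        # affine, applied first
    combine.inputs.warp1 = 'anat_to_mni_field.nii.gz'  # nonlinear, second
    combine.inputs.out_file = 'func_to_standard_xfm.nii.gz'
    return combine.run()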
def create_cwas(name='cwas', working_dir=None, crash_dir=None):
    """
    Connectome Wide Association Studies

    This workflow performs CWAS on a group of subjects.

    Parameters
    ----------
    name : string, optional
        Name of the workflow.

    Returns
    -------
    cwas : nipype.pipeline.engine.Workflow
        CWAS workflow.

    Notes
    -----

    Workflow Inputs::

        inputspec.subjects : dict (subject id: nifti files)
            4-D timeseries of a group of subjects normalized to MNI space
        inputspec.roi : string (nifti file)
            Mask of region(s) of interest
        inputspec.regressor : list (float)
            Corresponding list of the regressor variable of shape (`N`) or
            (`N`,`1`), `N` subjects
        inputspec.participant_column : string
            Name of the participant id column in the regressor file
        inputspec.columns : string
            Name(s) of the regressor column(s) of interest
        inputspec.permutations : int
            Number of permutation samples to draw from the pseudo F
            distribution
        inputspec.parallel_nodes : integer
            Number of nodes to create and potentially parallelize over

    Workflow Outputs::

        outputspec.F_map : string (nifti file)
            Pseudo F values of CWAS
        outputspec.p_map : string (nifti file)
            Significance p values calculated from permutation tests
        outputspec.neglog_p_map : string (nifti file)
            Negative log of the p values

    CWAS Procedure:

    1. Calculate spatial correlation of a voxel
    2. Correlate spatial z-score maps for every subject pair
    3. Convert matrix to distance matrix, `1-r`
    4. Calculate MDMR statistics for the voxel
    5. Determine significance of MDMR statistics with permutation tests

    .. exec::
        from CPAC.cwas import create_cwas
        wf = create_cwas()
        wf.write_graph(
            graph2use='orig',
            dotfilename='./images/generated/create_cwas.dot'
        )

    Workflow Graph:

    .. image:: ../../images/generated/cwas.png
        :width: 500

    Detailed Workflow Graph:

    .. image:: ../../images/generated/cwas_detailed.png
        :width: 500

    References
    ----------
    .. [1] Shehzad Z, Kelly C, Reiss PT, Emerson JW, McMahon K, Copland DA,
           Castellanos FX, Milham MP. An Analytic Framework for
           Connectome-Wide Association Studies. Under Review.
    """
    if not working_dir:
        working_dir = os.path.join(os.getcwd(), 'MDMR_work_dir')
    if not crash_dir:
        crash_dir = os.path.join(os.getcwd(), 'MDMR_crash_dir')

    workflow = pe.Workflow(name=name)
    workflow.base_dir = working_dir
    workflow.config['execution'] = {
        'hash_method': 'timestamp',
        'crashdump_dir': os.path.abspath(crash_dir)
    }

    inputspec = pe.Node(util.IdentityInterface(fields=[
        'roi', 'subjects', 'regressor', 'participant_column', 'columns',
        'permutations', 'parallel_nodes'
    ]), name='inputspec')

    outputspec = pe.Node(
        util.IdentityInterface(fields=['F_map', 'p_map', 'neglog_p_map']),
        name='outputspec')

    ccb = pe.Node(Function(input_names=['mask_file', 'batches'],
                           output_names='batch_list',
                           function=create_cwas_batches,
                           as_module=True),
                  name='cwas_batches')

    ncwas = pe.MapNode(Function(input_names=['subjects', 'mask_file',
                                             'regressor_file',
                                             'participant_column',
                                             'columns_string',
                                             'permutations', 'voxel_range'],
                                output_names=['result_batch'],
                                function=nifti_cwas,
                                as_module=True),
                       name='cwas_batch',
                       iterfield='voxel_range')

    jmask = pe.Node(Function(input_names=['subjects', 'mask_file'],
                             output_names=['joint_mask'],
                             function=joint_mask,
                             as_module=True),
                    name='joint_mask')

    mcwasb = pe.Node(Function(input_names=['cwas_batches', 'mask_file'],
                              output_names=['F_file', 'p_file',
                                            'neglog_p_file'],
                              function=merge_cwas_batches,
                              as_module=True),
                     name='cwas_volumes')

    # Compute the joint mask
    workflow.connect(inputspec, 'subjects', jmask, 'subjects')
    workflow.connect(inputspec, 'roi', jmask, 'mask_file')

    # Create batches based on the joint mask
    workflow.connect(jmask, 'joint_mask', ccb, 'mask_file')
    workflow.connect(inputspec, 'parallel_nodes', ccb, 'batches')

    # Compute CWAS over batches of voxels
    workflow.connect(jmask, 'joint_mask', ncwas, 'mask_file')
    workflow.connect(inputspec, 'subjects', ncwas, 'subjects')
    workflow.connect(inputspec, 'regressor', ncwas, 'regressor_file')
    workflow.connect(inputspec, 'permutations', ncwas, 'permutations')
    workflow.connect(inputspec, 'participant_column',
                     ncwas, 'participant_column')
    workflow.connect(inputspec, 'columns', ncwas, 'columns_string')
    workflow.connect(ccb, 'batch_list', ncwas, 'voxel_range')

    # Merge the computed CWAS data
    workflow.connect(ncwas, 'result_batch', mcwasb, 'cwas_batches')
    workflow.connect(jmask, 'joint_mask', mcwasb, 'mask_file')

    workflow.connect(mcwasb, 'F_file', outputspec, 'F_map')
    workflow.connect(mcwasb, 'p_file', outputspec, 'p_map')
    workflow.connect(mcwasb, 'neglog_p_file', outputspec, 'neglog_p_map')

    return workflow
def create_montage_gm_wm_csf(wf_name, png_name):
    wf = pe.Workflow(name=wf_name)

    inputNode = pe.Node(util.IdentityInterface(
        fields=['underlay', 'overlay_csf', 'overlay_wm', 'overlay_gm']),
        name='inputspec')

    outputNode = pe.Node(util.IdentityInterface(fields=[
        'axial_png', 'sagittal_png', 'resampled_underlay',
        'resampled_overlay_csf', 'resampled_overlay_wm',
        'resampled_overlay_gm'
    ]), name='outputspec')

    resample_u = pe.Node(Function(input_names=['file_'],
                                  output_names=['new_fname'],
                                  function=resample_1mm,
                                  as_module=True),
                         name='resample_u')

    resample_o_csf = resample_u.clone('resample_o_csf')
    resample_o_wm = resample_u.clone('resample_o_wm')
    resample_o_gm = resample_u.clone('resample_o_gm')

    wf.connect(inputNode, 'underlay', resample_u, 'file_')
    wf.connect(inputNode, 'overlay_csf', resample_o_csf, 'file_')
    wf.connect(inputNode, 'overlay_gm', resample_o_gm, 'file_')
    wf.connect(inputNode, 'overlay_wm', resample_o_wm, 'file_')

    montage_a = pe.Node(Function(input_names=['overlay_csf', 'overlay_wm',
                                              'overlay_gm', 'underlay',
                                              'png_name'],
                                 output_names=['png_name'],
                                 function=montage_gm_wm_csf_axial,
                                 as_module=True),
                        name='montage_a')

    wf.connect(resample_u, 'new_fname', montage_a, 'underlay')
    wf.connect(resample_o_csf, 'new_fname', montage_a, 'overlay_csf')
    wf.connect(resample_o_gm, 'new_fname', montage_a, 'overlay_gm')
    wf.connect(resample_o_wm, 'new_fname', montage_a, 'overlay_wm')
    montage_a.inputs.png_name = png_name + '_a.png'

    montage_s = pe.Node(Function(input_names=['overlay_csf', 'overlay_wm',
                                              'overlay_gm', 'underlay',
                                              'png_name'],
                                 output_names=['png_name'],
                                 function=montage_gm_wm_csf_sagittal,
                                 as_module=True),
                        name='montage_s')
    montage_s.inputs.png_name = png_name + '_s.png'

    wf.connect(resample_u, 'new_fname', montage_s, 'underlay')
    wf.connect(resample_o_csf, 'new_fname', montage_s, 'overlay_csf')
    wf.connect(resample_o_gm, 'new_fname', montage_s, 'overlay_gm')
    wf.connect(resample_o_wm, 'new_fname', montage_s, 'overlay_wm')

    wf.connect(resample_u, 'new_fname', outputNode, 'resampled_underlay')
    wf.connect(resample_o_csf, 'new_fname',
               outputNode, 'resampled_overlay_csf')
    wf.connect(resample_o_wm, 'new_fname',
               outputNode, 'resampled_overlay_wm')
    wf.connect(resample_o_gm, 'new_fname',
               outputNode, 'resampled_overlay_gm')
    wf.connect(montage_a, 'png_name', outputNode, 'axial_png')
    wf.connect(montage_s, 'png_name', outputNode, 'sagittal_png')

    return wf
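# Usage sketch for the segmentation QC montage: wire one underlay and the
# three tissue overlays, then read the PNGs from outputspec. File names are
# hypothetical.
def _sketch_run_montage_gm_wm_csf():
    montage = create_montage_gm_wm_csf('montage_seg', 'segmentation')
    montage.inputs.inputspec.underlay = 'anat_brain.nii.gz'
    montage.inputs.inputspec.overlay_csf = 'seg_csf_mask.nii.gz'
    montage.inputs.inputspec.overlay_wm = 'seg_wm_mask.nii.gz'
    montage.inputs.inputspec.overlay_gm = 'seg_gm_mask.nii.gz'
    # outputspec.axial_png / outputspec.sagittal_png hold the rendered images
    return montage.run()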
def motion_power_statistics(name='motion_stats'):
    """
    The main purpose of this workflow is to get various statistical measures
    from the movement/motion parameters obtained in functional preprocessing.

    Parameters
    ----------
    :param str name: Name of the workflow, defaults to 'motion_stats'
    :return: Nuisance workflow.
    :rtype: nipype.pipeline.engine.Workflow

    Notes
    -----

    Workflow Inputs::

        inputspec.subject_id : string
            Subject name or id

        inputspec.scan_id : string
            Functional Scan id or name

        inputspec.motion_correct : string (func/rest file or a list of
                                           func/rest nifti file)
            Path to motion corrected functional data

        inputspec.mask : string (nifti file)
            Path to file containing brain-only mask for the functional data

        inputspec.max_displacement : string (Mat file)
            maximum displacement (in mm) vector for brain voxels in each
            volume. This file is obtained in functional preprocessing step

        inputspec.movement_parameters : string (Mat file)
            1D file containing six movement/motion parameters
            (3 Translation, 3 Rotations) in different columns
            (roll pitch yaw dS dL dP), obtained in functional preprocessing
            step

    Workflow Outputs::

        outputspec.FDP_1D : 1D file
            Framewise Displacement (FD) time series, as per Power et al.,
            2012

        outputspec.FDJ_1D : 1D file
            Framewise Displacement (FD) time series, as per Jenkinson et
            al., 2002

        outputspec.DVARS_1D : 1D file
            DVARS time series

        outputspec.power_params : txt file
            Text file containing various power parameters for scrubbing

        outputspec.motion_params : txt file
            Text file containing various movement parameters

    Order of commands:

    - Calculate Framewise Displacement FD as per Power et al., 2012

      Differentiating head realignment parameters across frames yields a six
      dimensional timeseries that represents instantaneous head motion.
      Rotational displacements are converted from degrees to millimeters by
      calculating displacement on the surface of a sphere of radius
      50 mm. [R5]

    - Calculate Framewise Displacement FD as per Jenkinson et al., 2002

    - Calculate DVARS

      DVARS (D temporal derivative of timecourses, VARS referring to RMS
      variance over voxels) indexes the rate of change of BOLD signal across
      the entire brain at each frame of data. To calculate DVARS, the
      volumetric timeseries is differentiated (by backwards differences) and
      RMS signal change is calculated over the whole brain. DVARS is thus a
      measure of how much the intensity of a brain image changes in
      comparison to the previous timepoint (as opposed to the global signal,
      which is the average value of a brain image at a timepoint). [R5]

    - Calculate Power parameters::

        MeanFD : Mean (across time/frames) of the absolute values for
            Framewise Displacement (FD), computed as described in
            Power et al., Neuroimage, 2012
        rootMeanSquareFD : Root mean square (RMS; across time/frames) of
            the absolute values for FD
        rmsFD : Root mean square (RMS; across time/frames) of the absolute
            values for FD
        FDquartile(top 1/4th FD) : Mean of the top 25% highest FD values
        MeanDVARS : Mean of voxel DVARS

    - Calculate Motion Parameters

      The following motion parameters are calculated::

        Subject
        Scan
        Mean Relative RMS Displacement
        Max Relative RMS Displacement
        Movements > threshold
        Mean Relative Mean Rotation
        Mean Relative Maxdisp
        Max Relative Maxdisp
        Max Abs Maxdisp
        Max Relative Roll
        Max Relative Pitch
        Max Relative Yaw
        Max Relative dS-I
        Max Relative dL-R
        Max Relative dP-A
        Mean Relative Roll
        Mean Relative Pitch
        Mean Relative Yaw
        Mean Relative dS-I
        Mean Relative dL-R
        Mean Relative dP-A
        Max Abs Roll
        Max Abs Pitch
        Max Abs Yaw
        Max Abs dS-I
        Max Abs dL-R
        Max Abs dP-A
        Mean Abs Roll
        Mean Abs Pitch
        Mean Abs Yaw
        Mean Abs dS-I
        Mean Abs dL-R
        Mean Abs dP-A

    .. exec::
        from CPAC.generate_motion_statistics import motion_power_statistics
        wf = motion_power_statistics()
        wf.write_graph(
            graph2use='orig',
            dotfilename='./images/generated/motion_statistics.dot'
        )

    High Level Workflow Graph:

    .. image:: ../images/generated/motion_statistics.png
        :width: 1000

    Detailed Workflow Graph:

    .. image:: ../images/generated/motion_statistics_detailed.png
        :width: 1000

    Examples
    --------
    >>> import generate_motion_statistics
    >>> wf = generate_motion_statistics.motion_power_statistics("generate_statistics")
    >>> wf.inputs.inputspec.movement_parameters = 'CPAC_outputs/sub01/func/movement_parameters/rest_mc.1D'
    >>> wf.inputs.inputspec.max_displacement = 'CPAC_outputs/sub01/func/max_displacement/max_disp.1D'
    >>> wf.inputs.inputspec.motion_correct = 'CPAC_outputs/sub01/func/motion_correct/rest_mc.nii.gz'
    >>> wf.inputs.inputspec.mask = 'CPAC_outputs/sub01/func/func_mask/rest_mask.nii.gz'
    >>> wf.inputs.inputspec.transformations = 'CPAC_outputs/sub01/func/coordinate_transformation/rest_mc.aff12.1D'
    >>> wf.inputs.inputspec.subject_id = 'sub01'
    >>> wf.inputs.inputspec.scan_id = 'rest_1'
    >>> wf.base_dir = './working_dir'
    >>> wf.run()

    References
    ----------
    .. [1] Power, J. D., Barnes, K. A., Snyder, A. Z., Schlaggar, B. L., &
           Petersen, S. E. (2012). Spurious but systematic correlations in
           functional connectivity MRI networks arise from subject motion.
           NeuroImage, 59(3), 2142-2154.
           doi:10.1016/j.neuroimage.2011.10.018

    .. [2] Power, J. D., Barnes, K. A., Snyder, A. Z., Schlaggar, B. L., &
           Petersen, S. E. (2012). Steps toward optimizing motion artifact
           removal in functional connectivity MRI; a reply to Carp.
           NeuroImage. doi:10.1016/j.neuroimage.2012.03.017

    .. [3] Jenkinson, M., Bannister, P., Brady, M., Smith, S., 2002.
           Improved optimization for the robust and accurate linear
           registration and motion correction of brain images.
           Neuroimage 17, 825-841.
    """

    wf = pe.Workflow(name=name)

    input_node = pe.Node(util.IdentityInterface(fields=['subject_id',
                                                        'scan_id',
                                                        'movement_parameters',
                                                        'max_displacement',
                                                        'motion_correct',
                                                        'mask',
                                                        'transformations']),
                         name='inputspec')

    output_node = pe.Node(util.IdentityInterface(fields=['FDP_1D',
                                                         'FDJ_1D',
                                                         'DVARS_1D',
                                                         'power_params',
                                                         'motion_params']),
                          name='outputspec')

    # calculate DVARS
    cal_DVARS = pe.Node(Function(input_names=['rest', 'mask'],
                                 output_names=['out_file'],
                                 function=calculate_DVARS,
                                 as_module=True),
                        name='cal_DVARS')

    wf.connect(input_node, 'motion_correct', cal_DVARS, 'rest')
    wf.connect(input_node, 'mask', cal_DVARS, 'mask')
    wf.connect(cal_DVARS, 'out_file', output_node, 'DVARS_1D')

    # calculate Framewise Displacement as per Power et al., 2012
    calculate_FDP = pe.Node(Function(input_names=['in_file'],
                                     output_names=['out_file'],
                                     function=calculate_FD_P,
                                     as_module=True),
                            name='calculate_FD')

    wf.connect(input_node, 'movement_parameters', calculate_FDP, 'in_file')
    wf.connect(calculate_FDP, 'out_file', output_node, 'FDP_1D')

    # calculate Framewise Displacement as per Jenkinson et al., 2002
    calculate_FDJ = pe.Node(Function(input_names=['in_file'],
                                     output_names=['out_file'],
                                     function=calculate_FD_J,
                                     as_module=True),
                            name='calculate_FDJ')

    wf.connect(input_node, 'transformations', calculate_FDJ, 'in_file')
    wf.connect(calculate_FDJ, 'out_file', output_node, 'FDJ_1D')

    calc_motion_parameters = pe.Node(
        Function(input_names=['subject_id', 'scan_id',
                              'movement_parameters', 'max_displacement'],
                 output_names=['out_file'],
                 function=gen_motion_parameters,
                 as_module=True),
        name='calc_motion_parameters')

    wf.connect(input_node, 'subject_id',
               calc_motion_parameters, 'subject_id')
    wf.connect(input_node, 'scan_id', calc_motion_parameters, 'scan_id')
    wf.connect(input_node, 'movement_parameters',
               calc_motion_parameters, 'movement_parameters')
    wf.connect(input_node, 'max_displacement',
               calc_motion_parameters, 'max_displacement')
    wf.connect(calc_motion_parameters, 'out_file',
               output_node, 'motion_params')

    calc_power_parameters = pe.Node(
        Function(input_names=['subject_id', 'scan_id', 'fdp', 'fdj',
                              'dvars'],
                 output_names=['out_file'],
                 function=gen_power_parameters,
                 as_module=True),
        name='calc_power_parameters')

    wf.connect(input_node, 'subject_id',
               calc_power_parameters, 'subject_id')
    wf.connect(input_node, 'scan_id', calc_power_parameters, 'scan_id')
    wf.connect(cal_DVARS, 'out_file', calc_power_parameters, 'dvars')
    wf.connect(calculate_FDP, 'out_file', calc_power_parameters, 'fdp')
    wf.connect(calculate_FDJ, 'out_file', calc_power_parameters, 'fdj')
    wf.connect(calc_power_parameters, 'out_file',
               output_node, 'power_params')

    return wf
def connect_distortion_correction(workflow, strat_list, c, diff, blip,
                                  fmap_rp_list, unique_id=None):

    # Distortion Correction
    new_strat_list = []

    # Distortion Correction - Field Map Phase-difference
    if "PhaseDiff" in c.distortion_correction and diff:
        for num_strat, strat in enumerate(strat_list):
            if unique_id is None:
                workflow_name = f'diff_distcor_{num_strat}'
            else:
                workflow_name = f'diff_distcor_{unique_id}_{num_strat}'

            if 'BET' in c.fmap_distcorr_skullstrip:
                epi_distcorr = create_EPI_DistCorr(use_BET=True,
                                                   wf_name=workflow_name)
                epi_distcorr.inputs.bet_frac_input.bet_frac = \
                    c.fmap_distcorr_frac
                epi_distcorr.get_node('bet_frac_input').iterables = \
                    ('bet_frac', c.fmap_distcorr_frac)
            else:
                epi_distcorr = create_EPI_DistCorr(use_BET=False,
                                                   wf_name=workflow_name)
                epi_distcorr.inputs.afni_threshold_input.afni_threshold = \
                    c.fmap_distcorr_threshold

            node, out_file = strat['anatomical_skull_leaf']
            workflow.connect(node, out_file,
                             epi_distcorr, 'inputspec.anat_file')

            node, out_file = strat['diff_phase']
            workflow.connect(node, out_file,
                             epi_distcorr, 'inputspec.fmap_pha')

            node, out_file = strat['diff_mag_one']
            workflow.connect(node, out_file,
                             epi_distcorr, 'inputspec.fmap_mag')

            node, out_file = strat['deltaTE']
            workflow.connect(node, out_file,
                             epi_distcorr, 'deltaTE_input.deltaTE')

            node, out_file = strat['diff_phase_dwell']
            workflow.connect(node, out_file,
                             epi_distcorr, 'dwellT_input.dwellT')

            node, out_file = strat['dwell_asym_ratio']
            workflow.connect(node, out_file, epi_distcorr,
                             'dwell_asym_ratio_input.dwell_asym_ratio')

            # TODO ASH review forking
            if "None" in c.distortion_correction:
                strat = strat.fork()
                new_strat_list.append(strat)

            strat.append_name(epi_distcorr.name)

            strat.update_resource_pool({
                'despiked_fieldmap': (epi_distcorr,
                                      'outputspec.fmap_despiked'),
                'fieldmap_mask': (epi_distcorr,
                                  'outputspec.fieldmapmask'),
            })

    strat_list += new_strat_list

    # reset so the phase-diff forks above are not re-added after the
    # blip-up/blip-down loop below
    new_strat_list = []

    # Distortion Correction - "Blip-Up / Blip-Down"
    if "Blip" in c.distortion_correction and blip:
        for num_strat, strat in enumerate(strat_list):
            match_epi_imports = ['import json']
            match_epi_fmaps_node = pe.Node(
                Function(input_names=['bold_pedir',
                                      'epi_fmap_one',
                                      'epi_fmap_params_one',
                                      'epi_fmap_two',
                                      'epi_fmap_params_two'],
                         output_names=['opposite_pe_epi', 'same_pe_epi'],
                         function=match_epi_fmaps,
                         imports=match_epi_imports,
                         as_module=True),
                name='match_epi_fmaps_{0}'.format(num_strat))

            if fmap_rp_list:
                epi_rp_key = fmap_rp_list[0]
                epi_param_rp_key = "{0}_scan_params".format(epi_rp_key)
                node, node_out = strat[epi_rp_key]
                workflow.connect(node, node_out,
                                 match_epi_fmaps_node, 'epi_fmap_one')
                node, node_out = strat[epi_param_rp_key]
                workflow.connect(node, node_out,
                                 match_epi_fmaps_node, 'epi_fmap_params_one')

                # check the list (not the key string, as before) for a
                # second fieldmap EPI
                if len(fmap_rp_list) > 1:
                    epi_rp_key = fmap_rp_list[1]
                    epi_param_rp_key = "{0}_scan_params".format(epi_rp_key)
                    node, node_out = strat[epi_rp_key]
                    workflow.connect(node, node_out,
                                     match_epi_fmaps_node, 'epi_fmap_two')
                    node, node_out = strat[epi_param_rp_key]
                    workflow.connect(node, node_out,
                                     match_epi_fmaps_node,
                                     'epi_fmap_params_two')

            node, node_out = strat['pe_direction']
            workflow.connect(node, node_out,
                             match_epi_fmaps_node, 'bold_pedir')

            if unique_id is None:
                workflow_name = f'blip_correct_{num_strat}'
            else:
                workflow_name = f'blip_correct_{unique_id}_{num_strat}'

            blip_correct = blip_distcor_wf(wf_name=workflow_name)

            node, out_file = strat["mean_functional"]
            workflow.connect(node, out_file,
                             blip_correct, 'inputspec.func_mean')

            workflow.connect(match_epi_fmaps_node, 'opposite_pe_epi',
                             blip_correct, 'inputspec.opposite_pe_epi')

            workflow.connect(match_epi_fmaps_node, 'same_pe_epi',
                             blip_correct, 'inputspec.same_pe_epi')

            if "None" in c.distortion_correction:
                strat = strat.fork()
                new_strat_list.append(strat)

            strat.append_name(blip_correct.name)

            strat.update_resource_pool({
                'blip_warp': (blip_correct, 'outputspec.blip_warp'),
                'blip_warp_inverse': (blip_correct,
                                      'outputspec.blip_warp_inverse'),
                'mean_functional': (blip_correct,
                                    'outputspec.new_func_mean'),
                'functional_brain_mask': (blip_correct,
                                          'outputspec.new_func_mask')
            }, override=True)

    strat_list += new_strat_list

    return (workflow, strat_list)
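# match_epi_fmaps (wired above) is assumed to pair the EPI fieldmaps with
# the BOLD run by comparing BIDS PhaseEncodingDirection codes: the scan
# whose direction opposes the BOLD run (e.g. 'j-' vs 'j') becomes
# opposite_pe_epi, while a matching direction becomes same_pe_epi. A sketch
# of that pairing rule (hypothetical helper; scan params as parsed JSON
# dicts):
def _sketch_match_pedir(bold_pedir, epi_scans):
    """epi_scans: list of (nifti_path, params_dict) tuples."""
    opposite_pe_epi, same_pe_epi = None, None
    for nifti, params in epi_scans:
        pedir = params.get('PhaseEncodingDirection')
        if pedir is None:
            continue
        # 'j' vs 'j-' share an axis but differ in polarity
        same_axis = pedir.replace('-', '') == bold_pedir.replace('-', '')
        if same_axis and pedir != bold_pedir:
            opposite_pe_epi = nifti
        elif pedir == bold_pedir:
            same_pe_epi = nifti
    return opposite_pe_epi, same_pe_epi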
def func_longitudinal_template_wf(subject_id, strat_list, config):
    '''
    Parameters
    ----------
    subject_id : string
        the id of the subject
    strat_list : list of lists
        nested strategies: the first level indexes strategies, the second
        level sessions
    config : configuration
        a configuration object containing the information of the pipeline
        config.

    Returns
    -------
    None
    '''

    workflow_name = 'func_longitudinal_template_' + str(subject_id)
    workflow = pe.Workflow(name=workflow_name)
    workflow.base_dir = config.pipeline_setup['working_directory']['path']
    workflow.config['execution'] = {
        'hash_method': 'timestamp',
        'crashdump_dir': os.path.abspath(
            config.pipeline_setup['crash_directory']['path'])
    }

    # strat_nodes_list = strat_list['func_default']
    strat_init = Strategy()

    templates_for_resampling = [
        (config.resolution_for_func_preproc,
         config.template_brain_only_for_func,
         'template_brain_for_func_preproc', 'resolution_for_func_preproc'),
        (config.resolution_for_func_preproc,
         config.template_skull_for_func,
         'template_skull_for_func_preproc', 'resolution_for_func_preproc'),
        (config.resolution_for_func_preproc, config.ref_mask_for_func,
         'template_ref_mask', 'resolution_for_func_preproc'),
        # TODO check float resolution
        (config.resolution_for_func_preproc,
         config.functional_registration['2-func_registration_to_template']
         ['target_template']['EPI_template']['template_epi'],
         'template_epi', 'resolution_for_func_preproc'),
        (config.resolution_for_func_derivative,
         config.functional_registration['2-func_registration_to_template']
         ['target_template']['EPI_template']['template_epi'],
         'template_epi_derivative', 'resolution_for_func_derivative'),
        (config.resolution_for_func_derivative,
         config.template_brain_only_for_func,
         'template_brain_for_func_derivative',
         'resolution_for_func_preproc'),
        (config.resolution_for_func_derivative,
         config.template_skull_for_func,
         'template_skull_for_func_derivative',
         'resolution_for_func_preproc'),
    ]

    for resolution, template, template_name, tag in templates_for_resampling:
        resampled_template = pe.Node(
            Function(input_names=['resolution', 'template', 'template_name',
                                  'tag'],
                     output_names=['resampled_template'],
                     function=resolve_resolution,
                     as_module=True),
            name='resampled_' + template_name)

        resampled_template.inputs.resolution = resolution
        resampled_template.inputs.template = template
        resampled_template.inputs.template_name = template_name
        resampled_template.inputs.tag = tag

        strat_init.update_resource_pool(
            {template_name: (resampled_template, 'resampled_template')})

    merge_func_preproc_node = pe.Node(
        Function(input_names=['working_directory'],
                 output_names=['brain_list', 'skull_list'],
                 function=merge_func_preproc,
                 as_module=True),
        name='merge_func_preproc')

    merge_func_preproc_node.inputs.working_directory = \
        config.pipeline_setup['working_directory']['path']

    template_node = subject_specific_template(
        workflow_name='subject_specific_func_template_' + subject_id)

    template_node.inputs.set(
        avg_method=config.longitudinal_template_average_method,
        dof=config.longitudinal_template_dof,
        interp=config.longitudinal_template_interp,
        cost=config.longitudinal_template_cost,
        convergence_threshold=config.
        longitudinal_template_convergence_threshold,
        thread_pool=config.longitudinal_template_thread_pool,
    )

    workflow.connect(merge_func_preproc_node, 'brain_list',
                     template_node, 'input_brain_list')
    workflow.connect(merge_func_preproc_node, 'skull_list',
                     template_node, 'input_skull_list')

    workflow, strat_list = register_func_longitudinal_template_to_standard(
        template_node, config, workflow, strat_init, 'default')

    workflow.run()

    return
def create_isfc(name='isfc', working_dir=None, crash_dir=None):
    """
    Inter-Subject Functional Correlation

    Parameters
    ----------
    name : string, optional
        Name of the workflow.

    Returns
    -------
    workflow : nipype.pipeline.engine.Workflow
        ISFC workflow.

    Notes
    -----

    Workflow Inputs::

    Workflow Outputs::

    References
    ----------
    .. [1] Simony, E., Honey, C. J., Chen, J., Lositsky, O., Yeshurun, Y.,
           Wiesel, A., & Hasson, U. (2016). Dynamic reconfiguration of the
           default mode network during narrative comprehension. Nature
           Communications, 7(May 2015), 1-13.
           https://doi.org/10.1038/ncomms12141
    """

    if not working_dir:
        working_dir = os.path.join(os.getcwd(), 'ISC_work_dir')
    if not crash_dir:
        crash_dir = os.path.join(os.getcwd(), 'ISC_crash_dir')

    wf = pe.Workflow(name=name)
    wf.base_dir = working_dir
    wf.config['execution'] = {
        'hash_method': 'timestamp',
        'crashdump_dir': os.path.abspath(crash_dir)
    }

    inputspec = pe.Node(util.IdentityInterface(fields=[
        'subjects', 'permutations', 'collapse_subj', 'std', 'two_sided',
        'random_state'
    ]), name='inputspec')

    data_node = pe.Node(Function(
        input_names=['subjects'],
        output_names=['subject_ids', 'D', 'voxel_masker'],
        function=load_data,
        as_module=True),
        name='data')

    save_node = pe.Node(Function(
        input_names=['subject_ids', 'ISFC', 'p', 'collapse_subj'],
        output_names=['subject_ids', 'correlations', 'significance'],
        function=save_data_isfc,
        as_module=True),
        name='save')

    # 'subject_ids' is included here because save_node's subject_ids output
    # is connected into the outputspec below
    outputspec = pe.Node(
        util.IdentityInterface(fields=['subject_ids', 'correlations',
                                       'significance']),
        name='outputspec')

    isfc_node = pe.Node(Function(input_names=['D', 'std', 'collapse_subj'],
                                 output_names=['ISFC', 'masked'],
                                 function=node_isfc,
                                 as_module=True),
                        name='ISFC')

    permutations_node = pe.MapNode(Function(
        input_names=['permutation', 'D', 'masked', 'collapse_subj',
                     'random_state'],
        output_names=['permutation', 'min_null', 'max_null'],
        function=node_isfc_permutation,
        as_module=True),
        name='ISFC_permutation',
        iterfield='permutation')

    significance_node = pe.Node(Function(
        input_names=['ISFC', 'min_null', 'max_null', 'two_sided'],
        output_names=['p'],
        function=node_isfc_significance,
        as_module=True),
        name='ISFC_p')

    wf.connect([
        (inputspec, data_node, [('subjects', 'subjects')]),
        (inputspec, isfc_node, [('collapse_subj', 'collapse_subj')]),
        (inputspec, isfc_node, [('std', 'std')]),
        (data_node, isfc_node, [('D', 'D')]),

        (isfc_node, significance_node, [('ISFC', 'ISFC')]),

        (data_node, permutations_node, [('D', 'D')]),
        (isfc_node, permutations_node, [('masked', 'masked')]),
        (inputspec, permutations_node, [('collapse_subj', 'collapse_subj')]),
        (inputspec, permutations_node,
         [(('permutations', _permutations), 'permutation')]),
        (inputspec, permutations_node, [('random_state', 'random_state')]),

        (permutations_node, significance_node, [('min_null', 'min_null')]),
        (permutations_node, significance_node, [('max_null', 'max_null')]),
        (inputspec, significance_node, [('two_sided', 'two_sided')]),

        (data_node, save_node, [('subject_ids', 'subject_ids')]),
        (inputspec, save_node, [('collapse_subj', 'collapse_subj')]),
        (isfc_node, save_node, [('ISFC', 'ISFC')]),
        (significance_node, save_node, [('p', 'p')]),

        (save_node, outputspec, [('subject_ids', 'subject_ids')]),
        (save_node, outputspec, [('correlations', 'correlations')]),
        (save_node, outputspec, [('significance', 'significance')]),
    ])

    return wf
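# The permutation scheme above does max-statistic inference: every
# permutation contributes the minimum and maximum of its null ISFC map, and
# node_isfc_significance compares the observed values against those null
# extremes. An editor's sketch of that comparison with NumPy (illustrative
# only; the pipeline's actual test lives in node_isfc_significance):
def _sketch_isfc_significance(isfc, min_null, max_null, two_sided=False):
    import numpy as np
    isfc = np.asarray(isfc)
    max_null = np.asarray(max_null)  # one maximum per permutation
    min_null = np.asarray(min_null)  # one minimum per permutation
    # right-tail p: fraction of null maxima at or above the observed value
    p_right = (max_null >= isfc[..., None]).mean(axis=-1)
    if not two_sided:
        return p_right
    p_left = (min_null <= isfc[..., None]).mean(axis=-1)
    return np.minimum(1.0, 2.0 * np.minimum(p_left, p_right))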
def create_randomise(name='randomise', working_dir=None, crash_dir=None):
    """
    Group-level permutation testing with FSL randomise.

    Parameters
    ----------
    name : string, optional
        Name of the workflow.

    Returns
    -------
    workflow : nipype.pipeline.engine.Workflow
        Randomise workflow.

    Notes
    -----

    Workflow Inputs::

    Workflow Outputs::

    References
    ----------

    """

    if not working_dir:
        working_dir = os.path.join(os.getcwd(), 'Randomise_work_dir')
    if not crash_dir:
        crash_dir = os.path.join(os.getcwd(), 'Randomise_crash_dir')

    wf = pe.Workflow(name=name)
    wf.base_dir = working_dir
    wf.config['execution'] = {
        'hash_method': 'timestamp',
        'crashdump_dir': os.path.abspath(crash_dir)
    }

    # these fields match what is connected into `randomise` below
    # ('contrast_file' fixes the original 'constrast_file' typo)
    inputspec = pe.Node(util.IdentityInterface(fields=[
        'subjects', 'design_matrix_file', 'contrast_file', 'permutations',
        'mask_boolean', 'demean', 'c_thresh'
    ]), name='inputspec')

    outputspec = pe.Node(util.IdentityInterface(fields=[
        'tstat_files', 't_corrected_p_files', 'out_tcorr_corrected',
        'out_tstat_corrected', 'thresh_out', 'thresh_bin_out', 'index_file',
        'threshold_file', 'localmax_txt_file', 'localmax_vol_file',
        'max_file', 'mean_file', 'pval_file', 'size_file'
    ]), name='outputspec')

    # merge = pe.Node(interface=fsl.Merge(), name='fsl_merge')
    # merge.inputs.dimension = 't'
    # merge.inputs.merged_file = "randomise_merged.nii.gz"
    # wf.connect(inputspec, 'subjects', merge, 'in_files')

    # mask = pe.Node(interface=fsl.maths.MathsCommand(), name='fsl_maths')
    # mask.inputs.args = '-abs -Tmin -bin'
    # mask.inputs.out_file = "randomise_mask.nii.gz"
    # wf.connect(inputspec, 'subjects', mask, 'in_file')

    randomise = pe.Node(interface=fsl.Randomise(), name='randomise')
    randomise.inputs.base_name = "randomise"
    randomise.inputs.demean = True
    randomise.inputs.tfce = True

    wf.connect([(inputspec, randomise, [
        ('subjects', 'in_file'),
        ('design_matrix_file', 'design_mat'),
        ('contrast_file', 'tcon'),
        ('permutations', 'num_perm'),
    ])])

    wf.connect(randomise, 'tstat_files', outputspec, 'tstat_files')
    wf.connect(randomise, 't_corrected_p_files',
               outputspec, 't_corrected_p_files')

    # NOTE: t_corrected_p_files are only produced while TFCE is enabled
    # (tfce=True above); without TFCE, randomise does not emit them
    select_tcorrp_files = pe.Node(Function(input_names=['input_list'],
                                           output_names=['out_file'],
                                           function=select),
                                  name='select_t_corrp')

    wf.connect(randomise, 't_corrected_p_files',
               select_tcorrp_files, 'input_list')
    wf.connect(select_tcorrp_files, 'out_file',
               outputspec, 'out_tcorr_corrected')

    select_tstat_files = pe.Node(Function(input_names=['input_list'],
                                          output_names=['out_file'],
                                          function=select),
                                 name='select_t_stat')

    wf.connect(randomise, 'tstat_files', select_tstat_files, 'input_list')
    wf.connect(select_tstat_files, 'out_file',
               outputspec, 'out_tstat_corrected')

    thresh = pe.Node(interface=fsl.Threshold(),
                     name='fsl_threshold_contrast')
    thresh.inputs.thresh = 0.95
    thresh.inputs.out_file = 'rando_pipe_thresh_tstat.nii.gz'
    wf.connect(select_tstat_files, 'out_file', thresh, 'in_file')
    wf.connect(thresh, 'out_file', outputspec, 'thresh_out')

    thresh_bin = pe.Node(interface=fsl.UnaryMaths(),
                         name='fsl_threshold_bin_contrast')
    thresh_bin.inputs.operation = 'bin'
    wf.connect(thresh, 'out_file', thresh_bin, 'in_file')
    wf.connect(thresh_bin, 'out_file', outputspec, 'thresh_bin_out')

    apply_mask = pe.Node(interface=fsl.ApplyMask(),
                         name='fsl_applymask_contrast')
    wf.connect(select_tstat_files, 'out_file', apply_mask, 'in_file')
    wf.connect(thresh_bin, 'out_file', apply_mask, 'mask_file')

    cluster = pe.Node(interface=fsl.Cluster(), name='cluster_contrast')
    cluster.inputs.threshold = 0.0001
    cluster.inputs.out_index_file = "index_file"
    cluster.inputs.out_localmax_txt_file = "lmax_contrast.txt"
    cluster.inputs.out_threshold_file = True
    cluster.inputs.out_max_file = True
    cluster.inputs.out_mean_file = True
    cluster.inputs.out_pval_file = True
    cluster.inputs.out_size_file = True

    wf.connect(apply_mask, 'out_file', cluster, 'in_file')

    wf.connect(cluster, 'index_file', outputspec, 'index_file')
    wf.connect(cluster, 'threshold_file', outputspec, 'threshold_file')
    wf.connect(cluster, 'localmax_txt_file',
               outputspec, 'localmax_txt_file')
    wf.connect(cluster, 'localmax_vol_file',
               outputspec, 'localmax_vol_file')
    wf.connect(cluster, 'max_file', outputspec, 'max_file')
    wf.connect(cluster, 'mean_file', outputspec, 'mean_file')
    wf.connect(cluster, 'pval_file', outputspec, 'pval_file')
    wf.connect(cluster, 'size_file', outputspec, 'size_file')

    return wf
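# Minimal usage sketch for create_randomise (hypothetical paths): the merged
# 4D file, FSL design matrix (.mat) and t-contrast (.con) feed fsl.Randomise
# as wired above.
def _sketch_run_randomise():
    rand_wf = create_randomise(name='randomise_example')
    rand_wf.inputs.inputspec.subjects = '/data/all_subjects_merged.nii.gz'
    rand_wf.inputs.inputspec.design_matrix_file = '/data/design.mat'
    rand_wf.inputs.inputspec.contrast_file = '/data/design.con'
    rand_wf.inputs.inputspec.permutations = 5000
    return rand_wf.run()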