def init_dsi_studio_export_wf(omp_nthreads, has_transform,
                              name="dsi_studio_export", params={},
                              output_suffix=""):
    """Export scalar maps from a DSI Studio fib file into NIfTI files with correct headers.

    This workflow exports gfa, fa0, fa1, fa2 and iso.

    Inputs

        fibgz
            A DSI Studio fib file

    Outputs

        gfa
            NIfTI file containing generalized fractional anisotropy (GFA).
        fa0
            Quantitative Anisotropy for the largest fixel in each voxel.
        fa1
            Quantitative Anisotropy for the second-largest fixel in each voxel.
        fa2
            Quantitative Anisotropy for the third-largest fixel in each voxel.
        iso
            Isotropic component of the ODF in each voxel.

    """
    inputnode = pe.Node(
        niu.IdentityInterface(fields=input_fields + ['fibgz']),
        name="inputnode")
    outputnode = pe.Node(
        niu.IdentityInterface(fields=['gfa', 'fa0', 'fa1', 'fa2', 'iso']),
        name="outputnode")
    workflow = pe.Workflow(name=name)
    export = pe.Node(DSIStudioExport(to_export="gfa,fa0,fa1,fa2,fa3,iso"),
                     name='export')
    fixhdr_nodes = {}
    for scalar_name in ['gfa', 'fa0', 'fa1', 'fa2', 'iso']:
        output_name = scalar_name + '_file'
        fixhdr_nodes[scalar_name] = pe.Node(FixDSIStudioExportHeader(),
                                            name='fix_' + scalar_name)
        connections = [(export, fixhdr_nodes[scalar_name],
                        [(output_name, 'dsi_studio_nifti')]),
                       (inputnode, fixhdr_nodes[scalar_name],
                        [('dwi_file', 'correct_header_nifti')]),
                       (fixhdr_nodes[scalar_name], outputnode,
                        [('out_file', scalar_name)])]
        if output_suffix:
            connections += [(fixhdr_nodes[scalar_name],
                             pe.Node(
                                 ReconDerivativesDataSink(
                                     desc=scalar_name,
                                     suffix=output_suffix),
                                 name='ds_%s_%s' % (name, scalar_name)),
                             [('out_file', 'in_file')])]
        workflow.connect(connections)

    workflow.connect([(inputnode, export, [('fibgz', 'input_file')])])

    return workflow
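# Usage sketch (hypothetical values): exporting GQI scalars from an upstream
# fib file, with the fixed-header NIfTI maps saved under a "gqiscalar"
# suffix. The fibgz input is expected to be connected from a reconstruction
# node's outputnode.
#
#     export_wf = init_dsi_studio_export_wf(
#         omp_nthreads=1, has_transform=False, output_suffix="gqiscalar")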
def external_format_datasinks(output_suffix, params, wf):
    """Add datasinks for Dipy reconstructions in other formats."""
    outputnode = wf.get_node("outputnode")
    if params["write_fibgz"]:
        ds_fibgz = pe.Node(
            ReconDerivativesDataSink(extension='.fib.gz',
                                     suffix=output_suffix,
                                     compress=True),
            name='ds_{}_fibgz'.format(output_suffix),
            run_without_submitting=True)
        wf.connect(outputnode, 'fibgz', ds_fibgz, 'in_file')
    if params["write_mif"]:
        ds_mif = pe.Node(
            ReconDerivativesDataSink(extension='.mif',
                                     suffix=output_suffix,
                                     compress=False),
            name='ds_{}_mif'.format(output_suffix),
            run_without_submitting=True)
        wf.connect(outputnode, 'fod_sh_mif', ds_mif, 'in_file')
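# Note on usage: this helper reads params["write_fibgz"] and
# params["write_mif"] unconditionally, so callers are assumed to provide
# both keys. A minimal sketch:
#
#     params = {"write_fibgz": True, "write_mif": True}
#     external_format_datasinks("3dSHORE", params, wf)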
def init_dsi_studio_recon_wf(name="dsi_studio_recon", output_suffix="",
                             params={}):
    """Reconstructs diffusion data using DSI Studio.

    This workflow creates a ``.src.gz`` file from the input dwi, bvals and
    bvecs, then reconstructs ODFs using GQI.

    Inputs

        *Default qsiprep inputs*

    Outputs

        fibgz
            A DSI Studio fib file containing GQI ODFs, peaks and scalar
            values.

    Params

        ratio_of_mean_diffusion_distance: float
            Default 1.25. Distance to sample EAP at.

    """
    inputnode = pe.Node(niu.IdentityInterface(fields=input_fields),
                        name="inputnode")
    outputnode = pe.Node(niu.IdentityInterface(fields=['fibgz']),
                         name="outputnode")
    workflow = pe.Workflow(name=name)
    create_src = pe.Node(DSIStudioCreateSrc(), name="create_src")
    romdd = params.get("ratio_of_mean_diffusion_distance", 1.25)
    gqi_recon = pe.Node(
        DSIStudioGQIReconstruction(ratio_of_mean_diffusion_distance=romdd),
        name="gqi_recon")

    # Resample anat mask to the DWI grid
    resample_mask = pe.Node(afni.Resample(outputtype='NIFTI_GZ',
                                          resample_mode="NN"),
                            name='resample_mask')

    workflow.connect([
        (inputnode, create_src, [('dwi_file', 'input_nifti_file'),
                                 ('bval_file', 'input_bvals_file'),
                                 ('bvec_file', 'input_bvecs_file')]),
        (inputnode, resample_mask, [('t1_brain_mask', 'in_file'),
                                    ('dwi_file', 'master')]),
        (create_src, gqi_recon, [('output_src', 'input_src_file')]),
        (resample_mask, gqi_recon, [('out_file', 'mask')]),
        (gqi_recon, outputnode, [('output_fib', 'fibgz')])
    ])

    if output_suffix:
        # Save the output in the outputs directory
        ds_gqi_fibgz = pe.Node(
            ReconDerivativesDataSink(extension='.fib.gz',
                                     suffix=output_suffix,
                                     compress=True),
            name='ds_gqi_fibgz',
            run_without_submitting=True)
        workflow.connect(gqi_recon, 'output_fib', ds_gqi_fibgz, 'in_file')
    return workflow
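# Usage sketch (value from the docstring default): GQI reconstruction with an
# explicit EAP sampling length, saving the fib file with a "gqi" suffix.
#
#     gqi_wf = init_dsi_studio_recon_wf(
#         output_suffix="gqi",
#         params={"ratio_of_mean_diffusion_distance": 1.25})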
def init_mrtrix_connectivity_wf(name="mrtrix_connectivity", params={},
                                output_suffix="", n_procs=1):
    """Runs ``tck2connectome`` on a ``tck`` file.

    Inputs

        tck_file
            mrtrix3 tck file.

    Outputs

        matfile
            A MATLAB-format file with numerous connectivity matrices for
            each atlas.

    """
    inputnode = pe.Node(
        niu.IdentityInterface(fields=input_fields +
                              ['tck_file', 'sift_weights', 'atlas_configs']),
        name="inputnode")
    outputnode = pe.Node(niu.IdentityInterface(fields=['matfile']),
                         name="outputnode")
    workflow = pe.Workflow(name=name)
    conmat_params = params.get("tck2connectome", {})
    use_sift_weights = params.get("use_sift_weights", False)
    calc_connectivity = pe.Node(MRTrixAtlasGraph(**conmat_params),
                                name='calc_connectivity')
    workflow.connect([
        (inputnode, calc_connectivity, [('atlas_configs', 'atlas_configs'),
                                        ('tck_file', 'in_file')]),
        (calc_connectivity, outputnode, [('connectivity_matfile', 'matfile')])
    ])

    if use_sift_weights:
        workflow.connect([(inputnode, calc_connectivity,
                           [('sift_weights', 'in_weights')])])

    if output_suffix:
        # Save the output in the outputs directory
        ds_connectivity = pe.Node(
            ReconDerivativesDataSink(suffix=output_suffix),
            name='ds_' + name,
            run_without_submitting=True)
        workflow.connect(calc_connectivity, 'connectivity_matfile',
                         ds_connectivity, 'in_file')
    return workflow
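# Usage sketch: options under the "tck2connectome" key are forwarded to
# MRTrixAtlasGraph, and SIFT2 weights are wired in when use_sift_weights is
# set (the empty sub-dict here is a placeholder).
#
#     conn_wf = init_mrtrix_connectivity_wf(
#         params={"use_sift_weights": True, "tck2connectome": {}})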
def init_controllability_wf(name="controllability", output_suffix="",
                            params={}):
    """Calculates network controllability from connectivity matrices.

    Calculates modal and average controllability using the method of
    Gu et al. 2015.

    Inputs

        matfile
            MATLAB format connectivity matrices from DSI Studio
            connectivity, MRTrix connectivity or Dipy connectivity.

    Outputs

        matfile
            MATLAB format controllability values for each node in each
            connectivity matrix in the input file.

    """
    inputnode = pe.Node(
        niu.IdentityInterface(fields=input_fields + ['matfile']),
        name="inputnode")
    outputnode = pe.Node(niu.IdentityInterface(fields=['matfile']),
                         name="outputnode")
    calc_control = pe.Node(Controllability(**params), name='calc_control')
    workflow = pe.Workflow(name=name)
    workflow.connect([
        (inputnode, calc_control, [('matfile', 'matfile')]),
        (calc_control, outputnode, [('controllability', 'matfile')])
    ])
    if output_suffix:
        # Save the output in the outputs directory
        ds_control = pe.Node(ReconDerivativesDataSink(suffix=output_suffix),
                             name='ds_' + name,
                             run_without_submitting=True)
        workflow.connect(calc_control, 'controllability', ds_control,
                         'in_file')
    return workflow
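# Usage sketch: params are forwarded to the Controllability interface; with
# an output_suffix the per-node controllability matfile is saved.
#
#     control_wf = init_controllability_wf(output_suffix="controllability")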
def init_dsi_studio_connectivity_wf(name="dsi_studio_connectivity",
                                    n_procs=1, params={}, output_suffix=""):
    """Calculate streamline-based connectivity matrices using DSI Studio.

    DSI Studio has a deterministic tractography algorithm that can be used
    to estimate pairwise regional connectivity. It calculates multiple
    connectivity measures.

    Inputs

        fibgz
            A DSI Studio fib file produced by DSI Studio reconstruction.

    Outputs

        matfile
            A MATLAB-format file with numerous connectivity matrices for
            each atlas.

    Params

        fiber_count
            number of streamlines to generate. Cannot also specify
            seed_count
        seed_count
            Number of seeds to track from. Does not guarantee a fixed number
            of streamlines and cannot be used with the fiber_count option.
        method
            0: streamline (Euler) 4: Runge-Kutta
        seed_plan
            Seeding strategy; 0 or 1 (passed to DSI Studio's
            ``--seed_plan``).
        initial_dir
            Seeds begin oriented as 0: the primary orientation of the ODF
            1: a random orientation or 2: all orientations
        connectivity_type
            "pass" to count streamlines passing through a region. "end" to
            force streamlines to terminate in regions they count as
            connecting.
        connectivity_value
            "count", "ncount", "fa" used to quantify connection strength.
        random_seed
            Setting to True generates truly random (not-reproducible)
            seeding.
        fa_threshold
            If not specified, will use the DSI Studio Otsu threshold.
            Otherwise specifies the minimum QA value per fixel to be used
            for tracking.
        step_size
            Streamline propagation step size in millimeters.
        turning_angle
            Maximum turning angle in degrees for streamline propagation.
        smoothing
            DSI Studio smoothing factor
        min_length
            Minimum streamline length in millimeters.
        max_length
            Maximum streamline length in millimeters.

    """
    inputnode = pe.Node(
        niu.IdentityInterface(fields=input_fields +
                              ['fibgz', 'atlas_configs']),
        name="inputnode")
    outputnode = pe.Node(niu.IdentityInterface(fields=['matfile']),
                         name="outputnode")
    workflow = pe.Workflow(name=name)
    calc_connectivity = pe.Node(DSIStudioAtlasGraph(n_procs=n_procs,
                                                    **params),
                                name='calc_connectivity')
    workflow.connect([
        (inputnode, calc_connectivity, [('atlas_configs', 'atlas_configs'),
                                        ('fibgz', 'input_fib')]),
        (calc_connectivity, outputnode, [('connectivity_matfile', 'matfile')])
    ])

    if output_suffix:
        # Save the output in the outputs directory
        ds_connectivity = pe.Node(
            ReconDerivativesDataSink(suffix=output_suffix),
            name='ds_' + name,
            run_without_submitting=True)
        workflow.connect(calc_connectivity, 'connectivity_matfile',
                         ds_connectivity, 'in_file')
    return workflow
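# Usage sketch (hypothetical values) matching the Params documented above:
# deterministic tracking with a fixed streamline count, counting streamlines
# that pass through each region pair.
#
#     params = {"fiber_count": 1000000,
#               "connectivity_type": "pass",
#               "connectivity_value": "count",
#               "min_length": 30,
#               "max_length": 250}
#     dsi_conn_wf = init_dsi_studio_connectivity_wf(n_procs=4, params=params)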
def init_dipy_brainsuite_shore_recon_wf(name="dipy_3dshore_recon",
                                        output_suffix="", params={}):
    """Reconstruct EAPs, ODFs, using 3dSHORE (brainsuite-style basis set).

    Inputs

        *qsiprep outputs*

    Outputs

        shore_coeffs
            3dSHORE coefficients
        rtop
            Voxelwise Return-to-origin probability.
        rtap
            Voxelwise Return-to-axis probability.
        rtpp
            Voxelwise Return-to-plane probability.

    Params

        write_fibgz: bool
            True writes out a DSI Studio fib file
        write_mif: bool
            True writes out a MRTrix mif file with sh coefficients
        convert_to_multishell: str
            either "HCP", "ABCD", "lifespan" will resample the data with
            this scheme
        radial_order: int
            Radial order for spherical harmonics (even)
        zeta: float
            Zeta parameter for basis set.
        tau: float
            Diffusion parameter (default= 4 * np.pi**2)
        regularization
            "L2" or "L1". Default is "L2"
        lambdaN
            LambdaN parameter for L2 regularization. (default=1e-8)
        lambdaL
            LambdaL parameter for L2 regularization. (default=1e-8)
        regularization_weighting: int or "CV"
            L1 regularization weighting. Default "CV" (use
            cross-validation). Can specify a static value to use in all
            voxels.
        l1_positive_constraint: bool
            Use positivity constraint.
        l1_maxiter
            Maximum number of iterations for L1 optimization. (Default=1000)
        l1_alpha
            Alpha parameter for L1 optimization. (default=1.0)
        pos_grid: int
            Grid points for estimating EAP (default=11)
        pos_radius
            Radius for EAP estimation (default=20e-03)

    """
    inputnode = pe.Node(niu.IdentityInterface(fields=input_fields),
                        name="inputnode")
    outputnode = pe.Node(
        niu.IdentityInterface(fields=[
            'shore_coeffs_image', 'rtop_image', 'alpha_image', 'r2_image',
            'cnr_image', 'regularization_image', 'fibgz', 'fod_sh_mif',
            'dwi_file', 'bval_file', 'bvec_file', 'b_file'
        ]),
        name="outputnode")
    workflow = pe.Workflow(name=name)
    resample_mask = pe.Node(afni.Resample(outputtype='NIFTI_GZ',
                                          resample_mode="NN"),
                            name='resample_mask')
    recon_shore = pe.Node(BrainSuiteShoreReconstruction(**params),
                          name="recon_shore")
    doing_extrapolation = params.get("extrapolate_scheme") in ("HCP", "ABCD")

    workflow.connect([
        (inputnode, recon_shore, [('dwi_file', 'dwi_file'),
                                  ('bval_file', 'bval_file'),
                                  ('bvec_file', 'bvec_file')]),
        (inputnode, resample_mask, [('t1_brain_mask', 'in_file'),
                                    ('dwi_file', 'master')]),
        (resample_mask, recon_shore, [('out_file', 'mask_file')]),
        (recon_shore, outputnode,
         [('shore_coeffs_image', 'shore_coeffs_image'),
          ('rtop_image', 'rtop_image'), ('alpha_image', 'alpha_image'),
          ('r2_image', 'r2_image'), ('cnr_image', 'cnr_image'),
          ('regularization_image', 'regularization_image'),
          ('fibgz', 'fibgz'), ('fod_sh_mif', 'fod_sh_mif'),
          ('extrapolated_dwi', 'dwi_file'),
          ('extrapolated_bvals', 'bval_file'),
          ('extrapolated_bvecs', 'bvec_file'),
          ('extrapolated_b', 'b_file')])
    ])

    if output_suffix:
        external_format_datasinks(output_suffix, params, workflow)
        ds_rtop = pe.Node(
            ReconDerivativesDataSink(extension='.nii.gz',
                                     desc="rtop",
                                     suffix=output_suffix,
                                     compress=True),
            name='ds_bsshore_rtop',
            run_without_submitting=True)
        workflow.connect(outputnode, 'rtop_image', ds_rtop, 'in_file')
        ds_coeff = pe.Node(
            ReconDerivativesDataSink(extension='.nii.gz',
                                     desc="SHOREcoeff",
                                     suffix=output_suffix,
                                     compress=True),
            name='ds_bsshore_coeff',
            run_without_submitting=True)
        workflow.connect(outputnode, 'shore_coeffs_image', ds_coeff,
                         'in_file')
        ds_alpha = pe.Node(
            ReconDerivativesDataSink(extension='.nii.gz',
                                     desc="L1alpha",
                                     suffix=output_suffix,
                                     compress=True),
            name='ds_bsshore_alpha',
            run_without_submitting=True)
        workflow.connect(outputnode, 'alpha_image', ds_alpha, 'in_file')
        ds_r2 = pe.Node(
            ReconDerivativesDataSink(extension='.nii.gz',
                                     desc="r2",
                                     suffix=output_suffix,
                                     compress=True),
            name='ds_bsshore_r2',
            run_without_submitting=True)
        workflow.connect(outputnode, 'r2_image', ds_r2, 'in_file')
        ds_cnr = pe.Node(
            ReconDerivativesDataSink(extension='.nii.gz',
                                     desc="CNR",
                                     suffix=output_suffix,
                                     compress=True),
            name='ds_bsshore_cnr',
            run_without_submitting=True)
        workflow.connect(outputnode, 'cnr_image', ds_cnr, 'in_file')
        ds_regl = pe.Node(
            ReconDerivativesDataSink(extension='.nii.gz',
                                     desc="regularization",
                                     suffix=output_suffix,
                                     compress=True),
            name='ds_bsshore_regl',
            run_without_submitting=True)
        workflow.connect(outputnode, 'regularization_image', ds_regl,
                         'in_file')
        if doing_extrapolation:
            ds_extrap_dwi = pe.Node(
                ReconDerivativesDataSink(extension='.nii.gz',
                                         desc="extrapolated",
                                         suffix=output_suffix,
                                         compress=True),
                name='ds_extrap_dwi',
                run_without_submitting=True)
            workflow.connect(outputnode, 'dwi_file', ds_extrap_dwi,
                             'in_file')
            ds_extrap_bval = pe.Node(
                ReconDerivativesDataSink(extension='.bval',
                                         desc="extrapolated",
                                         suffix=output_suffix),
                name='ds_extrap_bval',
                run_without_submitting=True)
            workflow.connect(outputnode, 'bval_file', ds_extrap_bval,
                             'in_file')
            ds_extrap_bvec = pe.Node(
                ReconDerivativesDataSink(extension='.bvec',
                                         desc="extrapolated",
                                         suffix=output_suffix),
                name='ds_extrap_bvec',
                run_without_submitting=True)
            workflow.connect(outputnode, 'bvec_file', ds_extrap_bvec,
                             'in_file')
            ds_extrap_b = pe.Node(
                ReconDerivativesDataSink(extension='.b',
                                         desc="extrapolated",
                                         suffix=output_suffix),
                name='ds_extrap_b',
                run_without_submitting=True)
            workflow.connect(outputnode, 'b_file', ds_extrap_b, 'in_file')
    return workflow
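# Usage sketch (hypothetical values): an L2-regularized 3dSHORE fit that also
# writes DSI Studio and MRTrix outputs through external_format_datasinks.
#
#     params = {"radial_order": 6,
#               "regularization": "L2",
#               "lambdaN": 1e-8,
#               "lambdaL": 1e-8,
#               "write_fibgz": True,
#               "write_mif": True}
#     shore_wf = init_dipy_brainsuite_shore_recon_wf(
#         output_suffix="3dSHORE", params=params)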
def init_dipy_mapmri_recon_wf(name="dipy_mapmri_recon", output_suffix="",
                              params={}):
    """Reconstruct EAPs, ODFs, using the MAP-MRI basis.

    Inputs

        *qsiprep outputs*

    Outputs

        mapmri_coeffs
            MAP-MRI coefficients
        rtop
            Voxelwise Return-to-origin probability.
        rtap
            Voxelwise Return-to-axis probability.
        rtpp
            Voxelwise Return-to-plane probability.
        msd
            Voxelwise mean squared displacement (MSD)
        qiv
            q-space inverse variance
        lapnorm
            Voxelwise norm of the Laplacian

    Params

        write_fibgz: bool
            True writes out a DSI Studio fib file
        write_mif: bool
            True writes out a MRTrix mif file with sh coefficients
        radial_order: int
            An even integer that represents the order of the basis
        laplacian_regularization: bool
            Regularize using the Laplacian of the MAP-MRI basis.
        laplacian_weighting: str or scalar
            The string 'GCV' makes it use generalized cross-validation to
            find the regularization weight. A scalar sets the regularization
            weight to that value, and an array will make it select the
            optimal weight from the values in the array.
        positivity_constraint: bool
            Constrain the propagator to be positive.
        pos_grid: int
            Grid points for estimating EAP (default=15)
        pos_radius
            Radius for EAP estimation (default=20e-03) or "adaptive"
        anisotropic_scaling: bool
            If True, uses the standard anisotropic MAP-MRI basis. If False,
            uses the isotropic MAP-MRI basis (equal to 3D-SHORE).
        eigenvalue_threshold: float
            Sets the minimum of the tensor eigenvalues in order to avoid
            stability problems.
        bval_threshold: float
            Sets the b-value threshold to be used in the scale factor
            estimation. In order for the estimated non-Gaussianity to have
            meaning, this value should be set to a lower value
            (b < 2000 s/mm^2) such that the scale factors are estimated on
            signal points that reasonably represent the spins at Gaussian
            diffusion.
        dti_scale_estimation: bool
            Whether or not DTI fitting is used to estimate the isotropic
            scale factor for isotropic MAP-MRI. When set to False the
            algorithm presets the isotropic tissue diffusivity to
            static_diffusivity. This vastly increases fitting speed but at
            the cost of slightly reduced fitting quality. Can still be used
            in combination with regularization and constraints.
        static_diffusivity: float
            The tissue diffusivity that is used when dti_scale_estimation is
            set to False. The default is that of typical white matter,
            D=0.7e-3.
        cvxpy_solver: str, optional
            cvxpy solver name. Optionally optimize the positivity constraint
            with a particular cvxpy solver. See http://www.cvxpy.org/ for
            details. Default: None (cvxpy chooses its own solver)

    """
    inputnode = pe.Node(niu.IdentityInterface(fields=input_fields),
                        name="inputnode")
    outputnode = pe.Node(
        niu.IdentityInterface(fields=[
            'mapmri_coeffs', 'rtop', 'rtap', 'rtpp', 'fibgz', 'fod_sh_mif',
            'parng', 'perng', 'ng', 'qiv', 'lapnorm', 'msd'
        ]),
        name="outputnode")
    workflow = pe.Workflow(name=name)
    recon_map = pe.Node(MAPMRIReconstruction(**params), name="recon_map")
    resample_mask = pe.Node(afni.Resample(outputtype='NIFTI_GZ',
                                          resample_mode="NN"),
                            name='resample_mask')
    workflow.connect([
        (inputnode, recon_map, [('dwi_file', 'dwi_file'),
                                ('bval_file', 'bval_file'),
                                ('bvec_file', 'bvec_file')]),
        (inputnode, resample_mask, [('t1_brain_mask', 'in_file'),
                                    ('dwi_file', 'master')]),
        (resample_mask, recon_map, [('out_file', 'mask_file')]),
        (recon_map, outputnode, [('mapmri_coeffs', 'mapmri_coeffs'),
                                 ('rtop', 'rtop'), ('rtap', 'rtap'),
                                 ('rtpp', 'rtpp'), ('parng', 'parng'),
                                 ('perng', 'perng'), ('msd', 'msd'),
                                 ('ng', 'ng'), ('qiv', 'qiv'),
                                 ('lapnorm', 'lapnorm'), ('fibgz', 'fibgz'),
                                 ('fod_sh_mif', 'fod_sh_mif')])
    ])

    if output_suffix:
        external_format_datasinks(output_suffix, params, workflow)
        connections = []
        for scalar_name in ['rtop', 'rtap', 'rtpp', 'qiv', 'msd', 'lapnorm']:
            connections += [(outputnode,
                             pe.Node(
                                 ReconDerivativesDataSink(
                                     desc=scalar_name,
                                     suffix=output_suffix),
                                 name='ds_%s_%s' % (name, scalar_name)),
                             [(scalar_name, 'in_file')])]
        workflow.connect(connections)

    return workflow
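# Usage sketch (hypothetical values): a Laplacian-regularized MAP-MRI fit
# with GCV weighting; rtop/rtap/rtpp/qiv/msd/lapnorm maps are sunk
# automatically when output_suffix is set.
#
#     params = {"radial_order": 6,
#               "laplacian_regularization": True,
#               "laplacian_weighting": "GCV",
#               "write_fibgz": False,
#               "write_mif": False}
#     mapmri_wf = init_dipy_mapmri_recon_wf(output_suffix="MAPMRI",
#                                           params=params)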
def init_dsi_studio_recon_wf(omp_nthreads, has_transform,
                             name="dsi_studio_recon", output_suffix="",
                             params={}):
    """Reconstructs diffusion data using DSI Studio.

    This workflow creates a ``.src.gz`` file from the input dwi, bvals and
    bvecs, then reconstructs ODFs using GQI.

    Inputs

        *Default qsiprep inputs*

    Outputs

        fibgz
            A DSI Studio fib file containing GQI ODFs, peaks and scalar
            values.

    Params

        ratio_of_mean_diffusion_distance: float
            Default 1.25. Distance to sample EAP at.

    """
    inputnode = pe.Node(
        niu.IdentityInterface(fields=input_fields + ['odf_rois']),
        name="inputnode")
    outputnode = pe.Node(niu.IdentityInterface(fields=['fibgz']),
                         name="outputnode")
    workflow = Workflow(name=name)
    desc = """DSI Studio Reconstruction : """
    create_src = pe.Node(DSIStudioCreateSrc(), name="create_src")
    romdd = params.get("ratio_of_mean_diffusion_distance", 1.25)
    gqi_recon = pe.Node(
        DSIStudioGQIReconstruction(ratio_of_mean_diffusion_distance=romdd),
        name="gqi_recon")
    desc += """\
Diffusion orientation distribution functions (ODFs) were reconstructed using
generalized q-sampling imaging (GQI, @yeh2010gqi) with a ratio of mean
diffusion distance of %.2f.""" % romdd

    # Resample anat mask
    resample_mask = pe.Node(afni.Resample(outputtype='NIFTI_GZ',
                                          resample_mode="NN"),
                            name='resample_mask')
    # Make a visual report of the model
    plot_peaks = pe.Node(ReconPeaksReport(subtract_iso=True),
                         name='plot_peaks')
    ds_report_peaks = pe.Node(
        ReconDerivativesDataSink(extension='.png',
                                 desc="GQIODF",
                                 suffix='peaks'),
        name='ds_report_peaks',
        run_without_submitting=True)

    # Plot targeted regions
    if has_transform:
        ds_report_odfs = pe.Node(
            ReconDerivativesDataSink(extension='.png',
                                     desc="GQIODF",
                                     suffix='odfs'),
            name='ds_report_odfs',
            run_without_submitting=True)
        workflow.connect(plot_peaks, 'odf_report', ds_report_odfs, 'in_file')

    workflow.connect([
        (inputnode, create_src, [('dwi_file', 'input_nifti_file'),
                                 ('bval_file', 'input_bvals_file'),
                                 ('bvec_file', 'input_bvecs_file')]),
        (inputnode, resample_mask, [('t1_brain_mask', 'in_file'),
                                    ('dwi_file', 'master')]),
        (create_src, gqi_recon, [('output_src', 'input_src_file')]),
        (resample_mask, gqi_recon, [('out_file', 'mask')]),
        (gqi_recon, outputnode, [('output_fib', 'fibgz')]),
        (gqi_recon, plot_peaks, [('output_fib', 'fib_file')]),
        (inputnode, plot_peaks, [('dwi_ref', 'background_image'),
                                 ('odf_rois', 'odf_rois')]),
        (resample_mask, plot_peaks, [('out_file', 'mask_file')]),
        (plot_peaks, ds_report_peaks, [('out_report', 'in_file')])
    ])

    if output_suffix:
        # Save the output in the outputs directory
        ds_gqi_fibgz = pe.Node(
            ReconDerivativesDataSink(extension='.fib.gz',
                                     suffix=output_suffix,
                                     compress=True),
            name='ds_gqi_fibgz',
            run_without_submitting=True)
        workflow.connect(gqi_recon, 'output_fib', ds_gqi_fibgz, 'in_file')
    workflow.__desc__ = desc
    return workflow
def init_dwi_recon_workflow(dwi_file, workflow_spec, output_dir,
                            reportlets_dir, omp_nthreads, name="recon_wf"):
    atlas_names = workflow_spec.get('atlases', [])
    space = workflow_spec['space']
    workflow = Workflow(name=_get_wf_name(dwi_file))
    inputnode = pe.Node(niu.IdentityInterface(fields=input_fields),
                        name='inputnode')
    preprocessed_data = pe.Node(QsiReconIngress(), name="preprocessed_data")

    # For doctests
    if not workflow_spec['name'] == 'fake':
        inputnode.inputs.dwi_file = dwi_file
        preprocessed_data.inputs.dwi_file = dwi_file

    # Connect the collected diffusion data (gradients, etc) to the inputnode
    workflow.connect([(preprocessed_data, inputnode,
                       [(trait, trait) for trait in qsiprep_output_names])])

    # Resample all atlases to dwi_file's resolution
    get_atlases = pe.Node(GetConnectivityAtlases(atlas_names=atlas_names,
                                                 space=space),
                          name='get_atlases',
                          run_without_submitting=True)

    # Save the atlases
    if len(atlas_names) > 0:
        if space == "T1w":
            workflow.connect([(inputnode, get_atlases,
                               [('t1_2_mni_reverse_transform',
                                 'forward_transform')])])
        for atlas in workflow_spec['atlases']:
            workflow.connect([
                (get_atlases,
                 pe.Node(ReconDerivativesDataSink(space=space,
                                                  desc=atlas,
                                                  suffix="atlas",
                                                  compress=True),
                         name='ds_atlases_' + atlas,
                         run_without_submitting=True),
                 [(('atlas_configs', _get_resampled, atlas), 'in_file')])
            ])
        workflow.connect(inputnode, "dwi_file", get_atlases,
                         "reference_image")

    # Read nodes from workflow spec, make sure we can implement them
    nodes_to_add = []
    for node_spec in workflow_spec['nodes']:
        if not node_spec['name']:
            raise Exception("Node has no name [{}]".format(node_spec))
        new_node = workflow_from_spec(node_spec)
        if new_node is None:
            raise Exception("Unable to create a node for %s" % node_spec)
        nodes_to_add.append(new_node)
    workflow.add_nodes(nodes_to_add)
    _check_repeats(workflow.list_node_names())

    # Now that all nodes are in the workflow, connect them
    for node_spec in workflow_spec['nodes']:
        # get the nipype node object
        node_name = node_spec['name']
        node = workflow.get_node(node_name)

        if node_spec.get('input', 'qsiprep') == 'qsiprep':
            # directly connect all the qsiprep outputs to every node
            for from_conn, to_conn in default_connections:
                workflow.connect(inputnode, from_conn, node,
                                 'inputnode.' + to_conn)
            _check_repeats(workflow.list_node_names())
        # connect the outputs from the upstream node to this node
        else:
            upstream_node = workflow.get_node(node_spec['input'])
            upstream_outputnode_name = node_spec['input'] + '.outputnode'
            upstream_outputnode = workflow.get_node(upstream_outputnode_name)
            upstream_outputs = set(upstream_outputnode.outputs.get().keys())
            downstream_inputnode_name = node_name + ".inputnode"
            downstream_inputnode = workflow.get_node(
                downstream_inputnode_name)
            downstream_inputs = set(
                downstream_inputnode.outputs.get().keys())

            connect_from_upstream = upstream_outputs.intersection(
                downstream_inputs)
            connect_from_qsiprep = default_input_set - connect_from_upstream

            LOGGER.info("connecting %s from %s to %s", connect_from_qsiprep,
                        inputnode, node)
            for qp_connection in connect_from_qsiprep:
                workflow.connect(inputnode, qp_connection, node,
                                 'inputnode.' + qp_connection)
            _check_repeats(workflow.list_node_names())

            LOGGER.info("connecting %s from %s to %s", connect_from_upstream,
                        upstream_outputnode_name, downstream_inputnode_name)
            for upstream_connection in connect_from_upstream:
                workflow.connect(upstream_node,
                                 "outputnode." + upstream_connection, node,
                                 'inputnode.' + upstream_connection)
            _check_repeats(workflow.list_node_names())

        # If it's a connectivity calculation, send it the atlas configs
        if node_spec['action'] == 'connectivity':
            workflow.connect([(get_atlases, node,
                               [('atlas_configs',
                                 'inputnode.atlas_configs')])])
        _check_repeats(workflow.list_node_names())

    # Fill-in datasinks and reportlet datasinks seen so far
    for node in workflow.list_node_names():
        node_suffix = node.split('.')[-1]
        if node_suffix.startswith('ds_'):
            workflow.get_node(node).inputs.source_file = dwi_file
            workflow.get_node(node).inputs.space = space
            if "report" in node_suffix:
                workflow.get_node(node).inputs.base_directory = \
                    reportlets_dir
            else:
                workflow.get_node(node).inputs.base_directory = output_dir

    return workflow
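# Sketch of a single node_spec this function consumes (keys other than
# "name", "input" and "action" are hypothetical): "name" becomes the nipype
# node name, "input" names the upstream node (or "qsiprep" for preprocessed
# inputs), and "action" == "connectivity" routes atlas configs to the node.
#
#     node_spec = {"name": "streamline_connectivity",
#                  "action": "connectivity",
#                  "input": "gqi_recon",
#                  "parameters": {}}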
def init_dipy_brainsuite_shore_recon_wf(name="dipy_3dshore_recon",
                                        output_suffix="", params={}):
    """Reconstruct EAPs, ODFs, using 3dSHORE (brainsuite-style basis set).

    Inputs

        *qsiprep outputs*

    Outputs

        shore_coeffs
            3dSHORE coefficients
        rtop
            Voxelwise Return-to-origin probability.
        rtap
            Voxelwise Return-to-axis probability.
        rtpp
            Voxelwise Return-to-plane probability.

    Params

        write_fibgz: bool
            True writes out a DSI Studio fib file
        write_mif: bool
            True writes out a MRTrix mif file with sh coefficients
        radial_order: int
            Radial order for spherical harmonics (even)
        zeta: float
            Zeta parameter for basis set.
        tau: float
            Diffusion parameter (default= 4 * np.pi**2)
        regularization
            "L2" or "L1". Default is "L2"
        lambdaN
            LambdaN parameter for L2 regularization. (default=1e-8)
        lambdaL
            LambdaL parameter for L2 regularization. (default=1e-8)
        regularization_weighting: int or "CV"
            L1 regularization weighting. Default "CV" (use
            cross-validation). Can specify a static value to use in all
            voxels.
        l1_positive_constraint: bool
            Use positivity constraint.
        l1_maxiter
            Maximum number of iterations for L1 optimization. (Default=1000)
        l1_alpha
            Alpha parameter for L1 optimization. (default=1.0)
        pos_grid: int
            Grid points for estimating EAP (default=11)
        pos_radius
            Radius for EAP estimation (default=20e-03)

    """
    inputnode = pe.Node(niu.IdentityInterface(fields=input_fields),
                        name="inputnode")
    outputnode = pe.Node(
        niu.IdentityInterface(
            fields=['shore_coeffs', 'rtop', 'rtap', 'rtpp', 'fibgz',
                    'fod_sh_mif']),
        name="outputnode")

    workflow = pe.Workflow(name=name)
    recon_shore = pe.Node(BrainSuiteShoreReconstruction(**params),
                          name="recon_shore")

    workflow.connect([
        (inputnode, recon_shore, [('dwi_file', 'dwi_file'),
                                  ('bval_file', 'bval_file'),
                                  ('bvec_file', 'bvec_file'),
                                  ('mask_file', 'mask_file')]),
        (recon_shore, outputnode, [('shore_coeffs', 'shore_coeffs'),
                                   ('rtop', 'rtop'), ('fibgz', 'fibgz'),
                                   ('fod_sh_mif', 'fod_sh_mif')])
    ])

    if output_suffix:
        external_format_datasinks(output_suffix, params, workflow)
        ds_rtop = pe.Node(
            ReconDerivativesDataSink(extension='.nii.gz',
                                     desc="rtop",
                                     suffix=output_suffix,
                                     compress=True),
            name='ds_bsshore_rtop',
            run_without_submitting=True)
        workflow.connect(outputnode, 'rtop', ds_rtop, 'in_file')
        ds_coeff = pe.Node(
            ReconDerivativesDataSink(extension='.nii.gz',
                                     desc="SHOREcoeff",
                                     suffix=output_suffix,
                                     compress=True),
            name='ds_bsshore_coeff',
            run_without_submitting=True)
        workflow.connect(outputnode, 'shore_coeffs', ds_coeff, 'in_file')
    return workflow
def init_dwi_recon_workflow(dwi_files, workflow_spec, output_dir,
                            reportlets_dir, has_transform, omp_nthreads,
                            name="recon_wf"):
    """Convert a workflow spec into a nipype workflow."""
    atlas_names = workflow_spec.get('atlases', [])
    space = workflow_spec['space']
    workflow = Workflow(name=name)
    scans_iter = pe.Node(niu.IdentityInterface(fields=['dwi_file']),
                         name='scans_iter')
    scans_iter.iterables = ("dwi_file", dwi_files)
    inputnode = pe.Node(
        niu.IdentityInterface(fields=input_fields + ['dwi_file']),
        name='inputnode')
    qsiprep_preprocessed_dwi_data = pe.Node(
        QsiReconIngress(), name="qsiprep_preprocessed_dwi_data")

    # For doctests
    if not workflow_spec['name'] == 'fake':
        scans_iter.inputs.dwi_file = dwi_files

    # Connect the collected diffusion data (gradients, etc) to the inputnode
    workflow.connect([
        (scans_iter, qsiprep_preprocessed_dwi_data,
         [('dwi_file', 'dwi_file')]),
        (qsiprep_preprocessed_dwi_data, inputnode,
         [(trait, trait) for trait in qsiprep_output_names])
    ])

    # Resample all atlases to dwi_file's resolution
    get_atlases = pe.Node(GetConnectivityAtlases(atlas_names=atlas_names,
                                                 space=space),
                          name='get_atlases',
                          run_without_submitting=True)

    # Resample ROI targets to DWI resolution for ODF plotting
    crossing_rois_file = pkgr('qsiprep', 'data/crossing_rois.nii.gz')
    odf_rois = pe.Node(ants.ApplyTransforms(interpolation="MultiLabel",
                                            dimension=3),
                       name="odf_rois")
    odf_rois.inputs.input_image = crossing_rois_file
    if has_transform and space == "T1w":
        workflow.connect(inputnode, 't1_2_mni_reverse_transform', odf_rois,
                         'transforms')
    elif space == 'template':
        odf_rois.inputs.transforms = ['identity']
    else:
        LOGGER.warning("Unable to transform ODF ROIs to dwi data. "
                       "No ODF reports will be created.")
        odf_rois = pe.Node(niu.IdentityInterface(fields=['output_image']),
                           name='odf_rois')
    workflow.connect(scans_iter, 'dwi_file', odf_rois, 'reference_image')

    # Save the atlases
    if len(atlas_names) > 0:
        if space == "T1w":
            if not has_transform:
                LOGGER.critical("No reverse transform found, unable to move "
                                "atlases into DWI space")
            workflow.connect([(inputnode, get_atlases,
                               [('t1_2_mni_reverse_transform',
                                 'forward_transform')])])
        for atlas in workflow_spec['atlases']:
            workflow.connect([
                (get_atlases,
                 pe.Node(ReconDerivativesDataSink(space=space,
                                                  desc=atlas,
                                                  suffix="atlas",
                                                  compress=True),
                         name='ds_atlases_' + atlas,
                         run_without_submitting=True),
                 [(('atlas_configs', _get_resampled, atlas,
                    'dwi_resolution_file'), 'in_file')]),
                (get_atlases,
                 pe.Node(ReconDerivativesDataSink(space=space,
                                                  desc=atlas,
                                                  suffix="atlas",
                                                  extension=".mif.gz",
                                                  compress=True),
                         name='ds_atlas_mifs_' + atlas,
                         run_without_submitting=True),
                 [(('atlas_configs', _get_resampled, atlas,
                    'dwi_resolution_mif'), 'in_file')]),
                (get_atlases,
                 pe.Node(ReconDerivativesDataSink(space=space,
                                                  desc=atlas,
                                                  extension=".txt",
                                                  suffix="mrtrixLUT"),
                         name='ds_atlas_mrtrix_lut_' + atlas,
                         run_without_submitting=True),
                 [(('atlas_configs', _get_resampled, atlas, 'mrtrix_lut'),
                   'in_file')]),
                (get_atlases,
                 pe.Node(ReconDerivativesDataSink(space=space,
                                                  desc=atlas,
                                                  extension=".txt",
                                                  suffix="origLUT"),
                         name='ds_atlas_orig_lut_' + atlas,
                         run_without_submitting=True),
                 [(('atlas_configs', _get_resampled, atlas, 'orig_lut'),
                   'in_file')]),
            ])
        workflow.connect(inputnode, "dwi_file", get_atlases,
                         "reference_image")

    # Read nodes from workflow spec, make sure we can implement them
    nodes_to_add = []
    for node_spec in workflow_spec['nodes']:
        if not node_spec['name']:
            raise Exception("Node has no name [{}]".format(node_spec))
        new_node = workflow_from_spec(
            omp_nthreads, has_transform or space == 'template', node_spec)
        if new_node is None:
            raise Exception("Unable to create a node for %s" % node_spec)
        nodes_to_add.append(new_node)
    workflow.add_nodes(nodes_to_add)
    _check_repeats(workflow.list_node_names())

    # Now that all nodes are in the workflow, connect them
    for node_spec in workflow_spec['nodes']:
        # get the nipype node object
        node_name = node_spec['name']
        node = workflow.get_node(node_name)

        if node_spec.get('input', 'qsiprep') == 'qsiprep':
            # directly connect all the qsiprep outputs to every node
            workflow.connect([
                (inputnode, node,
                 _as_connections(input_fields, dest_prefix='inputnode.'))])
        # connect the outputs from the upstream node to this node
        else:
            upstream_node = workflow.get_node(node_spec['input'])
            upstream_outputnode_name = node_spec['input'] + '.outputnode'
            upstream_outputnode = workflow.get_node(upstream_outputnode_name)
            upstream_outputs = set(upstream_outputnode.outputs.get().keys())
            downstream_inputnode_name = node_name + ".inputnode"
            downstream_inputnode = workflow.get_node(
                downstream_inputnode_name)
            downstream_inputs = set(
                downstream_inputnode.outputs.get().keys())

            connect_from_upstream = upstream_outputs.intersection(
                downstream_inputs)
            connect_from_qsiprep = default_input_set - connect_from_upstream

            workflow.connect([
                (inputnode, node,
                 _as_connections(connect_from_qsiprep,
                                 dest_prefix='inputnode.'))])
            _check_repeats(workflow.list_node_names())

            workflow.connect([
                (upstream_node, node,
                 _as_connections(connect_from_upstream,
                                 src_prefix='outputnode.',
                                 dest_prefix='inputnode.'))])
            _check_repeats(workflow.list_node_names())

        # If it's a connectivity calculation, send it the atlas configs
        if node_spec['action'] == 'connectivity':
            workflow.connect([(get_atlases, node,
                               [('atlas_configs',
                                 'inputnode.atlas_configs')])])
            _check_repeats(workflow.list_node_names())

        # Send the ODF rois to reconstruction nodes
        if node_spec['action'] == 'csd' or \
                'reconstruction' in node_spec['action']:
            workflow.connect([(odf_rois, node,
                               [('output_image', 'inputnode.odf_rois')])])
            _check_repeats(workflow.list_node_names())

    # Fill-in datasinks and reportlet datasinks seen so far
    for node in workflow.list_node_names():
        node_suffix = node.split('.')[-1]
        if node_suffix.startswith('ds_'):
            workflow.connect(scans_iter, 'dwi_file',
                             workflow.get_node(node), 'source_file')
            workflow.get_node(node).inputs.space = space
            if "report" in node_suffix:
                workflow.get_node(node).inputs.base_directory = \
                    reportlets_dir
            else:
                workflow.get_node(node).inputs.base_directory = output_dir
    return workflow
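# Sketch of a minimal workflow_spec of the shape read above ("name", "space",
# optional "atlases", and a list of node specs; node keys beyond
# name/input/action are hypothetical):
#
#     workflow_spec = {
#         "name": "gqi_connectivity",
#         "space": "T1w",
#         "atlases": ["schaefer100"],
#         "nodes": [
#             {"name": "gqi_recon", "action": "reconstruction",
#              "input": "qsiprep"},
#             {"name": "streamline_connectivity", "action": "connectivity",
#              "input": "gqi_recon"},
#         ],
#     }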
def init_mrtrix_csd_recon_wf(name="mrtrix_recon", output_suffix="",
                             params={}):
    """Create FOD images for WM, GM and CSF.

    This workflow uses mrtrix tools to run csd on multishell data.

    Inputs

        *Default qsiprep inputs*

    Outputs

        wm_txt
            SH fiber response function for white matter
        wm_odf
            FOD SH coefficients for white matter
        gm_txt
            SH fiber response function for gray matter
        gm_odf
            FOD SH coefficients for gray matter
        csf_txt
            SH fiber response function for CSF
        csf_odf
            FOD SH coefficients for CSF
        fod_sh_mif
            The same file as wm_odf.

    Params

        response: dict
            parameters for estimating the response function. A minimal
            example would be ``{"algorithm": "dhollander"}``
        fod: dict
            parameters for dwi2fod. A minimal example would be
            ``{"algorithm": "msmt_csd", "max_sh": [6, 8, 8]}``.

    """
    inputnode = pe.Node(niu.IdentityInterface(fields=input_fields),
                        name="inputnode")
    outputnode = pe.Node(
        niu.IdentityInterface(fields=[
            'fod_sh_mif', 'wm_odf', 'wm_txt', 'gm_odf', 'gm_txt', 'csf_odf',
            'csf_txt'
        ]),
        name="outputnode")

    # Resample anat mask
    resample_mask = pe.Node(afni.Resample(outputtype='NIFTI_GZ',
                                          resample_mode="NN"),
                            name='resample_mask')
    # Response estimation
    response = params.get('response', {})
    response_algorithm = response.get('algorithm', 'dhollander')
    response['algorithm'] = response_algorithm

    # FOD estimation
    fod = params.get('fod', {})
    fod_algorithm = fod.get('algorithm', 'csd')
    fod['algorithm'] = fod_algorithm

    workflow = pe.Workflow(name=name)
    create_mif = pe.Node(MRTrixIngress(), name='create_mif')
    estimate_response = pe.Node(Dwi2Response(**response),
                                'estimate_response')
    estimate_fod = pe.Node(EstimateFOD(**fod), 'estimate_fod')
    use_sift2 = params.get("use_sift2", False)

    if response_algorithm == 'msmt_5tt':
        workflow.connect([(inputnode, estimate_response,
                           [('mrtrix_5tt', 'mtt_file')])])

    # Connect all response functions if it's multi-response
    if fod_algorithm == 'msmt_csd':
        workflow.connect([(estimate_response, estimate_fod,
                           [('wm_file', 'wm_txt'), ('gm_file', 'gm_txt'),
                            ('csf_file', 'csf_txt')])])
    else:
        workflow.connect([(estimate_response, estimate_fod,
                           [('wm_file', 'wm_txt')])])

    workflow.connect([
        (inputnode, resample_mask, [('t1_brain_mask', 'in_file'),
                                    ('dwi_file', 'master')]),
        (inputnode, create_mif, [('dwi_file', 'dwi_file'),
                                 ('bval_file', 'bval_file'),
                                 ('bvec_file', 'bvec_file'),
                                 ('b_file', 'b_file')]),
        (create_mif, estimate_response, [('mif_file', 'in_file')]),
        (resample_mask, estimate_response, [('out_file', 'in_mask')]),
        (estimate_response, outputnode, [('wm_file', 'wm_txt'),
                                         ('gm_file', 'gm_txt'),
                                         ('csf_file', 'csf_txt')]),
        (create_mif, estimate_fod, [('mif_file', 'in_file')]),
        (resample_mask, estimate_fod, [('out_file', 'mask_file')]),
        (estimate_fod, outputnode, [('wm_odf', 'fod_sh_mif'),
                                    ('wm_odf', 'wm_odf'),
                                    ('gm_odf', 'gm_odf'),
                                    ('csf_odf', 'csf_odf')]),
    ])

    if output_suffix:
        ds_wm_odf = pe.Node(
            ReconDerivativesDataSink(extension='.mif.gz',
                                     desc="wmFOD",
                                     suffix=output_suffix,
                                     compress=True),
            name='ds_wm_odf',
            run_without_submitting=True)
        workflow.connect(outputnode, 'wm_odf', ds_wm_odf, 'in_file')
        ds_wm_txt = pe.Node(
            ReconDerivativesDataSink(extension='.txt',
                                     desc="wmFOD",
                                     suffix=output_suffix),
            name='ds_wm_txt',
            run_without_submitting=True)
        workflow.connect(outputnode, 'wm_txt', ds_wm_txt, 'in_file')

        if fod_algorithm == 'msmt_csd':
            ds_gm_odf = pe.Node(
                ReconDerivativesDataSink(extension='.mif.gz',
                                         desc="gmFOD",
                                         suffix=output_suffix,
                                         compress=True),
                name='ds_gm_odf',
                run_without_submitting=True)
            workflow.connect(outputnode, 'gm_odf', ds_gm_odf, 'in_file')
            ds_gm_txt = pe.Node(
                ReconDerivativesDataSink(extension='.txt',
                                         desc="gmFOD",
                                         suffix=output_suffix),
                name='ds_gm_txt',
                run_without_submitting=True)
            workflow.connect(outputnode, 'gm_txt', ds_gm_txt, 'in_file')
            ds_csf_odf = pe.Node(
                ReconDerivativesDataSink(extension='.mif.gz',
                                         desc="csfFOD",
                                         suffix=output_suffix,
                                         compress=True),
                name='ds_csf_odf',
                run_without_submitting=True)
            workflow.connect(outputnode, 'csf_odf', ds_csf_odf, 'in_file')
            ds_csf_txt = pe.Node(
                ReconDerivativesDataSink(extension='.txt',
                                         desc="csfFOD",
                                         suffix=output_suffix),
                name='ds_csf_txt',
                run_without_submitting=True)
            workflow.connect(outputnode, 'csf_txt', ds_csf_txt, 'in_file')
    return workflow
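# Usage sketch matching the documented Params: multi-shell multi-tissue CSD
# with a dhollander response function.
#
#     params = {"response": {"algorithm": "dhollander"},
#               "fod": {"algorithm": "msmt_csd", "max_sh": [6, 8, 8]}}
#     csd_wf = init_mrtrix_csd_recon_wf(output_suffix="msmtcsd",
#                                       params=params)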
def init_mrtrix_tractography_wf(name="mrtrix_tracking", output_suffix="",
                                params={}):
    """Run tractography.

    This workflow uses mrtrix tools to run tractography on FOD data,
    optionally weighting the streamlines with SIFT2.

    Inputs

        fod_sh_mif
            mif file containing spherical harmonics for tractography

    Outputs

        tck_file
            The resulting tck streamlines.
        sift_weights
            Per-streamline weights from SIFT2 (when ``use_sift2`` is set).
        mu
            The SIFT2 proportionality coefficient.

    """
    inputnode = pe.Node(
        niu.IdentityInterface(fields=input_fields + ['fod_sh_mif']),
        name="inputnode")
    outputnode = pe.Node(
        niu.IdentityInterface(fields=['tck_file', 'sift_weights', 'mu']),
        name="outputnode")
    workflow = pe.Workflow(name=name)

    # Resample anat mask
    resample_mask = pe.Node(afni.Resample(outputtype='NIFTI_GZ',
                                          resample_mode="NN"),
                            name='resample_mask')
    tracking_params = params.get("tckgen", {})
    use_sift2 = params.get("use_sift2", True)
    use_5tt = params.get("use_5tt", False)
    sift_params = params.get("sift2", {})
    tracking = pe.Node(TckGen(**tracking_params), name='tractography')
    workflow.connect([
        (inputnode, resample_mask, [('t1_brain_mask', 'in_file'),
                                    ('dwi_file', 'master')]),
        (inputnode, tracking, [('fod_sh_mif', 'in_file'),
                               ('fod_sh_mif', 'seed_dynamic')]),
        (tracking, outputnode, [("out_file", "tck_file")])
    ])

    if use_5tt:
        workflow.connect(inputnode, 'mrtrix_5tt', tracking, 'act_file')

    if use_sift2:
        tck_sift2 = pe.Node(SIFT2(**sift_params), name="tck_sift2")
        workflow.connect([
            (inputnode, tck_sift2, [('fod_sh_mif', 'in_fod')]),
            (tracking, tck_sift2, [('out_file', 'in_tracks')]),
            (tck_sift2, outputnode, [('out_mu', 'mu'),
                                     ('out_weights', 'sift_weights')])
        ])
        if output_suffix:
            ds_sift_weights = pe.Node(
                ReconDerivativesDataSink(extension='.csv',
                                         desc="siftweights",
                                         suffix=output_suffix),
                name='ds_sift_weights',
                run_without_submitting=True)
            workflow.connect(outputnode, 'sift_weights', ds_sift_weights,
                             'in_file')
        if use_5tt:
            workflow.connect(inputnode, "mrtrix_5tt", tck_sift2, "act_file")

    if output_suffix:
        ds_tck_file = pe.Node(
            ReconDerivativesDataSink(extension='.tck',
                                     desc="tracks",
                                     suffix=output_suffix),
            name='ds_tck_file',
            run_without_submitting=True)
        workflow.connect(outputnode, 'tck_file', ds_tck_file, 'in_file')
    return workflow
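# Usage sketch (hypothetical tckgen options): the "tckgen" and "sift2"
# sub-dicts are passed straight to the TckGen and SIFT2 interfaces.
#
#     params = {"use_sift2": True,
#               "use_5tt": False,
#               "tckgen": {"algorithm": "iFOD2"},
#               "sift2": {}}
#     track_wf = init_mrtrix_tractography_wf(output_suffix="ifod2",
#                                            params=params)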
def init_global_tractography_wf(name="global_tractography",
                                output_suffix="", params={}):
    """Run multi-shell, multi-tissue global tractography.

    This workflow uses mrtrix tools to run global tractography on
    multishell data.

    Inputs

        dwi_file
            Preprocessed DWI series
        wm_txt
            SH fiber response function for white matter
        gm_txt
            SH fiber response function for gray matter
        csf_txt
            SH fiber response function for CSF

    Outputs

        wm_odf
            FOD SH image enhanced by global tractography.
        isotropic_fraction
            Estimated volume fractions of the isotropic tissue
            compartments.
        tck_file
            Streamlines produced by global tractography.
        residual_energy
            The residual data energy image, including the L1-penalty imposed
            by the particle potential.
        fod_sh_mif
            The same file as wm_odf.

    """
    inputnode = pe.Node(
        niu.IdentityInterface(fields=input_fields +
                              ['gm_txt', 'wm_txt', 'csf_txt']),
        name="inputnode")
    outputnode = pe.Node(
        niu.IdentityInterface(fields=[
            'fod_sh_mif', 'wm_odf', 'isotropic_fraction', 'tck_file',
            'residual_energy'
        ]),
        name="outputnode")
    workflow = pe.Workflow(name=name)
    create_mif = pe.Node(MRTrixIngress(), name='create_mif')

    # Resample anat mask
    resample_mask = pe.Node(afni.Resample(outputtype='NIFTI_GZ',
                                          resample_mode="NN"),
                            name='resample_mask')
    tck_global = pe.Node(GlobalTractography(**params), name='tck_global')
    workflow.connect([
        (inputnode, resample_mask, [('t1_brain_mask', 'in_file'),
                                    ('dwi_file', 'master')]),
        (inputnode, create_mif, [('dwi_file', 'dwi_file'),
                                 ('bval_file', 'bval_file'),
                                 ('bvec_file', 'bvec_file'),
                                 ('b_file', 'b_file')]),
        (create_mif, tck_global, [('mif_file', 'dwi_file')]),
        (resample_mask, tck_global, [('out_file', 'mask')]),
        (inputnode, tck_global, [("wm_txt", "wm_txt"),
                                 ("gm_txt", "gm_txt"),
                                 ("csf_txt", "csf_txt")]),
        (tck_global, outputnode,
         [("wm_odf", "wm_odf"),
          ("isotropic_fraction", "isotropic_fraction"),
          ("tck_file", "tck_file"),
          ("residual_energy", "residual_energy"),
          ("wm_odf", "fod_sh_mif")])
    ])

    if output_suffix:
        ds_globalwm_odf = pe.Node(
            ReconDerivativesDataSink(extension='.mif.gz',
                                     desc="globalwmFOD",
                                     suffix=output_suffix,
                                     compress=True),
            name='ds_globalwm_odf',
            run_without_submitting=True)
        workflow.connect(outputnode, 'wm_odf', ds_globalwm_odf, 'in_file')

        ds_isotropic_fraction = pe.Node(
            ReconDerivativesDataSink(extension='.mif.gz',
                                     desc="ISOfraction",
                                     suffix=output_suffix),
            name='ds_isotropic_fraction',
            run_without_submitting=True)
        workflow.connect(outputnode, 'isotropic_fraction',
                         ds_isotropic_fraction, 'in_file')

        ds_tck_file = pe.Node(
            ReconDerivativesDataSink(extension='.tck.gz',
                                     desc="global",
                                     suffix=output_suffix,
                                     compress=True),
            name='ds_tck_file',
            run_without_submitting=True)
        workflow.connect(outputnode, 'tck_file', ds_tck_file, 'in_file')

        ds_residual_energy = pe.Node(
            ReconDerivativesDataSink(extension='.tck.gz',
                                     desc="residualEnergy",
                                     suffix=output_suffix,
                                     compress=True),
            name='ds_residual_energy',
            run_without_submitting=True)
        workflow.connect(outputnode, 'residual_energy', ds_residual_energy,
                         'in_file')
    return workflow
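# Usage sketch: params go directly to the GlobalTractography interface; the
# wm/gm/csf response functions arrive through the inputnode, typically from
# an upstream CSD response estimation node.
#
#     global_wf = init_global_tractography_wf(output_suffix="global")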