def init_spaces(checkpoint=True):
    """Initialize the :attr:`~workflow.spaces` setting."""
    from niworkflows.utils.spaces import Reference, SpatialReferences

    spaces = execution.output_spaces or SpatialReferences()
    if not isinstance(spaces, SpatialReferences):
        spaces = SpatialReferences(
            [ref for s in spaces.split(" ") for ref in Reference.from_string(s)]
        )

    if checkpoint and not spaces.is_cached():
        spaces.checkpoint()

    # # Add the default standard space if not already present (required by several sub-workflows)
    # if "MNI152NLin2009cAsym" not in spaces.get_spaces(nonstandard=False, dim=(3,)):
    #     spaces.add(Reference("MNI152NLin2009cAsym", {}))

    # Ensure user-defined spatial references for outputs are correctly parsed.
    # Certain options require normalization to a space not explicitly defined by users.
    # These spaces will not be included in the final outputs.
    # if workflow.use_aroma:
    #     # Make sure there's a normalization to FSL for AROMA to use.
    #     spaces.add(Reference("MNI152NLin6Asym", {"res": "2"}))

    cifti_output = workflow.cifti_output
    if workflow.cifti_output:
        # CIFTI grayordinates to corresponding FSL-MNI resolutions.
        vol_res = "2" if cifti_output == "91k" else "1"
        spaces.add(Reference("fsaverage", {"den": "164k"}))
        spaces.add(Reference("MNI152NLin6Asym", {"res": vol_res}))

    # Make the SpatialReferences object available
    workflow.spaces = spaces
def init_spaces(checkpoint=True):
    """Initialize the :attr:`~workflow.spaces` setting."""
    from niworkflows.utils.spaces import Reference, SpatialReferences

    spaces = execution.output_spaces or SpatialReferences()
    if not isinstance(spaces, SpatialReferences):
        spaces = SpatialReferences(
            [ref for s in spaces.split(' ') for ref in Reference.from_string(s)]
        )

    if checkpoint and not spaces.is_cached():
        spaces.checkpoint()

    # Add the default standard space if not already present (required by several sub-workflows)
    if "Fischer344" not in spaces.get_spaces(nonstandard=False, dim=(3,)):
        spaces.add(Reference("Fischer344", {}))

    # Make the SpatialReferences object available
    workflow.spaces = spaces
def init_spaces(checkpoint=True):
    """Initialize the :attr:`~workflow.spaces` setting."""
    from niworkflows.utils.spaces import Reference, SpatialReferences

    spaces = execution.output_spaces or SpatialReferences()
    if not isinstance(spaces, SpatialReferences):
        spaces = SpatialReferences(
            [ref for s in spaces.split(' ') for ref in Reference.from_string(s)]
        )

    if checkpoint and not spaces.is_cached():
        spaces.checkpoint()

    # Make the SpatialReferences object available
    workflow.spaces = spaces
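# The three ``init_spaces`` variants above differ only in which default space they inject
# after parsing ``--output-spaces``.  A minimal sketch of the parsing step they share,
# assuming ``niworkflows`` is installed; the requested space strings below are
# illustrative, not taken from any of the variants.
from niworkflows.utils.spaces import Reference, SpatialReferences

requested = "MNI152NLin2009cAsym:res-2 anat"  # equivalent of --output-spaces
spaces = SpatialReferences(
    [ref for s in requested.split(" ") for ref in Reference.from_string(s)]
)
spaces.checkpoint()  # freeze the user-requested spaces; later .add() calls are internal-only
print(spaces.get_spaces(nonstandard=False, dim=(3,)))  # expected: ['MNI152NLin2009cAsym']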
def parse_args(args=None, namespace=None):
    """Parse args and run further checks on the command line."""
    import logging

    from niworkflows.utils.spaces import Reference, SpatialReferences

    parser = _build_parser()
    opts = parser.parse_args(args, namespace)
    config.execution.log_level = int(max(25 - 5 * opts.verbose_count, logging.DEBUG))
    config.from_dict(vars(opts))
    config.loggers.init()

    # Initialize --output-spaces if not defined
    if config.execution.output_spaces is None:
        config.execution.output_spaces = SpatialReferences(
            [Reference("MNI152NLin2009cAsym", {"res": "native"})])

    # Retrieve logging level
    build_log = config.loggers.cli

    if config.execution.fs_license_file is None:
        raise RuntimeError("""\
ERROR: a valid license file is required for FreeSurfer to run. fMRIPrep looked for an existing \
license file at several paths, in this order: 1) command line argument ``--fs-license-file``; \
2) ``$FS_LICENSE`` environment variable; and 3) the ``$FREESURFER_HOME/license.txt`` path. Get it \
(for free) by registering at https://surfer.nmr.mgh.harvard.edu/registration.html""")
    os.environ['FS_LICENSE'] = str(config.execution.fs_license_file)

    # Load base plugin_settings from file if --use-plugin
    if opts.use_plugin is not None:
        from yaml import load as loadyml
        with open(opts.use_plugin) as f:
            plugin_settings = loadyml(f)
        _plugin = plugin_settings.get('plugin')
        if _plugin:
            config.nipype.plugin = _plugin
            config.nipype.plugin_args = plugin_settings.get('plugin_args', {})
            config.nipype.nprocs = config.nipype.plugin_args.get(
                'nprocs', config.nipype.nprocs)

    # Resource management options
    # Note that we're making strong assumptions about valid plugin args
    # This may need to be revisited if people try to use batch plugins
    if 1 < config.nipype.nprocs < config.nipype.omp_nthreads:
        build_log.warning(
            'Per-process threads (--omp-nthreads=%d) exceed total '
            'threads (--nthreads/--n_cpus=%d)',
            config.nipype.omp_nthreads, config.nipype.nprocs)

    bids_dir = config.execution.bids_dir
    output_dir = config.execution.output_dir
    work_dir = config.execution.work_dir
    version = config.environment.version

    if config.execution.fs_subjects_dir is None:
        config.execution.fs_subjects_dir = output_dir / 'freesurfer'

    # Wipe out existing work_dir
    if opts.clean_workdir and work_dir.exists():
        from niworkflows.utils.misc import clean_directory
        build_log.info("Clearing previous fMRIPrep working directory: %s", work_dir)
        if not clean_directory(work_dir):
            build_log.warning(
                "Could not clear all contents of working directory: %s", work_dir)

    # Ensure input and output folders are not the same
    if output_dir == bids_dir:
        parser.error(
            'The selected output folder is the same as the input BIDS folder. '
            'Please modify the output path (suggestion: %s).'
            % (bids_dir / 'derivatives' / ('fmriprep-%s' % version.split('+')[0])))

    if bids_dir in work_dir.parents:
        parser.error(
            'The selected working directory is a subdirectory of the input BIDS folder. '
            'Please modify the output path.')

    # Validate inputs
    if not opts.skip_bids_validation:
        from ..utils.bids import validate_input_dir
        build_log.info(
            "Making sure the input data is BIDS compliant (warnings can be ignored in most "
            "cases).")
        validate_input_dir(config.environment.exec_env, opts.bids_dir,
                           opts.participant_label)

    # Setup directories
    config.execution.log_dir = output_dir / 'fmriprep' / 'logs'
    # Check and create output and working directories
    config.execution.log_dir.mkdir(exist_ok=True, parents=True)
    output_dir.mkdir(exist_ok=True, parents=True)
    work_dir.mkdir(exist_ok=True, parents=True)

    # Force initialization of the BIDSLayout
    config.execution.init()
    all_subjects = config.execution.layout.get_subjects()
    if config.execution.participant_label is None:
        config.execution.participant_label = all_subjects

    participant_label = set(config.execution.participant_label)
    missing_subjects = participant_label - set(all_subjects)
    if missing_subjects:
        parser.error(
            "One or more participant labels were not found in the BIDS directory: "
            "%s." % ", ".join(missing_subjects))

    config.execution.participant_label = sorted(participant_label)
    config.workflow.skull_strip_template = config.workflow.skull_strip_template[0]
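# Worked example (illustration only) of the verbosity-to-log-level arithmetic used in
# ``parse_args`` above: the level is 25 minus 5 per ``-v``, floored at ``logging.DEBUG``.
import logging

for verbose_count in range(4):
    level = int(max(25 - 5 * verbose_count, logging.DEBUG))
    print(verbose_count, level)
# 0 -> 25 (between INFO and WARNING), -v -> 20 (INFO), -vv -> 15, -vvv -> 10 (DEBUG)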
def parse_args(args=None, namespace=None):
    """Parse args and run further checks on the command line."""
    import logging

    from niworkflows.utils.spaces import Reference, SpatialReferences

    parser = _build_parser()
    opts = parser.parse_args(args, namespace)
    config.execution.log_level = int(max(25 - 5 * opts.verbose_count, logging.DEBUG))
    config.from_dict(vars(opts))

    # Initialize --output-spaces if not defined
    if config.execution.output_spaces is None:
        config.execution.output_spaces = SpatialReferences(
            [Reference("MNI152NLin2009cAsym", {"res": "native"})])

    # Retrieve logging level
    build_log = config.loggers.cli

    # Load base plugin_settings from file if --use-plugin
    if opts.use_plugin is not None:
        import yaml
        with open(opts.use_plugin) as f:
            plugin_settings = yaml.load(f, Loader=yaml.FullLoader)
        _plugin = plugin_settings.get("plugin")
        if _plugin:
            config.nipype.plugin = _plugin
            config.nipype.plugin_args = plugin_settings.get("plugin_args", {})
            config.nipype.nprocs = opts.nprocs or config.nipype.plugin_args.get(
                "n_procs", config.nipype.nprocs)

    # Resource management options
    # Note that we're making strong assumptions about valid plugin args
    # This may need to be revisited if people try to use batch plugins
    if 1 < config.nipype.nprocs < config.nipype.omp_nthreads:
        build_log.warning(
            f"Per-process threads (--omp-nthreads={config.nipype.omp_nthreads}) exceed "
            f"total threads (--nthreads/--n_cpus={config.nipype.nprocs})")

    # Inform the user about the risk of using brain-extracted images
    if config.workflow.skull_strip_t1w == "auto":
        build_log.warning("""\
Option ``--skull-strip-t1w`` was set to 'auto'. A heuristic will be \
applied to determine whether the input T1w image(s) have already been skull-stripped.
If that were the case, brain extraction and INU correction will be skipped for those T1w \
inputs. Please, BEWARE OF THE RISKS TO THE CONSISTENCY of results when using varying \
processing workflows across participants. To determine whether a participant has been run \
through the shortcut pipeline (meaning, brain extraction was skipped), please check the \
citation boilerplate. When reporting results with varying pipelines, please make sure you \
mention this particular variant of fMRIPrep listing the participants for which it was \
applied.""")

    bids_dir = config.execution.bids_dir
    output_dir = config.execution.output_dir
    work_dir = config.execution.work_dir
    version = config.environment.version

    if config.execution.fs_subjects_dir is None:
        config.execution.fs_subjects_dir = output_dir / "freesurfer"

    # Wipe out existing work_dir
    if opts.clean_workdir and work_dir.exists():
        from niworkflows.utils.misc import clean_directory
        build_log.info(f"Clearing previous fMRIPrep working directory: {work_dir}")
        if not clean_directory(work_dir):
            build_log.warning(
                f"Could not clear all contents of working directory: {work_dir}")

    # Ensure input and output folders are not the same
    if output_dir == bids_dir:
        parser.error(
            "The selected output folder is the same as the input BIDS folder. "
            "Please modify the output path (suggestion: %s)."
            % (bids_dir / "derivatives" / ("fmriprep-%s" % version.split("+")[0])))

    if bids_dir in work_dir.parents:
        parser.error(
            "The selected working directory is a subdirectory of the input BIDS folder. "
            "Please modify the output path.")

    # Validate inputs
    if not opts.skip_bids_validation:
        from ..utils.bids import validate_input_dir
        build_log.info(
            "Making sure the input data is BIDS compliant (warnings can be ignored in most "
            "cases).")
        validate_input_dir(config.environment.exec_env, opts.bids_dir,
                           opts.participant_label)

    # Setup directories
    config.execution.log_dir = output_dir / "fmriprep" / "logs"
    # Check and create output and working directories
    config.execution.log_dir.mkdir(exist_ok=True, parents=True)
    output_dir.mkdir(exist_ok=True, parents=True)
    work_dir.mkdir(exist_ok=True, parents=True)

    # Force initialization of the BIDSLayout
    config.execution.init()
    all_subjects = config.execution.layout.get_subjects()
    if config.execution.participant_label is None:
        config.execution.participant_label = all_subjects

    participant_label = set(config.execution.participant_label)
    missing_subjects = participant_label - set(all_subjects)
    if missing_subjects:
        parser.error(
            "One or more participant labels were not found in the BIDS directory: "
            "%s." % ", ".join(missing_subjects))

    config.execution.participant_label = sorted(participant_label)
    config.workflow.skull_strip_template = config.workflow.skull_strip_template[0]
    Exec,
    Select,
    MergeColumns,
    ApplyTransforms,
    MakeResultdicts,
    ResultdictDatasink,
    Vals,
    FilterRegressor,
)
from ...resource import get as getresource
from ...utils import firststr, loadints
from ..constants import constants
from ..memory import MemoryCalculator

spaces = SpatialReferences([Reference("MNI152NLin6Asym", {"res": "2"})])
if not spaces.is_cached():
    spaces.checkpoint()


def _aroma_column_names(melodic_mix=None, aroma_noise_ics=None):
    import numpy as np

    from halfpipe.utils import ncol

    ncomponents = ncol(melodic_mix)
    leading_zeros = int(np.ceil(np.log10(ncomponents)))
    column_names = []
    for i in range(1, ncomponents + 1):
        if i in aroma_noise_ics:
            column_names.append(f"aroma_noise_{i:0{leading_zeros}d}")
        else:
def init_single_subject_wf(subject_id): """ Organize the preprocessing pipeline for a single subject. It collects and reports information about the subject, and prepares sub-workflows to perform anatomical and functional preprocessing. Anatomical preprocessing is performed in a single workflow, regardless of the number of sessions. Functional preprocessing is performed using a separate workflow for each individual BOLD series. Workflow Graph .. workflow:: :graph2use: orig :simple_form: yes from fprodents.workflows.tests import mock_config from fprodents.workflows.base import init_single_subject_wf with mock_config(): wf = init_single_subject_wf('01') Parameters ---------- subject_id : :obj:`str` Subject label for this single-subject workflow. Inputs ------ subjects_dir : :obj:`str` FreeSurfer's ``$SUBJECTS_DIR``. """ from niworkflows.engine.workflows import LiterateWorkflow as Workflow from niworkflows.interfaces.bids import BIDSInfo from niworkflows.interfaces.nilearn import NILEARN_VERSION from niworkflows.utils.bids import collect_data from niworkflows.utils.connections import listify from niworkflows.utils.spaces import Reference from niworkflows.workflows.epi.refmap import init_epi_reference_wf from ..patch.interfaces import BIDSDataGrabber from ..patch.utils import extract_entities, fix_multi_source_name from ..patch.workflows.anatomical import init_anat_preproc_wf subject_data = collect_data( config.execution.layout, subject_id, config.execution.task_id, config.execution.echo_idx, bids_filters=config.execution.bids_filters, )[0] anat_only = config.workflow.anat_only # Make sure we always go through these two checks if not anat_only and not subject_data["bold"]: task_id = config.execution.task_id raise RuntimeError( f"No BOLD images found for participant <{subject_id}> and " f"task <{task_id or 'all'}>. All workflows require BOLD images.") workflow = Workflow(name=f"single_subject_{subject_id}_wf") workflow.__desc__ = """ Results included in this manuscript come from preprocessing performed using *fMRIPrep-rodents* {fmriprep_ver} (@fmriprep1; @fmriprep2; RRID:SCR_016216), which is based on *Nipype* {nipype_ver} (@nipype1; @nipype2; RRID:SCR_002502). """.format( fmriprep_ver=config.environment.version, nipype_ver=config.environment.nipype_version, ) workflow.__postdesc__ = """ Many internal operations of *fMRIPrep* use *Nilearn* {nilearn_ver} [@nilearn, RRID:SCR_001362], mostly within the functional processing workflow. For more details of the pipeline, see [the section corresponding to workflows in *fMRIPrep*'s documentation]\ (https://fmriprep-rodents.readthedocs.io/en/latest/workflows.html \ "FMRIPrep's documentation"). ### Copyright Waiver The above boilerplate text was automatically generated by fMRIPrep with the express intention that users should copy and paste this text into their manuscripts *unchanged*. It is released under the [CC0]\ (https://creativecommons.org/publicdomain/zero/1.0/) license. 
### References """.format(nilearn_ver=NILEARN_VERSION) spaces = config.workflow.spaces output_dir = str(config.execution.output_dir) inputnode = pe.Node(niu.IdentityInterface(fields=["subjects_dir"]), name="inputnode") bidssrc = pe.Node( BIDSDataGrabber(subject_data=subject_data, anat_only=anat_only, subject_id=subject_id), name="bidssrc", ) bids_info = pe.Node( BIDSInfo(bids_dir=config.execution.bids_dir, bids_validate=False), name="bids_info", ) summary = pe.Node( SubjectSummary( std_spaces=spaces.get_spaces(nonstandard=False), nstd_spaces=spaces.get_spaces(standard=False), ), name="summary", run_without_submitting=True, ) about = pe.Node( AboutSummary(version=config.environment.version, command=" ".join(sys.argv)), name="about", run_without_submitting=True, ) ds_report_summary = pe.Node( DerivativesDataSink( base_directory=output_dir, desc="summary", datatype="figures", dismiss_entities=("echo", ), ), name="ds_report_summary", run_without_submitting=True, ) ds_report_about = pe.Node( DerivativesDataSink( base_directory=output_dir, desc="about", datatype="figures", dismiss_entities=("echo", ), ), name="ds_report_about", run_without_submitting=True, ) anat_derivatives = config.execution.anat_derivatives if anat_derivatives: from smriprep.utils.bids import collect_derivatives std_spaces = spaces.get_spaces(nonstandard=False, dim=(3, )) anat_derivatives = collect_derivatives( anat_derivatives.absolute(), subject_id, std_spaces, False, ) if anat_derivatives is None: config.loggers.workflow.warning(f"""\ Attempted to access pre-existing anatomical derivatives at \ <{config.execution.anat_derivatives}>, however not all expectations of fMRIPrep \ were met (for participant <{subject_id}>, spaces <{', '.join(std_spaces)}>.""") # Preprocessing of T1w (includes registration to MNI) anat_preproc_wf = init_anat_preproc_wf( bids_root=str(config.execution.bids_dir), debug=config.execution.debug is True, existing_derivatives=anat_derivatives, longitudinal=config.workflow.longitudinal, omp_nthreads=config.nipype.omp_nthreads, output_dir=output_dir, skull_strip_fixed_seed=config.workflow.skull_strip_fixed_seed, skull_strip_mode=config.workflow.skull_strip_t1w, skull_strip_template=Reference.from_string( config.workflow.skull_strip_template)[0], spaces=spaces, t2w=subject_data["t2w"], ) # fmt:off workflow.connect([ (bidssrc, bids_info, [(('t2w', fix_multi_source_name), 'in_file')]), (inputnode, summary, [('subjects_dir', 'subjects_dir')]), (bidssrc, summary, [('t1w', 't1w'), ('t2w', 't2w'), ('bold', 'bold')]), (bids_info, summary, [('subject', 'subject_id')]), (bidssrc, anat_preproc_wf, [('t2w', 'inputnode.t2w'), ('roi', 'inputnode.roi')]), (bidssrc, ds_report_summary, [(('t2w', fix_multi_source_name), 'source_file')]), (summary, ds_report_summary, [('out_report', 'in_file')]), (bidssrc, ds_report_about, [(('t2w', fix_multi_source_name), 'source_file')]), (about, ds_report_about, [('out_report', 'in_file')]), ]) # fmt:on # Overwrite ``out_path_base`` of smriprep's DataSinks for node in workflow.list_node_names(): if node.split(".")[-1].startswith("ds_"): workflow.get_node(node).interface.out_path_base = "fmriprep" if anat_only: return workflow # Append the functional section to the existing anatomical exerpt # That way we do not need to stream down the number of bold datasets anat_preproc_wf.__postdesc__ = ((anat_preproc_wf.__postdesc__ or "") + """ Functional data preprocessing : For each of the {num_bold} BOLD runs found per subject (across all tasks and sessions), the following preprocessing was 
performed. """.format(num_bold=len(subject_data["bold"]))) for bold_file in subject_data["bold"]: echoes = extract_entities(bold_file).get("echo", []) echo_idxs = listify(echoes) multiecho = len(echo_idxs) > 2 # The default N4 shrink factor (4) appears to artificially blur values across # anisotropic voxels. Shrink factors are intended to speed up calculation # but in most cases, the extra calculation time appears to be minimal. # Similarly, the use of an asymmetric bspline grid improves performance # in anisotropic voxels. The number of N4 iterations are also reduced. bold_ref_wf = init_epi_reference_wf( auto_bold_nss=True, omp_nthreads=config.nipype.omp_nthreads, n4_iter=4, adaptive_bspline_grid=True, shrink_factor=1, ) bold_ref_wf.inputs.inputnode.in_files = (bold_file if not multiecho else bold_file[0]) func_preproc_wf = init_func_preproc_wf(bold_file) # fmt:off workflow.connect([ (anat_preproc_wf, func_preproc_wf, [('outputnode.t2w_preproc', 'inputnode.anat_preproc'), ('outputnode.t2w_mask', 'inputnode.anat_mask'), ('outputnode.t2w_dseg', 'inputnode.anat_dseg'), ('outputnode.t2w_tpms', 'inputnode.anat_tpms'), ('outputnode.template', 'inputnode.template'), ('outputnode.anat2std_xfm', 'inputnode.anat2std_xfm'), ('outputnode.std2anat_xfm', 'inputnode.std2anat_xfm')]), (bold_ref_wf, func_preproc_wf, [('outputnode.epi_ref_file', 'inputnode.ref_file'), ('outputnode.xfm_files', 'inputnode.bold_ref_xfm'), ('outputnode.validation_report', 'inputnode.validation_report'), (('outputnode.n_dummy', _pop), 'inputnode.n_dummy_scans')]), ]) # fmt:on return workflow
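# ``_pop`` is referenced in the connections above (for ``outputnode.n_dummy``) but is not
# defined in this excerpt.  A sketch of the conventional NiPreps helper it is assumed to
# correspond to (assumption, not the verbatim definition):
def _pop(inlist):
    """Return the first element when given a list/tuple, otherwise pass the value through."""
    if isinstance(inlist, (list, tuple)):
        return inlist[0]
    return inlist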
def init_anat_preproc_wf( workdir=None, freesurfer=False, no_compose_transforms=False, skull_strip_algorithm="ants", name="anat_preproc_wf", ): """ modified from smriprep/workflows/anatomical.py """ workflow = pe.Workflow(name=name) inputnode = pe.Node(niu.IdentityInterface(fields=["t1w", "metadata"]), name="inputnode") buffernode = pe.Node( niu.IdentityInterface(fields=["t1w_brain", "t1w_mask"]), name="buffernode") outputnode = pe.Node( niu.IdentityInterface(fields=anat_preproc_wf_output_attrs, ), name="outputnode", ) skull_strip_template = Reference.from_string( config.workflow.skull_strip_template)[0] # Step 1 anat_validate = pe.Node(ValidateImage(), name="anat_validate", run_without_submitting=True) if skull_strip_algorithm == "none": brain_extraction_wf = init_n4_only_wf( omp_nthreads=config.nipype.omp_nthreads, atropos_use_random_seed=not config.workflow.skull_strip_fixed_seed, ) elif skull_strip_algorithm == "ants": brain_extraction_wf = init_brain_extraction_wf( in_template=skull_strip_template.space, template_spec=skull_strip_template.spec, atropos_use_random_seed=not config.workflow.skull_strip_fixed_seed, omp_nthreads=config.nipype.omp_nthreads, normalization_quality="precise", ) else: raise ValueError( f'Unknown skull_strip_algorithm "{skull_strip_algorithm}"') workflow.connect([ (inputnode, anat_validate, [("t1w", "in_file")]), (anat_validate, brain_extraction_wf, [("out_file", "inputnode.in_files")]), ( brain_extraction_wf, outputnode, [("outputnode.bias_corrected", "t1w_preproc")], ), ( brain_extraction_wf, buffernode, [ (("outputnode.out_file", first), "t1w_brain"), ("outputnode.out_mask", "t1w_mask"), ], ), ( buffernode, outputnode, [("t1w_brain", "t1w_brain"), ("t1w_mask", "t1w_mask")], ), ]) # Step 2 t1w_dseg = pe.Node( fsl.FAST(segments=True, no_bias=True, probability_maps=True), name="t1w_dseg", mem_gb=3, ) workflow.connect([ (buffernode, t1w_dseg, [("t1w_brain", "in_files")]), ( t1w_dseg, outputnode, [("tissue_class_map", "t1w_dseg"), ("probability_maps", "t1w_tpms")], ), ]) # Step 3 anat_norm_wf = init_anat_norm_wf( debug=config.execution.debug, omp_nthreads=config.nipype.omp_nthreads, templates=norm_templates if not no_compose_transforms else norm_templates + extra_templates, ) workflow.connect([ ( inputnode, anat_norm_wf, [("t1w", "inputnode.orig_t1w")], ), ( brain_extraction_wf, anat_norm_wf, [(("outputnode.bias_corrected", first), "inputnode.moving_image")], ), (buffernode, anat_norm_wf, [("t1w_mask", "inputnode.moving_mask")]), ( t1w_dseg, anat_norm_wf, [("tissue_class_map", "inputnode.moving_segmentation")], ), (t1w_dseg, anat_norm_wf, [("probability_maps", "inputnode.moving_tpms") ]), ]) # Write outputs anat_reports_wf = init_anat_reports_wf(freesurfer=freesurfer, output_dir="/") workflow.connect([ ( outputnode, anat_reports_wf, [ ("t1w_preproc", "inputnode.t1w_preproc"), ("t1w_mask", "inputnode.t1w_mask"), ("t1w_dseg", "inputnode.t1w_dseg"), ], ), (inputnode, anat_reports_wf, [("t1w", "inputnode.source_file")]), ( anat_norm_wf, anat_reports_wf, [ ("poutputnode.template", "inputnode.template"), ("poutputnode.standardized", "inputnode.std_t1w"), ("poutputnode.std_mask", "inputnode.std_mask"), ], ), ]) # Custom add_templates_by_composing_transforms( workflow, templates=extra_templates if not no_compose_transforms else []) make_reportnode(workflow, spaces=True) assert workdir is not None make_reportnode_datasink(workflow, workdir) if freesurfer: def get_subject(dic): return dic.get("subject") # 5. 
Surface reconstruction (--fs-no-reconall not set) surface_recon_wf = init_surface_recon_wf( name="surface_recon_wf", omp_nthreads=config.nipype.omp_nthreads, hires=config.workflow.hires, ) subjects_dir = Path(workdir) / "subjects_dir" subjects_dir.mkdir(parents=True, exist_ok=True) surface_recon_wf.get_node("inputnode").inputs.subjects_dir = str( subjects_dir) workflow.connect([ ( inputnode, surface_recon_wf, [(("metadata", get_subject), "inputnode.subject_id")], ), (anat_validate, surface_recon_wf, [("out_file", "inputnode.t1w")]), ( brain_extraction_wf, surface_recon_wf, [ (("outputnode.out_file", first), "inputnode.skullstripped_t1"), ("outputnode.out_segm", "inputnode.ants_segs"), (("outputnode.bias_corrected", first), "inputnode.corrected_t1"), ], ), ( surface_recon_wf, anat_reports_wf, [ ("outputnode.subject_id", "inputnode.subject_id"), ("outputnode.subjects_dir", "inputnode.subjects_dir"), ], ), ]) return workflow
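# Hypothetical usage sketch of the HALFpipe-style ``init_anat_preproc_wf`` defined above.
# Note that ``workdir`` must be provided: the function asserts ``workdir is not None``
# before wiring the report datasink.  Paths are illustrative only.
anat_wf = init_anat_preproc_wf(
    workdir="/scratch/halfpipe",
    freesurfer=False,
    skull_strip_algorithm="ants",
)
anat_wf.base_dir = "/scratch/halfpipe/nipype"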
def init_single_subject_wf(subject_id): """ Organize the preprocessing pipeline for a single subject. It collects and reports information about the subject, and prepares sub-workflows to perform anatomical and functional preprocessing. Anatomical preprocessing is performed in a single workflow, regardless of the number of sessions. Functional preprocessing is performed using a separate workflow for each individual BOLD series. Workflow Graph .. workflow:: :graph2use: orig :simple_form: yes from fmriprep.workflows.tests import mock_config from fmriprep.workflows.base import init_single_subject_wf with mock_config(): wf = init_single_subject_wf('01') Parameters ---------- subject_id : :obj:`str` Subject label for this single-subject workflow. Inputs ------ subjects_dir : :obj:`str` FreeSurfer's ``$SUBJECTS_DIR``. """ from niworkflows.engine.workflows import LiterateWorkflow as Workflow from niworkflows.interfaces.bids import BIDSInfo, BIDSDataGrabber from niworkflows.interfaces.nilearn import NILEARN_VERSION from niworkflows.utils.bids import collect_data from niworkflows.utils.misc import fix_multi_T1w_source_name from niworkflows.utils.spaces import Reference from smriprep.workflows.anatomical import init_anat_preproc_wf name = "single_subject_%s_wf" % subject_id subject_data = collect_data(config.execution.layout, subject_id, config.execution.task_id, config.execution.echo_idx, bids_filters=config.execution.bids_filters)[0] if 'flair' in config.workflow.ignore: subject_data['flair'] = [] if 't2w' in config.workflow.ignore: subject_data['t2w'] = [] anat_only = config.workflow.anat_only # Make sure we always go through these two checks if not anat_only and not subject_data['bold']: task_id = config.execution.task_id raise RuntimeError( "No BOLD images found for participant {} and task {}. " "All workflows require BOLD images.".format( subject_id, task_id if task_id else '<all>')) if not subject_data['t1w']: raise Exception("No T1w images found for participant {}. " "All workflows require T1w images.".format(subject_id)) workflow = Workflow(name=name) workflow.__desc__ = """ Results included in this manuscript come from preprocessing performed using *fMRIPrep* {fmriprep_ver} (@fmriprep1; @fmriprep2; RRID:SCR_016216), which is based on *Nipype* {nipype_ver} (@nipype1; @nipype2; RRID:SCR_002502). """.format(fmriprep_ver=config.environment.version, nipype_ver=config.environment.nipype_version) workflow.__postdesc__ = """ Many internal operations of *fMRIPrep* use *Nilearn* {nilearn_ver} [@nilearn, RRID:SCR_001362], mostly within the functional processing workflow. For more details of the pipeline, see [the section corresponding to workflows in *fMRIPrep*'s documentation]\ (https://fmriprep.readthedocs.io/en/latest/workflows.html \ "FMRIPrep's documentation"). ### Copyright Waiver The above boilerplate text was automatically generated by fMRIPrep with the express intention that users should copy and paste this text into their manuscripts *unchanged*. It is released under the [CC0]\ (https://creativecommons.org/publicdomain/zero/1.0/) license. 
### References """.format(nilearn_ver=NILEARN_VERSION) spaces = config.workflow.spaces reportlets_dir = str(config.execution.work_dir / 'reportlets') inputnode = pe.Node(niu.IdentityInterface(fields=['subjects_dir']), name='inputnode') bidssrc = pe.Node(BIDSDataGrabber(subject_data=subject_data, anat_only=anat_only, subject_id=subject_id), name='bidssrc') bids_info = pe.Node(BIDSInfo(bids_dir=config.execution.bids_dir, bids_validate=False), name='bids_info') summary = pe.Node(SubjectSummary( std_spaces=spaces.get_spaces(nonstandard=False), nstd_spaces=spaces.get_spaces(standard=False)), name='summary', run_without_submitting=True) about = pe.Node(AboutSummary(version=config.environment.version, command=' '.join(sys.argv)), name='about', run_without_submitting=True) ds_report_summary = pe.Node(DerivativesDataSink( base_directory=reportlets_dir, desc='summary', keep_dtype=True), name='ds_report_summary', run_without_submitting=True) ds_report_about = pe.Node(DerivativesDataSink( base_directory=reportlets_dir, desc='about', keep_dtype=True), name='ds_report_about', run_without_submitting=True) # Preprocessing of T1w (includes registration to MNI) anat_preproc_wf = init_anat_preproc_wf( bids_root=str(config.execution.bids_dir), debug=config.execution.debug is True, freesurfer=config.workflow.run_reconall, hires=config.workflow.hires, longitudinal=config.workflow.longitudinal, omp_nthreads=config.nipype.omp_nthreads, output_dir=str(config.execution.output_dir), reportlets_dir=reportlets_dir, skull_strip_fixed_seed=config.workflow.skull_strip_fixed_seed, skull_strip_mode=config.workflow.skull_strip_t1w, skull_strip_template=Reference.from_string( config.workflow.skull_strip_template)[0], spaces=spaces, t1w=subject_data['t1w'], ) workflow.connect([ (inputnode, anat_preproc_wf, [('subjects_dir', 'inputnode.subjects_dir')]), (bidssrc, bids_info, [(('t1w', fix_multi_T1w_source_name), 'in_file') ]), (inputnode, summary, [('subjects_dir', 'subjects_dir')]), (bidssrc, summary, [('t1w', 't1w'), ('t2w', 't2w'), ('bold', 'bold')]), (bids_info, summary, [('subject', 'subject_id')]), (bids_info, anat_preproc_wf, [(('subject', _prefix), 'inputnode.subject_id')]), (bidssrc, anat_preproc_wf, [('t1w', 'inputnode.t1w'), ('t2w', 'inputnode.t2w'), ('roi', 'inputnode.roi'), ('flair', 'inputnode.flair')]), (bidssrc, ds_report_summary, [(('t1w', fix_multi_T1w_source_name), 'source_file')]), (summary, ds_report_summary, [('out_report', 'in_file')]), (bidssrc, ds_report_about, [(('t1w', fix_multi_T1w_source_name), 'source_file')]), (about, ds_report_about, [('out_report', 'in_file')]), ]) # Overwrite ``out_path_base`` of smriprep's DataSinks for node in workflow.list_node_names(): if node.split('.')[-1].startswith('ds_'): workflow.get_node(node).interface.out_path_base = 'fmriprep' if anat_only: return workflow # Append the functional section to the existing anatomical exerpt # That way we do not need to stream down the number of bold datasets anat_preproc_wf.__postdesc__ = (anat_preproc_wf.__postdesc__ or '') + """ Functional data preprocessing : For each of the {num_bold} BOLD runs found per subject (across all tasks and sessions), the following preprocessing was performed. 
""".format(num_bold=len(subject_data['bold'])) for bold_file in subject_data['bold']: func_preproc_wf = init_func_preproc_wf(bold_file) workflow.connect([ ( anat_preproc_wf, func_preproc_wf, [ ('outputnode.t1w_preproc', 'inputnode.t1w_preproc'), ('outputnode.t1w_mask', 'inputnode.t1w_mask'), ('outputnode.t1w_dseg', 'inputnode.t1w_dseg'), ('outputnode.t1w_aseg', 'inputnode.t1w_aseg'), ('outputnode.t1w_aparc', 'inputnode.t1w_aparc'), ('outputnode.t1w_tpms', 'inputnode.t1w_tpms'), ('outputnode.template', 'inputnode.template'), ('outputnode.anat2std_xfm', 'inputnode.anat2std_xfm'), ('outputnode.std2anat_xfm', 'inputnode.std2anat_xfm'), # Undefined if --fs-no-reconall, but this is safe ('outputnode.subjects_dir', 'inputnode.subjects_dir'), ('outputnode.subject_id', 'inputnode.subject_id'), ('outputnode.t1w2fsnative_xfm', 'inputnode.t1w2fsnative_xfm'), ('outputnode.fsnative2t1w_xfm', 'inputnode.fsnative2t1w_xfm') ]), ]) return workflow
def init_single_subject_wf(subject_id): """ Organize the preprocessing pipeline for a single subject. It collects and reports information about the subject, and prepares sub-workflows to perform anatomical and functional preprocessing. Anatomical preprocessing is performed in a single workflow, regardless of the number of sessions. Functional preprocessing is performed using a separate workflow for each individual BOLD series. Workflow Graph .. workflow:: :graph2use: orig :simple_form: yes from nibabies.workflows.tests import mock_config from nibabies.workflows.base import init_single_subject_wf with mock_config(): wf = init_single_subject_wf('01') Parameters ---------- subject_id : :obj:`str` Subject label for this single-subject workflow. Inputs ------ subjects_dir : :obj:`str` FreeSurfer's ``$SUBJECTS_DIR``. """ from niworkflows.engine.workflows import LiterateWorkflow as Workflow from niworkflows.interfaces.bids import BIDSInfo, BIDSDataGrabber from niworkflows.interfaces.nilearn import NILEARN_VERSION from niworkflows.utils.bids import collect_data from niworkflows.utils.spaces import Reference from .anatomical import init_infant_anat_wf from ..utils.misc import fix_multi_source_name name = "single_subject_%s_wf" % subject_id subject_data = collect_data( config.execution.layout, subject_id, config.execution.task_id, config.execution.echo_idx, bids_filters=config.execution.bids_filters, )[0] if "flair" in config.workflow.ignore: subject_data["flair"] = [] if "t2w" in config.workflow.ignore: subject_data["t2w"] = [] anat_only = config.workflow.anat_only anat_derivatives = config.execution.anat_derivatives anat_modality = config.workflow.anat_modality spaces = config.workflow.spaces # Make sure we always go through these two checks if not anat_only and not subject_data["bold"]: task_id = config.execution.task_id raise RuntimeError( "No BOLD images found for participant {} and task {}. " "All workflows require BOLD images.".format( subject_id, task_id if task_id else "<all>")) if anat_derivatives: from smriprep.utils.bids import collect_derivatives std_spaces = spaces.get_spaces(nonstandard=False, dim=(3, )) anat_derivatives = collect_derivatives( anat_derivatives.absolute(), subject_id, std_spaces, config.workflow.run_reconall, ) if anat_derivatives is None: config.loggers.workflow.warning(f"""\ Attempted to access pre-existing anatomical derivatives at \ <{config.execution.anat_derivatives}>, however not all expectations of fMRIPrep \ were met (for participant <{subject_id}>, spaces <{', '.join(std_spaces)}>, \ reconall <{config.workflow.run_reconall}>).""") if not anat_derivatives and not subject_data[anat_modality]: raise Exception( f"No {anat_modality} images found for participant {subject_id}. " "All workflows require T1w images.") workflow = Workflow(name=name) workflow.__desc__ = """ Results included in this manuscript come from preprocessing performed using *fMRIPrep* {fmriprep_ver} (@fmriprep1; @fmriprep2; RRID:SCR_016216), which is based on *Nipype* {nipype_ver} (@nipype1; @nipype2; RRID:SCR_002502). """.format( fmriprep_ver=config.environment.version, nipype_ver=config.environment.nipype_version, ) workflow.__postdesc__ = """ Many internal operations of *fMRIPrep* use *Nilearn* {nilearn_ver} [@nilearn, RRID:SCR_001362], mostly within the functional processing workflow. For more details of the pipeline, see [the section corresponding to workflows in *fMRIPrep*'s documentation]\ (https://nibabies.readthedocs.io/en/latest/workflows.html \ "FMRIPrep's documentation"). 
### Copyright Waiver The above boilerplate text was automatically generated by fMRIPrep with the express intention that users should copy and paste this text into their manuscripts *unchanged*. It is released under the [CC0]\ (https://creativecommons.org/publicdomain/zero/1.0/) license. ### References """.format(nilearn_ver=NILEARN_VERSION) fmriprep_dir = str(config.execution.fmriprep_dir) inputnode = pe.Node(niu.IdentityInterface(fields=["subjects_dir"]), name="inputnode") bidssrc = pe.Node( BIDSDataGrabber( subject_data=subject_data, anat_only=anat_only, anat_derivatives=anat_derivatives, subject_id=subject_id, ), name="bidssrc", ) bids_info = pe.Node( BIDSInfo(bids_dir=config.execution.bids_dir, bids_validate=False), name="bids_info", ) summary = pe.Node( SubjectSummary( std_spaces=spaces.get_spaces(nonstandard=False), nstd_spaces=spaces.get_spaces(standard=False), ), name="summary", run_without_submitting=True, ) about = pe.Node( AboutSummary(version=config.environment.version, command=" ".join(sys.argv)), name="about", run_without_submitting=True, ) ds_report_summary = pe.Node( DerivativesDataSink( base_directory=fmriprep_dir, desc="summary", datatype="figures", dismiss_entities=("echo", ), ), name="ds_report_summary", run_without_submitting=True, ) ds_report_about = pe.Node( DerivativesDataSink( base_directory=fmriprep_dir, desc="about", datatype="figures", dismiss_entities=("echo", ), ), name="ds_report_about", run_without_submitting=True, ) # Preprocessing of anatomical (includes registration to UNCInfant) anat_preproc_wf = init_infant_anat_wf( ants_affine_init=config.workflow.ants_affine_init or True, age_months=config.workflow.age_months, anat_modality=anat_modality, t1w=subject_data['t1w'], t2w=subject_data['t2w'], bids_root=config.execution.bids_dir, existing_derivatives=anat_derivatives, freesurfer=config.workflow.run_reconall, longitudinal=config.workflow.longitudinal, omp_nthreads=config.nipype.omp_nthreads, output_dir=fmriprep_dir, segmentation_atlases=config.execution.segmentation_atlases_dir, skull_strip_mode=config.workflow.skull_strip_t1w, skull_strip_template=Reference.from_string( config.workflow.skull_strip_template)[0], sloppy=config.execution.sloppy, spaces=spaces, ) # fmt: off workflow.connect([ (inputnode, anat_preproc_wf, [ ('subjects_dir', 'inputnode.subjects_dir'), ]), (inputnode, summary, [ ('subjects_dir', 'subjects_dir'), ]), (bidssrc, summary, [ ('bold', 'bold'), ]), (bids_info, summary, [ ('subject', 'subject_id'), ]), (bids_info, anat_preproc_wf, [ (('subject', _prefix), 'inputnode.subject_id'), ]), ( bidssrc, anat_preproc_wf, [ ('t1w', 'inputnode.t1w'), ('t2w', 'inputnode.t2w'), # ('roi', 'inputnode.roi'), # ('flair', 'inputnode.flair'), ]), (summary, ds_report_summary, [ ('out_report', 'in_file'), ]), (about, ds_report_about, [ ('out_report', 'in_file'), ]), ]) if not anat_derivatives: workflow.connect([ (bidssrc, bids_info, [ (('t1w', fix_multi_source_name), 'in_file'), ]), (bidssrc, summary, [ ('t1w', 't1w'), ('t2w', 't2w'), ]), (bidssrc, ds_report_summary, [ (('t1w', fix_multi_source_name), 'source_file'), ]), (bidssrc, ds_report_about, [ (('t1w', fix_multi_source_name), 'source_file'), ]), ]) else: workflow.connect([ (bidssrc, bids_info, [ (('bold', fix_multi_source_name), 'in_file'), ]), (anat_preproc_wf, summary, [ ('outputnode.t1w_preproc', 't1w'), ]), (anat_preproc_wf, ds_report_summary, [ ('outputnode.t1w_preproc', 'source_file'), ]), (anat_preproc_wf, ds_report_about, [ ('outputnode.t1w_preproc', 'source_file'), ]), ]) # fmt: on # Overwrite 
``out_path_base`` of smriprep's DataSinks for node in workflow.list_node_names(): if node.split(".")[-1].startswith("ds_"): workflow.get_node(node).interface.out_path_base = "" if anat_only: return workflow raise NotImplementedError("BOLD processing is not yet implemented.") # Append the functional section to the existing anatomical exerpt # That way we do not need to stream down the number of bold datasets anat_preproc_wf.__postdesc__ = ((anat_preproc_wf.__postdesc__ or "") + f""" Functional data preprocessing : For each of the {len(subject_data['bold'])} BOLD runs found per subject (across all tasks and sessions), the following preprocessing was performed. """) for bold_file in subject_data["bold"]: func_preproc_wf = init_func_preproc_wf(bold_file) # fmt: off workflow.connect([ ( anat_preproc_wf, func_preproc_wf, [ ('outputnode.anat_preproc', 'inputnode.anat_preproc'), ('outputnode.anat_mask', 'inputnode.anat_mask'), ('outputnode.anat_dseg', 'inputnode.anat_dseg'), ('outputnode.anat_aseg', 'inputnode.anat_aseg'), ('outputnode.anat_aparc', 'inputnode.anat_aparc'), ('outputnode.anat_tpms', 'inputnode.anat_tpms'), ('outputnode.template', 'inputnode.template'), ('outputnode.anat2std_xfm', 'inputnode.anat2std_xfm'), ('outputnode.std2anat_xfm', 'inputnode.std2anat_xfm'), # Undefined if --fs-no-reconall, but this is safe ('outputnode.subjects_dir', 'inputnode.subjects_dir'), ('outputnode.subject_id', 'inputnode.subject_id'), ('outputnode.anat2fsnative_xfm', 'inputnode.t1w2fsnative_xfm'), ('outputnode.fsnative2anat_xfm', 'inputnode.fsnative2t1w_xfm'), ]), ]) # fmt: on return workflow
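# The connection lists above use Nipype's list form of ``Workflow.connect``, where a source
# field may be replaced by a ``(field, function)`` tuple to transform the value in transit.
# A self-contained toy illustration (nodes and function are hypothetical, not part of NiBabies):
from nipype.interfaces import utility as niu
from nipype.pipeline import engine as pe


def _double(value):
    return value * 2


demo_wf = pe.Workflow(name="connect_demo_wf")
src = pe.Node(niu.IdentityInterface(fields=["x"]), name="src")
dst = pe.Node(niu.IdentityInterface(fields=["y"]), name="dst")
# [(source_node, dest_node, [((source_field, function), dest_field)])]
demo_wf.connect([(src, dst, [(("x", _double), "y")])])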
def init_single_subject_wf(subject_id): """ Set-up the preprocessing pipeline for a single subject. It collects and reports information about the subject, and prepares sub-workflows to perform anatomical and diffusion MRI preprocessing. Anatomical preprocessing is performed in a single workflow, regardless of the number of sessions. Diffusion MRI preprocessing is performed using a separate workflow for a full :abbr:`DWI (diffusion weighted imaging)` *entity*. A DWI *entity* may comprehend one or several runs (for instance, two opposed :abbr:`PE (phase-encoding)` directions. Workflow Graph .. workflow:: :graph2use: orig :simple_form: yes from dmriprep.config.testing import mock_config from dmriprep.workflows.base import init_single_subject_wf with mock_config(): wf = init_single_subject_wf("THP0005") Parameters ---------- subject_id : str List of subject labels Inputs ------ subjects_dir : os.pathlike FreeSurfer's ``$SUBJECTS_DIR`` """ from ..utils.misc import sub_prefix as _prefix name = f"single_subject_{subject_id}_wf" subject_data = collect_data(config.execution.layout, subject_id)[0] if "flair" in config.workflow.ignore: subject_data["flair"] = [] if "t2w" in config.workflow.ignore: subject_data["t2w"] = [] anat_only = config.workflow.anat_only # Make sure we always go through these two checks if not anat_only and not subject_data["dwi"]: raise Exception(f"No DWI data found for participant {subject_id}. " "All workflows require DWI images.") if not subject_data["t1w"]: raise Exception(f"No T1w images found for participant {subject_id}. " "All workflows require T1w images.") workflow = Workflow(name=name) workflow.__desc__ = f""" Results included in this manuscript come from preprocessing performed using *dMRIPrep* {config.environment.version} (@dmriprep; RRID:SCR_017412), which is based on *Nipype* {config.environment.nipype_version} (@nipype1; @nipype2; RRID:SCR_002502). """ workflow.__postdesc__ = """ For more details of the pipeline, see [the section corresponding to workflows in *dMRIPrep*'s documentation]\ (https://nipreps.github.io/dmriprep/master/workflows.html \ "dMRIPrep's documentation"). ### Copyright Waiver The above boilerplate text was automatically generated by dMRIPrep with the express intention that users should copy and paste this text into their manuscripts *unchanged*. It is released under the [CC0]\ (https://creativecommons.org/publicdomain/zero/1.0/) license. 
### References """ spaces = config.workflow.spaces output_dir = config.execution.output_dir fsinputnode = pe.Node(niu.IdentityInterface(fields=["subjects_dir"]), name="fsinputnode") bidssrc = pe.Node(BIDSDataGrabber(subject_data=subject_data, anat_only=anat_only), name="bidssrc") bids_info = pe.Node( BIDSInfo(bids_dir=config.execution.bids_dir, bids_validate=False), name="bids_info", ) summary = pe.Node( SubjectSummary( std_spaces=spaces.get_spaces(nonstandard=False), nstd_spaces=spaces.get_spaces(standard=False), ), name="summary", run_without_submitting=True, ) about = pe.Node( AboutSummary(version=config.environment.version, command=" ".join(sys.argv)), name="about", run_without_submitting=True, ) ds_report_summary = pe.Node( DerivativesDataSink(base_directory=str(output_dir), desc="summary", datatype="figures"), name="ds_report_summary", run_without_submitting=True, ) ds_report_about = pe.Node( DerivativesDataSink(base_directory=str(output_dir), desc="about", datatype="figures"), name="ds_report_about", run_without_submitting=True, ) anat_derivatives = config.execution.anat_derivatives if anat_derivatives: from smriprep.utils.bids import collect_derivatives std_spaces = spaces.get_spaces(nonstandard=False, dim=(3, )) anat_derivatives = collect_derivatives( anat_derivatives.absolute(), subject_id, std_spaces, config.workflow.run_reconall, ) # Preprocessing of T1w (includes registration to MNI) anat_preproc_wf = init_anat_preproc_wf( bids_root=str(config.execution.bids_dir), debug=config.execution.debug is True, existing_derivatives=anat_derivatives, freesurfer=config.workflow.run_reconall, hires=config.workflow.hires, longitudinal=config.workflow.longitudinal, omp_nthreads=config.nipype.omp_nthreads, output_dir=str(output_dir), skull_strip_fixed_seed=config.workflow.skull_strip_fixed_seed, skull_strip_mode="force", skull_strip_template=Reference.from_string( config.workflow.skull_strip_template)[0], spaces=spaces, t1w=subject_data["t1w"], ) anat_preproc_wf.__desc__ = f"\n\n{anat_preproc_wf.__desc__}" # fmt:off workflow.connect([ (fsinputnode, anat_preproc_wf, [("subjects_dir", "inputnode.subjects_dir")]), (bidssrc, bids_info, [(("t1w", fix_multi_T1w_source_name), "in_file") ]), (fsinputnode, summary, [("subjects_dir", "subjects_dir")]), (bidssrc, summary, [("t1w", "t1w"), ("t2w", "t2w"), ("dwi", "dwi")]), (bids_info, summary, [("subject", "subject_id")]), (bids_info, anat_preproc_wf, [(("subject", _prefix), "inputnode.subject_id")]), (bidssrc, anat_preproc_wf, [ ("t1w", "inputnode.t1w"), ("t2w", "inputnode.t2w"), ("roi", "inputnode.roi"), ("flair", "inputnode.flair"), ]), (bidssrc, ds_report_summary, [ (("t1w", fix_multi_T1w_source_name), "source_file"), ]), (summary, ds_report_summary, [("out_report", "in_file")]), (bidssrc, ds_report_about, [(("t1w", fix_multi_T1w_source_name), "source_file")]), (about, ds_report_about, [("out_report", "in_file")]), ]) # fmt:off # Overwrite ``out_path_base`` of smriprep's DataSinks for node in workflow.list_node_names(): if node.split(".")[-1].startswith("ds_"): workflow.get_node(node).interface.out_path_base = "dmriprep" if anat_only: return workflow from .dwi.base import init_dwi_preproc_wf # Append the dMRI section to the existing anatomical excerpt # That way we do not need to stream down the number of DWI datasets anat_preproc_wf.__postdesc__ = f"""\ {anat_preproc_wf.__postdesc__ or ''} Diffusion data preprocessing : For each of the {len(subject_data["dwi"])} DWI scans found per subject (across all sessions), the gradient table was vetted and 
converted into the *RASb* format (i.e., given in RAS+ scanner coordinates, normalized b-vectors and scaled b-values), and a *b=0* average for reference to the subsequent steps of preprocessing was calculated. """ # SDC Step 0: Determine whether fieldmaps can/should be estimated fmap_estimators = None if "fieldmap" not in config.workflow.ignore: from sdcflows import fieldmaps as fm from sdcflows.utils.wrangler import find_estimators from sdcflows.workflows.base import init_fmap_preproc_wf # SDC Step 1: Run basic heuristics to identify available data for fieldmap estimation fmap_estimators = find_estimators( layout=config.execution.layout, subject=subject_id, fmapless=False, ) # Add fieldmap-less estimators if not fmap_estimators and config.workflow.use_syn: # estimators = [fm.FieldmapEstimation()] raise NotImplementedError # Nuts and bolts: initialize individual run's pipeline dwi_preproc_list = [] for dwi_file in subject_data["dwi"]: dwi_preproc_wf = init_dwi_preproc_wf( dwi_file, has_fieldmap=bool(fmap_estimators), ) # fmt: off workflow.connect([ ( anat_preproc_wf, dwi_preproc_wf, [ ("outputnode.t1w_preproc", "inputnode.t1w_preproc"), ("outputnode.t1w_mask", "inputnode.t1w_mask"), ("outputnode.t1w_dseg", "inputnode.t1w_dseg"), ("outputnode.t1w_aseg", "inputnode.t1w_aseg"), ("outputnode.t1w_aparc", "inputnode.t1w_aparc"), ("outputnode.t1w_tpms", "inputnode.t1w_tpms"), ("outputnode.template", "inputnode.template"), ("outputnode.anat2std_xfm", "inputnode.anat2std_xfm"), ("outputnode.std2anat_xfm", "inputnode.std2anat_xfm"), # Undefined if --fs-no-reconall, but this is safe ("outputnode.subjects_dir", "inputnode.subjects_dir"), ("outputnode.t1w2fsnative_xfm", "inputnode.t1w2fsnative_xfm"), ("outputnode.fsnative2t1w_xfm", "inputnode.fsnative2t1w_xfm"), ]), (bids_info, dwi_preproc_wf, [("subject", "inputnode.subject_id")]), ]) # fmt: on # Keep a handle to each workflow dwi_preproc_list.append(dwi_preproc_wf) if not fmap_estimators: return workflow # SDC Step 2: Manually add further estimators (e.g., fieldmap-less) fmap_wf = init_fmap_preproc_wf( debug=config.execution.debug, estimators=fmap_estimators, omp_nthreads=config.nipype.omp_nthreads, output_dir=str(output_dir), subject=subject_id, ) fmap_wf.__desc__ = f""" *B<sub>0</sub>* fieldmap data preprocessing : A total of {len(fmap_estimators)} fieldmaps were found available within the input BIDS structure for this particular subject. 
""" # TODO: Requires nipreps/sdcflows#147 for dwi_preproc_wf in dwi_preproc_list: # fmt: off workflow.connect([ (fmap_wf, dwi_preproc_wf, [ ("outputnode.fmap", "inputnode.fmap"), ("outputnode.fmap_ref", "inputnode.fmap_ref"), ("outputnode.fmap_coeff", "inputnode.fmap_coeff"), ("outputnode.fmap_mask", "inputnode.fmap_mask"), ("outputnode.fmap_id", "inputnode.fmap_id"), ]), ]) # fmt: on # Overwrite ``out_path_base`` of sdcflows's DataSinks for node in fmap_wf.list_node_names(): if node.split(".")[-1].startswith("ds_"): fmap_wf.get_node(node).interface.out_path_base = "dmriprep" # Step 3: Manually connect PEPOLAR for estimator in fmap_estimators: if estimator.method != fm.EstimatorType.PEPOLAR: continue suffices = set(s.suffix for s in estimator.sources) if sorted(suffices) == ["epi"]: getattr(fmap_wf.inputs, f"in_{estimator.bids_id}").in_data = [ str(s.path) for s in estimator.sources ] getattr(fmap_wf.inputs, f"in_{estimator.bids_id}").metadata = [ s.metadata for s in estimator.sources ] else: raise NotImplementedError # from niworkflows.interfaces.utility import KeySelect # est_id = estimator.bids_id # estim_select = pe.MapNode( # KeySelect(fields=["metadata", "dwi_reference", "dwi_mask", "gradients_rasb",]), # name=f"fmap_select_{est_id}", # run_without_submitting=True, # iterfields=["key"] # ) # estim_select.inputs.key = [ # str(s.path) for s in estimator.sources if s.suffix in ("epi", "dwi", "sbref") # ] # # fmt:off # workflow.connect([ # (referencenode, estim_select, [("dwi_file", "keys"), # ("metadata", "metadata"), # ("dwi_reference", "dwi_reference"), # ("gradients_rasb", "gradients_rasb")]), # ]) # # fmt:on return workflow
def init_alt_bold_std_trans_wf( name="alt_bold_std_trans_wf", spaces=SpatialReferences( Reference.from_string("MNI152NLin6Asym:res-2"), checkpoint=True), memcalc=MemoryCalculator.default(), ): workflow = pe.Workflow(name=name) inputnode = pe.Node( niu.IdentityInterface(fields=[ "bold_file", "bold_mask", "itk_bold_to_t1", "out_warp", "anat2std_xfm", "bold_split", "xforms", ]), name="inputnode", ) bold_std_trans_wf_outputs = [ "bold_std", "bold_mask_std", "spatial_reference" ] outputnode = pe.Node( niu.IdentityInterface( fields=[f"alt_{a}" for a in bold_std_trans_wf_outputs]), name="outputnode", ) # alt_reference_spaces = spaces.get_spaces(nonstandard=False, dim=(3, )) # mergexfm = pe.MapNode(niu.Merge(numinputs=2), iterfield="in1", name="mergexfm") mergexfm.inputs.in1 = [ get_resource( f"tpl_{alt}_from_{constants.reference_space}_mode_image_xfm.h5") for alt in alt_reference_spaces ] workflow.connect(inputnode, "anat2std_xfm", mergexfm, "in2") # bold_std_trans_wf = init_bold_std_trans_wf( freesurfer=False, mem_gb=memcalc.volume_std_gb, omp_nthreads=config.nipype.omp_nthreads, spaces=spaces, name="bold_std_trans_wf", use_compression=not config.execution.low_mem, ) bold_std_trans_wf_inputnode = bold_std_trans_wf.get_node("inputnode") assert isinstance(bold_std_trans_wf_inputnode, pe.Node) bold_std_trans_wf_inputnode.inputs.templates = ["MNI152NLin6Asym"] workflow.connect(mergexfm, "out", bold_std_trans_wf, "inputnode.anat2std_xfm") workflow.connect(inputnode, "bold_file", bold_std_trans_wf, "inputnode.name_source") workflow.connect(inputnode, "bold_split", bold_std_trans_wf, "inputnode.bold_split") workflow.connect(inputnode, "xforms", bold_std_trans_wf, "inputnode.hmc_xforms") workflow.connect(inputnode, "itk_bold_to_t1", bold_std_trans_wf, "inputnode.itk_bold_to_t1") workflow.connect(inputnode, "bold_mask", bold_std_trans_wf, "inputnode.bold_mask") workflow.connect(inputnode, "out_warp", bold_std_trans_wf, "inputnode.fieldwarp") for a in bold_std_trans_wf_outputs: workflow.connect(bold_std_trans_wf, f"outputnode.{a}", outputnode, f"alt_{a}") return workflow
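# The partially commented-out ``mergexfm`` node above prepends a fixed template-to-template
# transform to the subject's ``anat2std_xfm`` for every alternative reference space.  A toy
# illustration of that MapNode/Merge pattern (filenames are hypothetical):
from nipype.interfaces import utility as niu
from nipype.pipeline import engine as pe

mergexfm_demo = pe.MapNode(niu.Merge(numinputs=2), iterfield="in1", name="mergexfm_demo")
mergexfm_demo.inputs.in1 = ["tpl_A_from_B_xfm.h5", "tpl_C_from_B_xfm.h5"]
mergexfm_demo.inputs.in2 = "anat2std_xfm.h5"
# After running, each element of ``out`` is [<template transform>, "anat2std_xfm.h5"],
# i.e. one transform chain per alternative space.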
def init_single_subject_wf(subject_id): """ Set-up the preprocessing pipeline for a single subject. It collects and reports information about the subject, and prepares sub-workflows to perform anatomical and diffusion MRI preprocessing. Anatomical preprocessing is performed in a single workflow, regardless of the number of sessions. Diffusion MRI preprocessing is performed using a separate workflow for a full :abbr:`DWI (diffusion weighted imaging)` *entity*. A DWI *entity* may comprehend one or several runs (for instance, two opposed :abbr:`PE (phase-encoding)` directions. Workflow Graph .. workflow:: :graph2use: orig :simple_form: yes from dmriprep.config.testing import mock_config from dmriprep.workflows.base import init_single_subject_wf with mock_config(): wf = init_single_subject_wf('THP0005') Parameters ---------- subject_id : str List of subject labels Inputs ------ subjects_dir : os.pathlike FreeSurfer's ``$SUBJECTS_DIR`` """ name = "single_subject_%s_wf" % subject_id subject_data = collect_data(config.execution.layout, subject_id)[0] if 'flair' in config.workflow.ignore: subject_data['flair'] = [] if 't2w' in config.workflow.ignore: subject_data['t2w'] = [] anat_only = config.workflow.anat_only # Make sure we always go through these two checks if not anat_only and not subject_data['dwi']: raise Exception(f"No DWI data found for participant {subject_id}. " "All workflows require DWI images.") if not subject_data['t1w']: raise Exception(f"No T1w images found for participant {subject_id}. " "All workflows require T1w images.") workflow = Workflow(name=name) workflow.__desc__ = f""" Results included in this manuscript come from preprocessing performed using *dMRIPrep* {config.environment.version} (@dmriprep; RRID:SCR_017412), which is based on *Nipype* {config.environment.nipype_version} (@nipype1; @nipype2; RRID:SCR_002502). """ workflow.__postdesc__ = """ For more details of the pipeline, see [the section corresponding to workflows in *dMRIPrep*'s documentation]\ (https://nipreps.github.io/dmriprep/master/workflows.html \ "dMRIPrep's documentation"). ### Copyright Waiver The above boilerplate text was automatically generated by dMRIPrep with the express intention that users should copy and paste this text into their manuscripts *unchanged*. It is released under the [CC0]\ (https://creativecommons.org/publicdomain/zero/1.0/) license. 
### References """ spaces = config.workflow.spaces reportlets_dir = str(config.execution.work_dir / 'reportlets') inputnode = pe.Node(niu.IdentityInterface(fields=['subjects_dir']), name='inputnode') bidssrc = pe.Node(BIDSDataGrabber(subject_data=subject_data, anat_only=anat_only), name='bidssrc') bids_info = pe.Node(BIDSInfo(bids_dir=config.execution.bids_dir, bids_validate=False), name='bids_info') summary = pe.Node(SubjectSummary( std_spaces=spaces.get_spaces(nonstandard=False), nstd_spaces=spaces.get_spaces(standard=False)), name='summary', run_without_submitting=True) about = pe.Node(AboutSummary(version=config.environment.version, command=' '.join(sys.argv)), name='about', run_without_submitting=True) ds_report_summary = pe.Node(DerivativesDataSink( base_directory=reportlets_dir, desc='summary', keep_dtype=True), name='ds_report_summary', run_without_submitting=True) ds_report_about = pe.Node(DerivativesDataSink( base_directory=reportlets_dir, desc='about', keep_dtype=True), name='ds_report_about', run_without_submitting=True) # Preprocessing of T1w (includes registration to MNI) anat_preproc_wf = init_anat_preproc_wf( bids_root=str(config.execution.bids_dir), debug=config.execution.debug is True, freesurfer=config.workflow.run_reconall, hires=config.workflow.hires, longitudinal=config.workflow.longitudinal, omp_nthreads=config.nipype.omp_nthreads, output_dir=str(config.execution.output_dir), reportlets_dir=reportlets_dir, skull_strip_fixed_seed=config.workflow.skull_strip_fixed_seed, skull_strip_mode='force', skull_strip_template=Reference.from_string( config.workflow.skull_strip_template)[0], spaces=spaces, t1w=subject_data['t1w'], ) workflow.connect([ (inputnode, anat_preproc_wf, [('subjects_dir', 'inputnode.subjects_dir')]), (bidssrc, bids_info, [(('t1w', fix_multi_T1w_source_name), 'in_file') ]), (inputnode, summary, [('subjects_dir', 'subjects_dir')]), (bidssrc, summary, [('t1w', 't1w'), ('t2w', 't2w'), ('dwi', 'dwi')]), (bids_info, summary, [('subject', 'subject_id')]), (bids_info, anat_preproc_wf, [(('subject', _prefix), 'inputnode.subject_id')]), (bidssrc, anat_preproc_wf, [('t1w', 'inputnode.t1w'), ('t2w', 'inputnode.t2w'), ('roi', 'inputnode.roi'), ('flair', 'inputnode.flair')]), (bidssrc, ds_report_summary, [(('t1w', fix_multi_T1w_source_name), 'source_file')]), (summary, ds_report_summary, [('out_report', 'in_file')]), (bidssrc, ds_report_about, [(('t1w', fix_multi_T1w_source_name), 'source_file')]), (about, ds_report_about, [('out_report', 'in_file')]), ]) # Overwrite ``out_path_base`` of smriprep's DataSinks for node in workflow.list_node_names(): if node.split('.')[-1].startswith('ds_'): workflow.get_node(node).interface.out_path_base = 'dmriprep' if anat_only: return workflow # Append the dMRI section to the existing anatomical excerpt # That way we do not need to stream down the number of bold datasets anat_preproc_wf.__postdesc__ = (anat_preproc_wf.__postdesc__ or '') + f""" Diffusion data preprocessing : For each of the {len(subject_data["dwi"])} dwi scans found per subject (across all sessions), the following preprocessing was performed.""" for dwi_file in subject_data['dwi']: dwi_preproc_wf = init_dwi_preproc_wf(dwi_file) workflow.connect([ ( anat_preproc_wf, dwi_preproc_wf, [ (('outputnode.t1w_preproc', _pop), 'inputnode.t1w_preproc'), ('outputnode.t1w_mask', 'inputnode.t1w_mask'), ('outputnode.t1w_dseg', 'inputnode.t1w_dseg'), ('outputnode.t1w_aseg', 'inputnode.t1w_aseg'), ('outputnode.t1w_aparc', 'inputnode.t1w_aparc'), ('outputnode.t1w_tpms', 
'inputnode.t1w_tpms'), ('outputnode.template', 'inputnode.template'), ('outputnode.anat2std_xfm', 'inputnode.anat2std_xfm'), ('outputnode.std2anat_xfm', 'inputnode.std2anat_xfm'), # Undefined if --fs-no-reconall, but this is safe ('outputnode.subjects_dir', 'inputnode.subjects_dir'), ('outputnode.subject_id', 'inputnode.subject_id'), ('outputnode.t1w2fsnative_xfm', 'inputnode.t1w2fsnative_xfm'), ('outputnode.fsnative2t1w_xfm', 'inputnode.fsnative2t1w_xfm') ]), ]) return workflow
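# The connections above reference two small helper functions that are not shown
# in this excerpt. The sketches below are illustrative only, written to match
# how they are used here (``_prefix`` builds the FreeSurfer ``sub-`` label fed
# into ``inputnode.subject_id``; ``_pop`` unwraps the single-item list delivered
# to ``inputnode.t1w_preproc``); the actual upstream definitions may differ.
def _prefix(subid):
    """Ensure the subject identifier carries the BIDS ``sub-`` prefix."""
    return subid if subid.startswith("sub-") else f"sub-{subid}"


def _pop(inlist):
    """Return the first element when a connection delivers a list or tuple."""
    if isinstance(inlist, (list, tuple)):
        return inlist[0]
    return inlist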
def init_single_subject_wf(subject_id): """ Organize the preprocessing pipeline for a single subject. It collects and reports information about the subject, and prepares sub-workflows to perform anatomical and functional preprocessing. Anatomical preprocessing is performed in a single workflow, regardless of the number of sessions. Functional preprocessing is performed using a separate workflow for each individual BOLD series. Workflow Graph .. workflow:: :graph2use: orig :simple_form: yes from nibabies.workflows.tests import mock_config from nibabies.workflows.base import init_single_subject_wf with mock_config(): wf = init_single_subject_wf('01') Parameters ---------- subject_id : :obj:`str` Subject label for this single-subject workflow. Inputs ------ subjects_dir : :obj:`str` FreeSurfer's ``$SUBJECTS_DIR``. """ from niworkflows.engine.workflows import LiterateWorkflow as Workflow from niworkflows.interfaces.bids import BIDSInfo, BIDSDataGrabber from niworkflows.interfaces.nilearn import NILEARN_VERSION from niworkflows.utils.bids import collect_data from niworkflows.utils.spaces import Reference from .anatomical import init_infant_anat_wf from ..utils.misc import fix_multi_source_name name = "single_subject_%s_wf" % subject_id subject_data = collect_data( config.execution.layout, subject_id, config.execution.task_id, config.execution.echo_idx, bids_filters=config.execution.bids_filters, )[0] if "flair" in config.workflow.ignore: subject_data["flair"] = [] if "t2w" in config.workflow.ignore: subject_data["t2w"] = [] anat_only = config.workflow.anat_only anat_derivatives = config.execution.anat_derivatives anat_modality = config.workflow.anat_modality spaces = config.workflow.spaces # Make sure we always go through these two checks if not anat_only and not subject_data["bold"]: task_id = config.execution.task_id raise RuntimeError( "No BOLD images found for participant {} and task {}. " "All workflows require BOLD images.".format( subject_id, task_id if task_id else "<all>")) if anat_derivatives: from smriprep.utils.bids import collect_derivatives std_spaces = spaces.get_spaces(nonstandard=False, dim=(3, )) anat_derivatives = collect_derivatives( anat_derivatives.absolute(), subject_id, std_spaces, config.workflow.run_reconall, ) if anat_derivatives is None: config.loggers.workflow.warning(f"""\ Attempted to access pre-existing anatomical derivatives at \ <{config.execution.anat_derivatives}>, however not all expectations of fMRIPrep \ were met (for participant <{subject_id}>, spaces <{', '.join(std_spaces)}>, \ reconall <{config.workflow.run_reconall}>).""") if not anat_derivatives and not subject_data[anat_modality]: raise Exception( f"No {anat_modality} images found for participant {subject_id}. " "All workflows require T1w images.") workflow = Workflow(name=name) workflow.__desc__ = """ Results included in this manuscript come from preprocessing performed using *fMRIPrep* {fmriprep_ver} (@fmriprep1; @fmriprep2; RRID:SCR_016216), which is based on *Nipype* {nipype_ver} (@nipype1; @nipype2; RRID:SCR_002502). """.format( fmriprep_ver=config.environment.version, nipype_ver=config.environment.nipype_version, ) workflow.__postdesc__ = """ Many internal operations of *fMRIPrep* use *Nilearn* {nilearn_ver} [@nilearn, RRID:SCR_001362], mostly within the functional processing workflow. For more details of the pipeline, see [the section corresponding to workflows in *fMRIPrep*'s documentation]\ (https://nibabies.readthedocs.io/en/latest/workflows.html \ "FMRIPrep's documentation"). 
### Copyright Waiver The above boilerplate text was automatically generated by fMRIPrep with the express intention that users should copy and paste this text into their manuscripts *unchanged*. It is released under the [CC0]\ (https://creativecommons.org/publicdomain/zero/1.0/) license. ### References """.format(nilearn_ver=NILEARN_VERSION) nibabies_dir = str(config.execution.nibabies_dir) inputnode = pe.Node(niu.IdentityInterface(fields=["subjects_dir"]), name="inputnode") bidssrc = pe.Node( BIDSDataGrabber( subject_data=subject_data, anat_only=anat_only, anat_derivatives=anat_derivatives, subject_id=subject_id, ), name="bidssrc", ) bids_info = pe.Node( BIDSInfo(bids_dir=config.execution.bids_dir, bids_validate=False), name="bids_info", ) summary = pe.Node( SubjectSummary( std_spaces=spaces.get_spaces(nonstandard=False), nstd_spaces=spaces.get_spaces(standard=False), ), name="summary", run_without_submitting=True, ) about = pe.Node( AboutSummary(version=config.environment.version, command=" ".join(sys.argv)), name="about", run_without_submitting=True, ) ds_report_summary = pe.Node( DerivativesDataSink( base_directory=nibabies_dir, desc="summary", datatype="figures", dismiss_entities=("echo", ), ), name="ds_report_summary", run_without_submitting=True, ) ds_report_about = pe.Node( DerivativesDataSink( base_directory=nibabies_dir, desc="about", datatype="figures", dismiss_entities=("echo", ), ), name="ds_report_about", run_without_submitting=True, ) # Preprocessing of anatomical (includes registration to UNCInfant) anat_preproc_wf = init_infant_anat_wf( ants_affine_init=config.workflow.ants_affine_init or True, age_months=config.workflow.age_months, anat_modality=anat_modality, t1w=subject_data["t1w"], t2w=subject_data["t2w"], bids_root=config.execution.bids_dir, existing_derivatives=anat_derivatives, freesurfer=config.workflow.run_reconall, longitudinal=config.workflow.longitudinal, omp_nthreads=config.nipype.omp_nthreads, output_dir=nibabies_dir, segmentation_atlases=config.execution.segmentation_atlases_dir, skull_strip_mode=config.workflow.skull_strip_t1w, skull_strip_template=Reference.from_string( config.workflow.skull_strip_template)[0], sloppy=config.execution.sloppy, spaces=spaces, ) # fmt: off workflow.connect([ (inputnode, anat_preproc_wf, [ ('subjects_dir', 'inputnode.subjects_dir'), ]), (inputnode, summary, [ ('subjects_dir', 'subjects_dir'), ]), (bidssrc, summary, [ ('bold', 'bold'), ]), (bids_info, summary, [ ('subject', 'subject_id'), ]), (bids_info, anat_preproc_wf, [ (('subject', _prefix), 'inputnode.subject_id'), ]), ( bidssrc, anat_preproc_wf, [ ('t1w', 'inputnode.t1w'), ('t2w', 'inputnode.t2w'), # ('roi', 'inputnode.roi'), # ('flair', 'inputnode.flair'), ]), (summary, ds_report_summary, [ ('out_report', 'in_file'), ]), (about, ds_report_about, [ ('out_report', 'in_file'), ]), ]) if not anat_derivatives: workflow.connect([ (bidssrc, bids_info, [ (('t1w', fix_multi_source_name), 'in_file'), ]), (bidssrc, summary, [ ('t1w', 't1w'), ('t2w', 't2w'), ]), (bidssrc, ds_report_summary, [ (('t1w', fix_multi_source_name), 'source_file'), ]), (bidssrc, ds_report_about, [ (('t1w', fix_multi_source_name), 'source_file'), ]), ]) else: workflow.connect([ (bidssrc, bids_info, [ (('bold', fix_multi_source_name), 'in_file'), ]), (anat_preproc_wf, summary, [ ('outputnode.t1w_preproc', 't1w'), ]), (anat_preproc_wf, ds_report_summary, [ ('outputnode.t1w_preproc', 'source_file'), ]), (anat_preproc_wf, ds_report_about, [ ('outputnode.t1w_preproc', 'source_file'), ]), ]) # fmt: on # Overwrite 
``out_path_base`` of smriprep's DataSinks for node in workflow.list_node_names(): if node.split(".")[-1].startswith("ds_"): workflow.get_node(node).interface.out_path_base = "" if anat_only: return workflow # Susceptibility distortion correction fmap_estimators = None if "fieldmap" not in config.workflow.ignore: from sdcflows.utils.wrangler import find_estimators from sdcflows.workflows.base import init_fmap_preproc_wf # SDC Step 1: Run basic heuristics to identify available data for fieldmap estimation # For now, no fmapless fmap_estimators = find_estimators( layout=config.execution.layout, subject=subject_id, fmapless=False, # config.workflow.use_syn, force_fmapless=False, # config.workflow.force_syn, ) # Append the functional section to the existing anatomical exerpt # That way we do not need to stream down the number of bold datasets anat_preproc_wf.__postdesc__ = ((anat_preproc_wf.__postdesc__ if hasattr( anat_preproc_wf, '__postdesc__') else "") + f""" Functional data preprocessing : For each of the {len(subject_data['bold'])} BOLD runs found per subject (across all tasks and sessions), the following preprocessing was performed. """) # calculate reference image(s) for BOLD images # group all BOLD files based on same: # 1) session # 2) PE direction # 3) total readout time from niworkflows.workflows.epi.refmap import init_epi_reference_wf _, bold_groupings = group_bolds_ref(layout=config.execution.layout, subject=subject_id) if any(not x for x in bold_groupings): print("No BOLD files found for one or more reference groupings") return workflow func_preproc_wfs = [] for idx, bold_files in enumerate(bold_groupings): bold_ref_wf = init_epi_reference_wf( auto_bold_nss=True, name=f'bold_reference_wf{idx}', omp_nthreads=config.nipype.omp_nthreads) bold_ref_wf.inputs.inputnode.in_files = bold_files for idx, bold_file in enumerate(bold_files): func_preproc_wf = init_func_preproc_wf( bold_file, has_fieldmap=bool(fmap_estimators)) # fmt: off workflow.connect([ (bold_ref_wf, func_preproc_wf, [ ('outputnode.epi_ref_file', 'inputnode.bold_ref'), (('outputnode.xfm_files', _select_iter_idx, idx), 'inputnode.bold_ref_xfm'), (('outputnode.n_dummy', _select_iter_idx, idx), 'inputnode.n_dummy_scans'), ]), ( anat_preproc_wf, func_preproc_wf, [ ('outputnode.anat_preproc', 'inputnode.anat_preproc'), ('outputnode.anat_mask', 'inputnode.anat_mask'), ('outputnode.anat_brain', 'inputnode.anat_brain'), ('outputnode.anat_dseg', 'inputnode.anat_dseg'), ('outputnode.anat_aseg', 'inputnode.anat_aseg'), ('outputnode.anat_aparc', 'inputnode.anat_aparc'), ('outputnode.anat_tpms', 'inputnode.anat_tpms'), ('outputnode.template', 'inputnode.template'), ('outputnode.anat2std_xfm', 'inputnode.anat2std_xfm'), ('outputnode.std2anat_xfm', 'inputnode.std2anat_xfm'), # Undefined if --fs-no-reconall, but this is safe ('outputnode.subjects_dir', 'inputnode.subjects_dir'), ('outputnode.subject_id', 'inputnode.subject_id'), ('outputnode.anat2fsnative_xfm', 'inputnode.anat2fsnative_xfm'), ('outputnode.fsnative2anat_xfm', 'inputnode.fsnative2anat_xfm'), ]), ]) # fmt: on func_preproc_wfs.append(func_preproc_wf) if not fmap_estimators: config.loggers.workflow.warning( "Data for fieldmap estimation not present. 
Please note that these data " "will not be corrected for susceptibility distortions.") return workflow config.loggers.workflow.info( f"Fieldmap estimators found: {[e.method for e in fmap_estimators]}") from sdcflows.workflows.base import init_fmap_preproc_wf from sdcflows import fieldmaps as fm fmap_wf = init_fmap_preproc_wf( debug=bool( config.execution.debug), # TODO: Add debug option for fieldmaps estimators=fmap_estimators, omp_nthreads=config.nipype.omp_nthreads, output_dir=nibabies_dir, subject=subject_id, ) fmap_wf.__desc__ = f""" Fieldmap data preprocessing : A total of {len(fmap_estimators)} fieldmaps were found available within the input BIDS structure for this particular subject. """ for func_preproc_wf in func_preproc_wfs: # fmt: off workflow.connect([ (fmap_wf, func_preproc_wf, [ ("outputnode.fmap", "inputnode.fmap"), ("outputnode.fmap_ref", "inputnode.fmap_ref"), ("outputnode.fmap_coeff", "inputnode.fmap_coeff"), ("outputnode.fmap_mask", "inputnode.fmap_mask"), ("outputnode.fmap_id", "inputnode.fmap_id"), ]), ]) # fmt: on # Overwrite ``out_path_base`` of sdcflows's DataSinks for node in fmap_wf.list_node_names(): if node.split(".")[-1].startswith("ds_"): fmap_wf.get_node(node).interface.out_path_base = "" # Step 3: Manually connect PEPOLAR for estimator in fmap_estimators: config.loggers.workflow.info(f"""\ Setting-up fieldmap "{estimator.bids_id}" ({estimator.method}) with \ <{', '.join(s.path.name for s in estimator.sources)}>""") if estimator.method in (fm.EstimatorType.MAPPED, fm.EstimatorType.PHASEDIFF): continue suffices = set(s.suffix for s in estimator.sources) if estimator.method == fm.EstimatorType.PEPOLAR and sorted( suffices) == ["epi"]: getattr(fmap_wf.inputs, f"in_{estimator.bids_id}").in_data = [ str(s.path) for s in estimator.sources ] getattr(fmap_wf.inputs, f"in_{estimator.bids_id}").metadata = [ s.metadata for s in estimator.sources ] continue if estimator.method == fm.EstimatorType.PEPOLAR: raise NotImplementedError( "Sophisticated PEPOLAR schemes (e.g., using DWI+EPI) are unsupported." ) return workflow
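# ``_select_iter_idx`` is used in the BOLD-reference connections above but is
# not defined in this excerpt. A minimal sketch consistent with that usage --
# picking the transform or dummy-scan entry that corresponds to the ``idx``-th
# BOLD file within its reference grouping -- is given below; the upstream
# helper may differ in its error handling.
def _select_iter_idx(in_list, idx):
    """Return the element of ``in_list`` matching the iteration index ``idx``."""
    if isinstance(in_list, (list, tuple)):
        return in_list[idx]
    raise AttributeError(f"Invalid index selection: {in_list}, {idx}")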
def init_single_subject_wf(subject_id): """ Set up the preprocessing pipeline for a single subject. It collects and reports information about the subject, and prepares sub-workflows to perform anatomical and diffusion MRI preprocessing. Anatomical preprocessing is performed in a single workflow, regardless of the number of sessions. Diffusion MRI preprocessing is performed using a separate workflow for a full :abbr:`DWI (diffusion weighted imaging)` *entity*. A DWI *entity* may comprise one or several runs (for instance, two opposed :abbr:`PE (phase-encoding)` directions). Workflow Graph .. workflow:: :graph2use: orig :simple_form: yes from dmriprep.config.testing import mock_config from dmriprep.workflows.base import init_single_subject_wf with mock_config(): wf = init_single_subject_wf("THP0005") Parameters ---------- subject_id : str Single subject label Inputs ------ subjects_dir : os.pathlike FreeSurfer's ``$SUBJECTS_DIR`` """ name = f"single_subject_{subject_id}_wf" subject_data = collect_data(config.execution.layout, subject_id)[0] if "flair" in config.workflow.ignore: subject_data["flair"] = [] if "t2w" in config.workflow.ignore: subject_data["t2w"] = [] anat_only = config.workflow.anat_only # Make sure we always go through these two checks if not anat_only and not subject_data["dwi"]: raise Exception(f"No DWI data found for participant {subject_id}. " "All workflows require DWI images.") if not subject_data["t1w"]: raise Exception(f"No T1w images found for participant {subject_id}. " "All workflows require T1w images.") workflow = Workflow(name=name) workflow.__desc__ = f""" Results included in this manuscript come from preprocessing performed using *dMRIPrep* {config.environment.version} (@dmriprep; RRID:SCR_017412), which is based on *Nipype* {config.environment.nipype_version} (@nipype1; @nipype2; RRID:SCR_002502). """ workflow.__postdesc__ = """ For more details of the pipeline, see [the section corresponding to workflows in *dMRIPrep*'s documentation]\ (https://nipreps.github.io/dmriprep/master/workflows.html \ "dMRIPrep's documentation"). ### Copyright Waiver The above boilerplate text was automatically generated by dMRIPrep with the express intention that users should copy and paste this text into their manuscripts *unchanged*. It is released under the [CC0]\ (https://creativecommons.org/publicdomain/zero/1.0/) license.
### References """ spaces = config.workflow.spaces output_dir = config.execution.output_dir fsinputnode = pe.Node(niu.IdentityInterface(fields=["subjects_dir"]), name="fsinputnode") bidssrc = pe.Node(BIDSDataGrabber(subject_data=subject_data, anat_only=anat_only), name="bidssrc") bids_info = pe.Node(BIDSInfo(bids_dir=config.execution.bids_dir, bids_validate=False), name="bids_info") summary = pe.Node(SubjectSummary( std_spaces=spaces.get_spaces(nonstandard=False), nstd_spaces=spaces.get_spaces(standard=False)), name="summary", run_without_submitting=True) about = pe.Node(AboutSummary(version=config.environment.version, command=" ".join(sys.argv)), name="about", run_without_submitting=True) ds_report_summary = pe.Node(DerivativesDataSink( base_directory=str(output_dir), desc="summary", datatype="figures"), name="ds_report_summary", run_without_submitting=True) ds_report_about = pe.Node(DerivativesDataSink( base_directory=str(output_dir), desc="about", datatype="figures"), name="ds_report_about", run_without_submitting=True) anat_derivatives = config.execution.anat_derivatives if anat_derivatives: from smriprep.utils.bids import collect_derivatives std_spaces = spaces.get_spaces(nonstandard=False, dim=(3, )) anat_derivatives = collect_derivatives( anat_derivatives.absolute(), subject_id, std_spaces, config.workflow.run_reconall, ) # Preprocessing of T1w (includes registration to MNI) anat_preproc_wf = init_anat_preproc_wf( bids_root=str(config.execution.bids_dir), debug=config.execution.debug is True, existing_derivatives=anat_derivatives, freesurfer=config.workflow.run_reconall, hires=config.workflow.hires, longitudinal=config.workflow.longitudinal, omp_nthreads=config.nipype.omp_nthreads, output_dir=str(output_dir), skull_strip_fixed_seed=config.workflow.skull_strip_fixed_seed, skull_strip_mode="force", skull_strip_template=Reference.from_string( config.workflow.skull_strip_template)[0], spaces=spaces, t1w=subject_data["t1w"], ) workflow.connect([ (fsinputnode, anat_preproc_wf, [("subjects_dir", "inputnode.subjects_dir")]), (bidssrc, bids_info, [(("t1w", fix_multi_T1w_source_name), "in_file") ]), (fsinputnode, summary, [("subjects_dir", "subjects_dir")]), (bidssrc, summary, [("t1w", "t1w"), ("t2w", "t2w"), ("dwi", "dwi")]), (bids_info, summary, [("subject", "subject_id")]), (bids_info, anat_preproc_wf, [(("subject", _prefix), "inputnode.subject_id")]), (bidssrc, anat_preproc_wf, [("t1w", "inputnode.t1w"), ("t2w", "inputnode.t2w"), ("roi", "inputnode.roi"), ("flair", "inputnode.flair")]), (bidssrc, ds_report_summary, [(("t1w", fix_multi_T1w_source_name), "source_file")]), (summary, ds_report_summary, [("out_report", "in_file")]), (bidssrc, ds_report_about, [(("t1w", fix_multi_T1w_source_name), "source_file")]), (about, ds_report_about, [("out_report", "in_file")]), ]) # Overwrite ``out_path_base`` of smriprep's DataSinks for node in workflow.list_node_names(): if node.split(".")[-1].startswith("ds_"): workflow.get_node(node).interface.out_path_base = "dmriprep" if anat_only: return workflow # Append the dMRI section to the existing anatomical excerpt # That way we do not need to stream down the number of bold datasets anat_preproc_wf.__postdesc__ = (anat_preproc_wf.__postdesc__ or "") + f""" Diffusion data preprocessing : For each of the {len(subject_data["dwi"])} DWI scans found per subject (across all sessions), the gradient table was vetted and converted into the *RASb* format (i.e., given in RAS+ scanner coordinates, normalized b-vectors and scaled b-values), and a *b=0* average for 
reference to the subsequent steps of preprocessing was calculated. """ layout = config.execution.layout dwi_data = tuple([(dwi, layout.get_metadata(dwi), layout.get_bvec(dwi), layout.get_bval(dwi)) for dwi in subject_data["dwi"]]) inputnode = pe.Node(niu.IdentityInterface(fields=["dwi_data"]), name="inputnode") inputnode.iterables = [("dwi_data", dwi_data)] referencenode = pe.JoinNode(niu.IdentityInterface(fields=[ "dwi_file", "metadata", "dwi_reference", "dwi_mask", "gradients_rasb" ]), name="referencenode", joinsource="inputnode", run_without_submitting=True) split_info = pe.Node(niu.Function( function=_unpack, output_names=["dwi_file", "metadata", "bvec", "bval"]), name="split_info", run_without_submitting=True) early_b0ref_wf = init_early_b0ref_wf() workflow.connect([ (inputnode, split_info, [("dwi_data", "in_tuple")]), (split_info, early_b0ref_wf, [("dwi_file", "inputnode.dwi_file"), ("bvec", "inputnode.in_bvec"), ("bval", "inputnode.in_bval")]), (split_info, referencenode, [("dwi_file", "dwi_file"), ("metadata", "metadata")]), (early_b0ref_wf, referencenode, [ ("outputnode.dwi_reference", "dwi_reference"), ("outputnode.dwi_mask", "dwi_mask"), ("outputnode.gradients_rasb", "gradients_rasb"), ]), ]) fmap_estimation_wf = init_fmap_estimation_wf(subject_data["dwi"], debug=config.execution.debug) workflow.connect([ (referencenode, fmap_estimation_wf, [("dwi_reference", "inputnode.dwi_reference"), ("dwi_mask", "inputnode.dwi_mask")]), ]) return workflow
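# ``_unpack`` feeds the ``split_info`` Function node above: ``inputnode``
# iterates over one ``(dwi_file, metadata, bvec, bval)`` tuple per DWI run, and
# this helper splits that tuple into the node's four declared outputs. This is
# a minimal sketch matching the ``output_names`` declared above; the upstream
# definition may differ.
def _unpack(in_tuple):
    """Split a (dwi_file, metadata, bvec, bval) tuple into separate outputs."""
    dwi_file, metadata, bvec, bval = in_tuple
    return dwi_file, metadata, bvec, bval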