def init_single_subject_wf(subject_id):
    """
    Organize the preprocessing pipeline for a single subject.

    It collects and reports information about the subject, and prepares
    sub-workflows to perform anatomical and functional preprocessing.
    Anatomical preprocessing is performed in a single workflow, regardless of
    the number of sessions.
    Functional preprocessing is performed using a separate workflow for each
    individual BOLD series.

    Workflow Graph
        .. workflow::
            :graph2use: orig
            :simple_form: yes

            from fprodents.workflows.tests import mock_config
            from fprodents.workflows.base import init_single_subject_wf
            with mock_config():
                wf = init_single_subject_wf('01')

    Parameters
    ----------
    subject_id : :obj:`str`
        Subject label for this single-subject workflow.

    Inputs
    ------
    subjects_dir : :obj:`str`
        FreeSurfer's ``$SUBJECTS_DIR``.

    """
    from niworkflows.engine.workflows import LiterateWorkflow as Workflow
    from niworkflows.interfaces.bids import BIDSInfo
    from niworkflows.interfaces.nilearn import NILEARN_VERSION
    from niworkflows.utils.bids import collect_data
    from niworkflows.utils.connections import listify
    from niworkflows.utils.spaces import Reference
    from niworkflows.workflows.epi.refmap import init_epi_reference_wf
    from ..patch.interfaces import BIDSDataGrabber
    from ..patch.utils import extract_entities, fix_multi_source_name
    from ..patch.workflows.anatomical import init_anat_preproc_wf

    # Query the BIDS layout for this subject's images, honoring any
    # task/echo/filter restrictions from the run configuration.
    subject_data = collect_data(
        config.execution.layout,
        subject_id,
        config.execution.task_id,
        config.execution.echo_idx,
        bids_filters=config.execution.bids_filters,
    )[0]
    anat_only = config.workflow.anat_only
    # Make sure we always go through these two checks
    if not anat_only and not subject_data["bold"]:
        task_id = config.execution.task_id
        raise RuntimeError(
            f"No BOLD images found for participant <{subject_id}> and "
            f"task <{task_id or 'all'}>. All workflows require BOLD images."
        )
    workflow = Workflow(name=f"single_subject_{subject_id}_wf")
    # Boilerplate citation text rendered into the generated reports.
    workflow.__desc__ = """
Results included in this manuscript come from preprocessing
performed using *fMRIPrep-rodents* {fmriprep_ver}
(@fmriprep1; @fmriprep2; RRID:SCR_016216),
which is based on *Nipype* {nipype_ver}
(@nipype1; @nipype2; RRID:SCR_002502).

""".format(
        fmriprep_ver=config.environment.version,
        nipype_ver=config.environment.nipype_version,
    )
    workflow.__postdesc__ = """

Many internal operations of *fMRIPrep* use
*Nilearn* {nilearn_ver} [@nilearn, RRID:SCR_001362],
mostly within the functional processing workflow.
For more details of the pipeline, see [the section corresponding
to workflows in *fMRIPrep*'s documentation]\
(https://fmriprep-rodents.readthedocs.io/en/latest/workflows.html \
"FMRIPrep's documentation").


### Copyright Waiver

The above boilerplate text was automatically generated by fMRIPrep
with the express intention that users should copy and paste this
text into their manuscripts *unchanged*.
It is released under the [CC0]\
(https://creativecommons.org/publicdomain/zero/1.0/) license.

### References

""".format(nilearn_ver=NILEARN_VERSION)

    spaces = config.workflow.spaces
    output_dir = str(config.execution.output_dir)

    # Nodes feeding subject metadata and report-summary sinks.
    inputnode = pe.Node(niu.IdentityInterface(fields=["subjects_dir"]),
                        name="inputnode")

    bidssrc = pe.Node(
        BIDSDataGrabber(subject_data=subject_data, anat_only=anat_only,
                        subject_id=subject_id),
        name="bidssrc",
    )

    bids_info = pe.Node(
        BIDSInfo(bids_dir=config.execution.bids_dir, bids_validate=False),
        name="bids_info",
    )

    summary = pe.Node(
        SubjectSummary(
            std_spaces=spaces.get_spaces(nonstandard=False),
            nstd_spaces=spaces.get_spaces(standard=False),
        ),
        name="summary",
        run_without_submitting=True,
    )

    about = pe.Node(
        AboutSummary(version=config.environment.version,
                     command=" ".join(sys.argv)),
        name="about",
        run_without_submitting=True,
    )

    ds_report_summary = pe.Node(
        DerivativesDataSink(
            base_directory=output_dir,
            desc="summary",
            datatype="figures",
            dismiss_entities=("echo", ),
        ),
        name="ds_report_summary",
        run_without_submitting=True,
    )

    ds_report_about = pe.Node(
        DerivativesDataSink(
            base_directory=output_dir,
            desc="about",
            datatype="figures",
            dismiss_entities=("echo", ),
        ),
        name="ds_report_about",
        run_without_submitting=True,
    )

    # If pre-run anatomical derivatives were supplied, try to collect them;
    # a failed collection falls back to full anatomical processing (None).
    anat_derivatives = config.execution.anat_derivatives
    if anat_derivatives:
        from smriprep.utils.bids import collect_derivatives

        std_spaces = spaces.get_spaces(nonstandard=False, dim=(3, ))
        anat_derivatives = collect_derivatives(
            anat_derivatives.absolute(),
            subject_id,
            std_spaces,
            False,
        )
        if anat_derivatives is None:
            config.loggers.workflow.warning(f"""\
Attempted to access pre-existing anatomical derivatives at \
<{config.execution.anat_derivatives}>, however not all expectations of fMRIPrep \
were met (for participant <{subject_id}>, spaces <{', '.join(std_spaces)}>.""")

    # Preprocessing of T1w (includes registration to MNI)
    # NOTE(review): this rodent pipeline is T2w-driven (t2w= below) despite
    # the comment above inherited from the human pipeline — confirm.
    anat_preproc_wf = init_anat_preproc_wf(
        bids_root=str(config.execution.bids_dir),
        debug=config.execution.debug is True,
        existing_derivatives=anat_derivatives,
        longitudinal=config.workflow.longitudinal,
        omp_nthreads=config.nipype.omp_nthreads,
        output_dir=output_dir,
        skull_strip_fixed_seed=config.workflow.skull_strip_fixed_seed,
        skull_strip_mode=config.workflow.skull_strip_t1w,
        skull_strip_template=Reference.from_string(
            config.workflow.skull_strip_template)[0],
        spaces=spaces,
        t2w=subject_data["t2w"],
    )

    # fmt:off
    workflow.connect([
        (bidssrc, bids_info, [(('t2w', fix_multi_source_name), 'in_file')]),
        (inputnode, summary, [('subjects_dir', 'subjects_dir')]),
        (bidssrc, summary, [('t1w', 't1w'), ('t2w', 't2w'), ('bold', 'bold')]),
        (bids_info, summary, [('subject', 'subject_id')]),
        (bidssrc, anat_preproc_wf, [('t2w', 'inputnode.t2w'),
                                    ('roi', 'inputnode.roi')]),
        (bidssrc, ds_report_summary, [(('t2w', fix_multi_source_name),
                                       'source_file')]),
        (summary, ds_report_summary, [('out_report', 'in_file')]),
        (bidssrc, ds_report_about, [(('t2w', fix_multi_source_name),
                                     'source_file')]),
        (about, ds_report_about, [('out_report', 'in_file')]),
    ])
    # fmt:on

    # Overwrite ``out_path_base`` of smriprep's DataSinks
    for node in workflow.list_node_names():
        if node.split(".")[-1].startswith("ds_"):
            workflow.get_node(node).interface.out_path_base = "fmriprep"

    if anat_only:
        return workflow

    # Append the functional section to the existing anatomical exerpt
    # That way we do not need to stream down the number of bold datasets
    anat_preproc_wf.__postdesc__ = ((anat_preproc_wf.__postdesc__ or "") + """

Functional data preprocessing

: For each of the {num_bold} BOLD runs found per subject (across all
tasks and sessions), the following preprocessing was performed.
""".format(num_bold=len(subject_data["bold"])))

    # One functional sub-workflow per BOLD series.
    for bold_file in subject_data["bold"]:
        echoes = extract_entities(bold_file).get("echo", [])
        echo_idxs = listify(echoes)
        # NOTE(review): treats a series as multi-echo only when MORE than two
        # echoes are present (`> 2`, not `> 1`) — confirm this is intentional.
        multiecho = len(echo_idxs) > 2

        # The default N4 shrink factor (4) appears to artificially blur values
        # across anisotropic voxels. Shrink factors are intended to speed up
        # calculation but in most cases, the extra calculation time appears to
        # be minimal. Similarly, the use of an asymmetric bspline grid improves
        # performance in anisotropic voxels. The number of N4 iterations are
        # also reduced.
        bold_ref_wf = init_epi_reference_wf(
            auto_bold_nss=True,
            omp_nthreads=config.nipype.omp_nthreads,
            n4_iter=4,
            adaptive_bspline_grid=True,
            shrink_factor=1,
        )
        # Multi-echo runs use only the first echo to build the EPI reference.
        bold_ref_wf.inputs.inputnode.in_files = (bold_file if not multiecho
                                                 else bold_file[0])

        func_preproc_wf = init_func_preproc_wf(bold_file)
        # fmt:off
        workflow.connect([
            (anat_preproc_wf, func_preproc_wf,
             [('outputnode.t2w_preproc', 'inputnode.anat_preproc'),
              ('outputnode.t2w_mask', 'inputnode.anat_mask'),
              ('outputnode.t2w_dseg', 'inputnode.anat_dseg'),
              ('outputnode.t2w_tpms', 'inputnode.anat_tpms'),
              ('outputnode.template', 'inputnode.template'),
              ('outputnode.anat2std_xfm', 'inputnode.anat2std_xfm'),
              ('outputnode.std2anat_xfm', 'inputnode.std2anat_xfm')]),
            (bold_ref_wf, func_preproc_wf,
             [('outputnode.epi_ref_file', 'inputnode.ref_file'),
              ('outputnode.xfm_files', 'inputnode.bold_ref_xfm'),
              ('outputnode.validation_report', 'inputnode.validation_report'),
              (('outputnode.n_dummy', _pop), 'inputnode.n_dummy_scans')]),
        ])
        # fmt:on
    return workflow
def init_single_subject_wf(subject_id):
    """
    Organize the preprocessing pipeline for a single subject.

    It collects and reports information about the subject, and prepares
    sub-workflows to perform anatomical and functional preprocessing.
    Anatomical preprocessing is performed in a single workflow, regardless of
    the number of sessions.
    Functional preprocessing is performed using a separate workflow for each
    individual BOLD series.

    Workflow Graph
        .. workflow::
            :graph2use: orig
            :simple_form: yes

            from nibabies.workflows.tests import mock_config
            from nibabies.workflows.base import init_single_subject_wf
            with mock_config():
                wf = init_single_subject_wf('01')

    Parameters
    ----------
    subject_id : :obj:`str`
        Subject label for this single-subject workflow.

    Inputs
    ------
    subjects_dir : :obj:`str`
        FreeSurfer's ``$SUBJECTS_DIR``.

    """
    from niworkflows.engine.workflows import LiterateWorkflow as Workflow
    from niworkflows.interfaces.bids import BIDSInfo, BIDSDataGrabber
    from niworkflows.interfaces.nilearn import NILEARN_VERSION
    from niworkflows.utils.bids import collect_data
    from niworkflows.utils.spaces import Reference
    from .anatomical import init_infant_anat_wf
    from ..utils.misc import fix_multi_source_name

    name = "single_subject_%s_wf" % subject_id
    # Query the BIDS layout for this subject's images, honoring any
    # task/echo/filter restrictions from the run configuration.
    subject_data = collect_data(
        config.execution.layout,
        subject_id,
        config.execution.task_id,
        config.execution.echo_idx,
        bids_filters=config.execution.bids_filters,
    )[0]

    # Drop modalities the user asked to ignore.
    if "flair" in config.workflow.ignore:
        subject_data["flair"] = []
    if "t2w" in config.workflow.ignore:
        subject_data["t2w"] = []

    anat_only = config.workflow.anat_only
    anat_derivatives = config.execution.anat_derivatives
    anat_modality = config.workflow.anat_modality
    spaces = config.workflow.spaces
    # Make sure we always go through these two checks
    if not anat_only and not subject_data["bold"]:
        task_id = config.execution.task_id
        raise RuntimeError(
            "No BOLD images found for participant {} and task {}. "
            "All workflows require BOLD images.".format(
                subject_id, task_id if task_id else "<all>"))

    # If pre-run anatomical derivatives were supplied, try to collect them;
    # a failed collection falls back to full anatomical processing (None).
    if anat_derivatives:
        from smriprep.utils.bids import collect_derivatives
        std_spaces = spaces.get_spaces(nonstandard=False, dim=(3, ))
        anat_derivatives = collect_derivatives(
            anat_derivatives.absolute(),
            subject_id,
            std_spaces,
            config.workflow.run_reconall,
        )
        if anat_derivatives is None:
            config.loggers.workflow.warning(f"""\
Attempted to access pre-existing anatomical derivatives at \
<{config.execution.anat_derivatives}>, however not all expectations of fMRIPrep \
were met (for participant <{subject_id}>, spaces <{', '.join(std_spaces)}>, \
reconall <{config.workflow.run_reconall}>).""")

    if not anat_derivatives and not subject_data[anat_modality]:
        raise Exception(
            f"No {anat_modality} images found for participant {subject_id}. "
            "All workflows require T1w images.")

    workflow = Workflow(name=name)
    # Boilerplate citation text rendered into the generated reports.
    workflow.__desc__ = """
Results included in this manuscript come from preprocessing
performed using *fMRIPrep* {fmriprep_ver}
(@fmriprep1; @fmriprep2; RRID:SCR_016216),
which is based on *Nipype* {nipype_ver}
(@nipype1; @nipype2; RRID:SCR_002502).

""".format(
        fmriprep_ver=config.environment.version,
        nipype_ver=config.environment.nipype_version,
    )
    workflow.__postdesc__ = """

Many internal operations of *fMRIPrep* use
*Nilearn* {nilearn_ver} [@nilearn, RRID:SCR_001362],
mostly within the functional processing workflow.
For more details of the pipeline, see [the section corresponding
to workflows in *fMRIPrep*'s documentation]\
(https://nibabies.readthedocs.io/en/latest/workflows.html \
"FMRIPrep's documentation").


### Copyright Waiver

The above boilerplate text was automatically generated by fMRIPrep
with the express intention that users should copy and paste this
text into their manuscripts *unchanged*.
It is released under the [CC0]\
(https://creativecommons.org/publicdomain/zero/1.0/) license.

### References

""".format(nilearn_ver=NILEARN_VERSION)

    fmriprep_dir = str(config.execution.fmriprep_dir)

    # Nodes feeding subject metadata and report-summary sinks.
    inputnode = pe.Node(niu.IdentityInterface(fields=["subjects_dir"]),
                        name="inputnode")

    bidssrc = pe.Node(
        BIDSDataGrabber(
            subject_data=subject_data,
            anat_only=anat_only,
            anat_derivatives=anat_derivatives,
            subject_id=subject_id,
        ),
        name="bidssrc",
    )

    bids_info = pe.Node(
        BIDSInfo(bids_dir=config.execution.bids_dir, bids_validate=False),
        name="bids_info",
    )

    summary = pe.Node(
        SubjectSummary(
            std_spaces=spaces.get_spaces(nonstandard=False),
            nstd_spaces=spaces.get_spaces(standard=False),
        ),
        name="summary",
        run_without_submitting=True,
    )

    about = pe.Node(
        AboutSummary(version=config.environment.version,
                     command=" ".join(sys.argv)),
        name="about",
        run_without_submitting=True,
    )

    ds_report_summary = pe.Node(
        DerivativesDataSink(
            base_directory=fmriprep_dir,
            desc="summary",
            datatype="figures",
            dismiss_entities=("echo", ),
        ),
        name="ds_report_summary",
        run_without_submitting=True,
    )

    ds_report_about = pe.Node(
        DerivativesDataSink(
            base_directory=fmriprep_dir,
            desc="about",
            datatype="figures",
            dismiss_entities=("echo", ),
        ),
        name="ds_report_about",
        run_without_submitting=True,
    )

    # Preprocessing of anatomical (includes registration to UNCInfant)
    anat_preproc_wf = init_infant_anat_wf(
        # NOTE(review): ``X or True`` always evaluates True, so the config
        # flag can never disable affine initialization — confirm intent.
        ants_affine_init=config.workflow.ants_affine_init or True,
        age_months=config.workflow.age_months,
        anat_modality=anat_modality,
        t1w=subject_data['t1w'],
        t2w=subject_data['t2w'],
        bids_root=config.execution.bids_dir,
        existing_derivatives=anat_derivatives,
        freesurfer=config.workflow.run_reconall,
        longitudinal=config.workflow.longitudinal,
        omp_nthreads=config.nipype.omp_nthreads,
        output_dir=fmriprep_dir,
        segmentation_atlases=config.execution.segmentation_atlases_dir,
        skull_strip_mode=config.workflow.skull_strip_t1w,
        skull_strip_template=Reference.from_string(
            config.workflow.skull_strip_template)[0],
        sloppy=config.execution.sloppy,
        spaces=spaces,
    )

    # fmt: off
    workflow.connect([
        (inputnode, anat_preproc_wf, [
            ('subjects_dir', 'inputnode.subjects_dir'),
        ]),
        (inputnode, summary, [
            ('subjects_dir', 'subjects_dir'),
        ]),
        (bidssrc, summary, [
            ('bold', 'bold'),
        ]),
        (bids_info, summary, [
            ('subject', 'subject_id'),
        ]),
        (bids_info, anat_preproc_wf, [
            (('subject', _prefix), 'inputnode.subject_id'),
        ]),
        (bidssrc, anat_preproc_wf, [
            ('t1w', 'inputnode.t1w'),
            ('t2w', 'inputnode.t2w'),
            # ('roi', 'inputnode.roi'),
            # ('flair', 'inputnode.flair'),
        ]),
        (summary, ds_report_summary, [
            ('out_report', 'in_file'),
        ]),
        (about, ds_report_about, [
            ('out_report', 'in_file'),
        ]),
    ])

    # Report source files differ depending on whether raw anatomicals or
    # precomputed derivatives drive the pipeline.
    if not anat_derivatives:
        workflow.connect([
            (bidssrc, bids_info, [
                (('t1w', fix_multi_source_name), 'in_file'),
            ]),
            (bidssrc, summary, [
                ('t1w', 't1w'),
                ('t2w', 't2w'),
            ]),
            (bidssrc, ds_report_summary, [
                (('t1w', fix_multi_source_name), 'source_file'),
            ]),
            (bidssrc, ds_report_about, [
                (('t1w', fix_multi_source_name), 'source_file'),
            ]),
        ])
    else:
        workflow.connect([
            (bidssrc, bids_info, [
                (('bold', fix_multi_source_name), 'in_file'),
            ]),
            (anat_preproc_wf, summary, [
                ('outputnode.t1w_preproc', 't1w'),
            ]),
            (anat_preproc_wf, ds_report_summary, [
                ('outputnode.t1w_preproc', 'source_file'),
            ]),
            (anat_preproc_wf, ds_report_about, [
                ('outputnode.t1w_preproc', 'source_file'),
            ]),
        ])
    # fmt: on

    # Overwrite ``out_path_base`` of smriprep's DataSinks
    for node in workflow.list_node_names():
        if node.split(".")[-1].startswith("ds_"):
            workflow.get_node(node).interface.out_path_base = ""

    if anat_only:
        return workflow

    # Functional processing is not wired up yet in this port; everything
    # below this raise is intentionally unreachable scaffolding.
    raise NotImplementedError("BOLD processing is not yet implemented.")

    # Append the functional section to the existing anatomical exerpt
    # That way we do not need to stream down the number of bold datasets
    anat_preproc_wf.__postdesc__ = ((anat_preproc_wf.__postdesc__ or "") + f"""

Functional data preprocessing

: For each of the {len(subject_data['bold'])} BOLD runs found per subject
 (across all tasks and sessions), the following preprocessing was performed.
""")

    for bold_file in subject_data["bold"]:
        func_preproc_wf = init_func_preproc_wf(bold_file)
        # fmt: off
        workflow.connect([
            (anat_preproc_wf, func_preproc_wf, [
                ('outputnode.anat_preproc', 'inputnode.anat_preproc'),
                ('outputnode.anat_mask', 'inputnode.anat_mask'),
                ('outputnode.anat_dseg', 'inputnode.anat_dseg'),
                ('outputnode.anat_aseg', 'inputnode.anat_aseg'),
                ('outputnode.anat_aparc', 'inputnode.anat_aparc'),
                ('outputnode.anat_tpms', 'inputnode.anat_tpms'),
                ('outputnode.template', 'inputnode.template'),
                ('outputnode.anat2std_xfm', 'inputnode.anat2std_xfm'),
                ('outputnode.std2anat_xfm', 'inputnode.std2anat_xfm'),
                # Undefined if --fs-no-reconall, but this is safe
                ('outputnode.subjects_dir', 'inputnode.subjects_dir'),
                ('outputnode.subject_id', 'inputnode.subject_id'),
                ('outputnode.anat2fsnative_xfm', 'inputnode.t1w2fsnative_xfm'),
                ('outputnode.fsnative2anat_xfm', 'inputnode.fsnative2t1w_xfm'),
            ]),
        ])
        # fmt: on
    return workflow
def init_single_subject_wf(subject_id):
    """
    Set-up the preprocessing pipeline for a single subject.

    It collects and reports information about the subject, and prepares
    sub-workflows to perform anatomical and diffusion MRI preprocessing.

    Anatomical preprocessing is performed in a single workflow, regardless of
    the number of sessions.
    Diffusion MRI preprocessing is performed using a separate workflow for a
    full :abbr:`DWI (diffusion weighted imaging)` *entity*.
    A DWI *entity* may comprehend one or several runs (for instance, two
    opposed :abbr:`PE (phase-encoding)` directions.

    Workflow Graph
        .. workflow::
            :graph2use: orig
            :simple_form: yes

            from dmriprep.config.testing import mock_config
            from dmriprep.workflows.base import init_single_subject_wf
            with mock_config():
                wf = init_single_subject_wf("THP0005")

    Parameters
    ----------
    subject_id : str
        List of subject labels

    Inputs
    ------
    subjects_dir : os.pathlike
        FreeSurfer's ``$SUBJECTS_DIR``

    """
    from ..utils.misc import sub_prefix as _prefix

    name = f"single_subject_{subject_id}_wf"
    # Query the BIDS layout for this subject's images.
    subject_data = collect_data(config.execution.layout, subject_id)[0]

    # Drop modalities the user asked to ignore.
    if "flair" in config.workflow.ignore:
        subject_data["flair"] = []
    if "t2w" in config.workflow.ignore:
        subject_data["t2w"] = []

    anat_only = config.workflow.anat_only

    # Make sure we always go through these two checks
    if not anat_only and not subject_data["dwi"]:
        raise Exception(f"No DWI data found for participant {subject_id}. "
                        "All workflows require DWI images.")

    if not subject_data["t1w"]:
        raise Exception(f"No T1w images found for participant {subject_id}. "
                        "All workflows require T1w images.")

    workflow = Workflow(name=name)
    # Boilerplate citation text rendered into the generated reports.
    workflow.__desc__ = f"""
Results included in this manuscript come from preprocessing
performed using *dMRIPrep* {config.environment.version}
(@dmriprep; RRID:SCR_017412),
which is based on *Nipype* {config.environment.nipype_version}
(@nipype1; @nipype2; RRID:SCR_002502).

"""
    workflow.__postdesc__ = """

For more details of the pipeline, see [the section corresponding
to workflows in *dMRIPrep*'s documentation]\
(https://nipreps.github.io/dmriprep/master/workflows.html \
"dMRIPrep's documentation").


### Copyright Waiver

The above boilerplate text was automatically generated by dMRIPrep
with the express intention that users should copy and paste this
text into their manuscripts *unchanged*.
It is released under the [CC0]\
(https://creativecommons.org/publicdomain/zero/1.0/) license.

### References

"""
    spaces = config.workflow.spaces
    output_dir = config.execution.output_dir

    # Nodes feeding subject metadata and report-summary sinks.
    fsinputnode = pe.Node(niu.IdentityInterface(fields=["subjects_dir"]),
                          name="fsinputnode")

    bidssrc = pe.Node(BIDSDataGrabber(subject_data=subject_data,
                                      anat_only=anat_only),
                      name="bidssrc")

    bids_info = pe.Node(
        BIDSInfo(bids_dir=config.execution.bids_dir, bids_validate=False),
        name="bids_info",
    )

    summary = pe.Node(
        SubjectSummary(
            std_spaces=spaces.get_spaces(nonstandard=False),
            nstd_spaces=spaces.get_spaces(standard=False),
        ),
        name="summary",
        run_without_submitting=True,
    )

    about = pe.Node(
        AboutSummary(version=config.environment.version,
                     command=" ".join(sys.argv)),
        name="about",
        run_without_submitting=True,
    )

    ds_report_summary = pe.Node(
        DerivativesDataSink(base_directory=str(output_dir),
                            desc="summary",
                            datatype="figures"),
        name="ds_report_summary",
        run_without_submitting=True,
    )

    ds_report_about = pe.Node(
        DerivativesDataSink(base_directory=str(output_dir),
                            desc="about",
                            datatype="figures"),
        name="ds_report_about",
        run_without_submitting=True,
    )

    # If pre-run anatomical derivatives were supplied, try to collect them.
    anat_derivatives = config.execution.anat_derivatives
    if anat_derivatives:
        from smriprep.utils.bids import collect_derivatives
        std_spaces = spaces.get_spaces(nonstandard=False, dim=(3, ))
        anat_derivatives = collect_derivatives(
            anat_derivatives.absolute(),
            subject_id,
            std_spaces,
            config.workflow.run_reconall,
        )

    # Preprocessing of T1w (includes registration to MNI)
    anat_preproc_wf = init_anat_preproc_wf(
        bids_root=str(config.execution.bids_dir),
        debug=config.execution.debug is True,
        existing_derivatives=anat_derivatives,
        freesurfer=config.workflow.run_reconall,
        hires=config.workflow.hires,
        longitudinal=config.workflow.longitudinal,
        omp_nthreads=config.nipype.omp_nthreads,
        output_dir=str(output_dir),
        skull_strip_fixed_seed=config.workflow.skull_strip_fixed_seed,
        skull_strip_mode="force",
        skull_strip_template=Reference.from_string(
            config.workflow.skull_strip_template)[0],
        spaces=spaces,
        t1w=subject_data["t1w"],
    )
    anat_preproc_wf.__desc__ = f"\n\n{anat_preproc_wf.__desc__}"

    # fmt:off
    workflow.connect([
        (fsinputnode, anat_preproc_wf, [("subjects_dir",
                                         "inputnode.subjects_dir")]),
        (bidssrc, bids_info, [(("t1w", fix_multi_T1w_source_name), "in_file")]),
        (fsinputnode, summary, [("subjects_dir", "subjects_dir")]),
        (bidssrc, summary, [("t1w", "t1w"), ("t2w", "t2w"), ("dwi", "dwi")]),
        (bids_info, summary, [("subject", "subject_id")]),
        (bids_info, anat_preproc_wf, [(("subject", _prefix),
                                       "inputnode.subject_id")]),
        (bidssrc, anat_preproc_wf, [
            ("t1w", "inputnode.t1w"),
            ("t2w", "inputnode.t2w"),
            ("roi", "inputnode.roi"),
            ("flair", "inputnode.flair"),
        ]),
        (bidssrc, ds_report_summary, [
            (("t1w", fix_multi_T1w_source_name), "source_file"),
        ]),
        (summary, ds_report_summary, [("out_report", "in_file")]),
        (bidssrc, ds_report_about, [(("t1w", fix_multi_T1w_source_name),
                                     "source_file")]),
        (about, ds_report_about, [("out_report", "in_file")]),
    ])
    # fmt:on  (original mistakenly repeated ``# fmt:off`` here)

    # Overwrite ``out_path_base`` of smriprep's DataSinks
    for node in workflow.list_node_names():
        if node.split(".")[-1].startswith("ds_"):
            workflow.get_node(node).interface.out_path_base = "dmriprep"

    if anat_only:
        return workflow

    from .dwi.base import init_dwi_preproc_wf

    # Append the dMRI section to the existing anatomical excerpt
    # That way we do not need to stream down the number of DWI datasets
    anat_preproc_wf.__postdesc__ = f"""\
{anat_preproc_wf.__postdesc__ or ''}

Diffusion data preprocessing

: For each of the {len(subject_data["dwi"])} DWI scans found per subject
 (across all sessions), the gradient table was vetted and converted into the
*RASb* format (i.e., given in RAS+ scanner coordinates, normalized b-vectors
and scaled b-values), and a *b=0* average for reference to the subsequent
steps of preprocessing was calculated.
"""

    # SDC Step 0: Determine whether fieldmaps can/should be estimated
    fmap_estimators = None
    if "fieldmap" not in config.workflow.ignore:
        from sdcflows import fieldmaps as fm
        from sdcflows.utils.wrangler import find_estimators
        from sdcflows.workflows.base import init_fmap_preproc_wf

        # SDC Step 1: Run basic heuristics to identify available data for
        # fieldmap estimation
        fmap_estimators = find_estimators(
            layout=config.execution.layout,
            subject=subject_id,
            fmapless=False,
        )

        # Add fieldmap-less estimators
        if not fmap_estimators and config.workflow.use_syn:
            # estimators = [fm.FieldmapEstimation()]
            raise NotImplementedError

    # Nuts and bolts: initialize individual run's pipeline
    dwi_preproc_list = []
    for dwi_file in subject_data["dwi"]:
        dwi_preproc_wf = init_dwi_preproc_wf(
            dwi_file,
            has_fieldmap=bool(fmap_estimators),
        )
        # fmt: off
        workflow.connect([
            (anat_preproc_wf, dwi_preproc_wf, [
                ("outputnode.t1w_preproc", "inputnode.t1w_preproc"),
                ("outputnode.t1w_mask", "inputnode.t1w_mask"),
                ("outputnode.t1w_dseg", "inputnode.t1w_dseg"),
                ("outputnode.t1w_aseg", "inputnode.t1w_aseg"),
                ("outputnode.t1w_aparc", "inputnode.t1w_aparc"),
                ("outputnode.t1w_tpms", "inputnode.t1w_tpms"),
                ("outputnode.template", "inputnode.template"),
                ("outputnode.anat2std_xfm", "inputnode.anat2std_xfm"),
                ("outputnode.std2anat_xfm", "inputnode.std2anat_xfm"),
                # Undefined if --fs-no-reconall, but this is safe
                ("outputnode.subjects_dir", "inputnode.subjects_dir"),
                ("outputnode.t1w2fsnative_xfm", "inputnode.t1w2fsnative_xfm"),
                ("outputnode.fsnative2t1w_xfm", "inputnode.fsnative2t1w_xfm"),
            ]),
            (bids_info, dwi_preproc_wf, [("subject", "inputnode.subject_id")]),
        ])
        # fmt: on

        # Keep a handle to each workflow
        dwi_preproc_list.append(dwi_preproc_wf)

    if not fmap_estimators:
        return workflow

    # SDC Step 2: Manually add further estimators (e.g., fieldmap-less)
    fmap_wf = init_fmap_preproc_wf(
        debug=config.execution.debug,
        estimators=fmap_estimators,
        omp_nthreads=config.nipype.omp_nthreads,
        output_dir=str(output_dir),
        subject=subject_id,
    )
    fmap_wf.__desc__ = f"""

*B<sub>0</sub>* fieldmap data preprocessing

: A total of {len(fmap_estimators)} fieldmaps were found available within the
input BIDS structure for this particular subject.
"""

    # TODO: Requires nipreps/sdcflows#147
    for dwi_preproc_wf in dwi_preproc_list:
        # fmt: off
        workflow.connect([
            (fmap_wf, dwi_preproc_wf, [
                ("outputnode.fmap", "inputnode.fmap"),
                ("outputnode.fmap_ref", "inputnode.fmap_ref"),
                ("outputnode.fmap_coeff", "inputnode.fmap_coeff"),
                ("outputnode.fmap_mask", "inputnode.fmap_mask"),
                ("outputnode.fmap_id", "inputnode.fmap_id"),
            ]),
        ])
        # fmt: on

    # Overwrite ``out_path_base`` of sdcflows's DataSinks
    for node in fmap_wf.list_node_names():
        if node.split(".")[-1].startswith("ds_"):
            fmap_wf.get_node(node).interface.out_path_base = "dmriprep"

    # Step 3: Manually connect PEPOLAR
    for estimator in fmap_estimators:
        if estimator.method != fm.EstimatorType.PEPOLAR:
            continue

        suffices = set(s.suffix for s in estimator.sources)

        if sorted(suffices) == ["epi"]:
            # Pure EPI ("pepolar") estimators can be fed directly.
            getattr(fmap_wf.inputs, f"in_{estimator.bids_id}").in_data = [
                str(s.path) for s in estimator.sources
            ]
            getattr(fmap_wf.inputs, f"in_{estimator.bids_id}").metadata = [
                s.metadata for s in estimator.sources
            ]
        else:
            # Mixed dwi/sbref sources would need run-level selection;
            # prototype kept below for reference.
            raise NotImplementedError
            # from niworkflows.interfaces.utility import KeySelect
            # est_id = estimator.bids_id
            # estim_select = pe.MapNode(
            #     KeySelect(fields=["metadata", "dwi_reference", "dwi_mask",
            #                       "gradients_rasb",]),
            #     name=f"fmap_select_{est_id}",
            #     run_without_submitting=True,
            #     iterfields=["key"]
            # )
            # estim_select.inputs.key = [
            #     str(s.path) for s in estimator.sources
            #     if s.suffix in ("epi", "dwi", "sbref")
            # ]
            # # fmt:off
            # workflow.connect([
            #     (referencenode, estim_select, [("dwi_file", "keys"),
            #                                    ("metadata", "metadata"),
            #                                    ("dwi_reference", "dwi_reference"),
            #                                    ("gradients_rasb", "gradients_rasb")]),
            # ])
            # # fmt:on

    return workflow
def init_single_subject_wf(subject_id):
    """
    Organize the preprocessing pipeline for a single subject.

    It collects and reports information about the subject, and prepares
    sub-workflows to perform anatomical and functional preprocessing.
    Anatomical preprocessing is performed in a single workflow, regardless of
    the number of sessions.
    Functional preprocessing is performed using a separate workflow for each
    individual BOLD series.

    Workflow Graph
        .. workflow::
            :graph2use: orig
            :simple_form: yes

            from fmriprep.workflows.tests import mock_config
            from fmriprep.workflows.base import init_single_subject_wf
            with mock_config():
                wf = init_single_subject_wf('01')

    Parameters
    ----------
    subject_id : :obj:`str`
        Subject label for this single-subject workflow.

    Inputs
    ------
    subjects_dir : :obj:`str`
        FreeSurfer's ``$SUBJECTS_DIR``.

    """
    from niworkflows.engine.workflows import LiterateWorkflow as Workflow
    from niworkflows.interfaces.bids import BIDSInfo, BIDSDataGrabber
    from niworkflows.interfaces.nilearn import NILEARN_VERSION
    from niworkflows.utils.bids import collect_data
    from niworkflows.utils.misc import fix_multi_T1w_source_name
    from niworkflows.utils.spaces import Reference
    from smriprep.workflows.anatomical import init_anat_preproc_wf

    name = "single_subject_%s_wf" % subject_id
    # Query the BIDS layout for this subject's images, honoring any
    # task/echo/filter restrictions from the run configuration.
    subject_data = collect_data(config.execution.layout,
                                subject_id,
                                config.execution.task_id,
                                config.execution.echo_idx,
                                bids_filters=config.execution.bids_filters)[0]

    # Drop modalities the user asked to ignore.
    if 'flair' in config.workflow.ignore:
        subject_data['flair'] = []
    if 't2w' in config.workflow.ignore:
        subject_data['t2w'] = []

    anat_only = config.workflow.anat_only

    # Make sure we always go through these two checks
    if not anat_only and not subject_data['bold']:
        task_id = config.execution.task_id
        raise RuntimeError(
            "No BOLD images found for participant {} and task {}. "
            "All workflows require BOLD images.".format(
                subject_id, task_id if task_id else '<all>'))
    if not subject_data['t1w']:
        raise Exception("No T1w images found for participant {}. "
                        "All workflows require T1w images.".format(subject_id))

    workflow = Workflow(name=name)
    # Boilerplate citation text rendered into the generated reports.
    workflow.__desc__ = """
Results included in this manuscript come from preprocessing
performed using *fMRIPrep* {fmriprep_ver}
(@fmriprep1; @fmriprep2; RRID:SCR_016216),
which is based on *Nipype* {nipype_ver}
(@nipype1; @nipype2; RRID:SCR_002502).

""".format(fmriprep_ver=config.environment.version,
           nipype_ver=config.environment.nipype_version)
    workflow.__postdesc__ = """

Many internal operations of *fMRIPrep* use
*Nilearn* {nilearn_ver} [@nilearn, RRID:SCR_001362],
mostly within the functional processing workflow.
For more details of the pipeline, see [the section corresponding
to workflows in *fMRIPrep*'s documentation]\
(https://fmriprep.readthedocs.io/en/latest/workflows.html \
"FMRIPrep's documentation").


### Copyright Waiver

The above boilerplate text was automatically generated by fMRIPrep
with the express intention that users should copy and paste this
text into their manuscripts *unchanged*.
It is released under the [CC0]\
(https://creativecommons.org/publicdomain/zero/1.0/) license.

### References

""".format(nilearn_ver=NILEARN_VERSION)

    spaces = config.workflow.spaces
    output_dir = str(config.execution.output_dir)

    # Nodes feeding subject metadata and report-summary sinks.
    inputnode = pe.Node(niu.IdentityInterface(fields=['subjects_dir']),
                        name='inputnode')

    bidssrc = pe.Node(BIDSDataGrabber(subject_data=subject_data,
                                      anat_only=anat_only,
                                      subject_id=subject_id),
                      name='bidssrc')

    bids_info = pe.Node(BIDSInfo(
        bids_dir=config.execution.bids_dir, bids_validate=False),
        name='bids_info')

    summary = pe.Node(SubjectSummary(
        std_spaces=spaces.get_spaces(nonstandard=False),
        nstd_spaces=spaces.get_spaces(standard=False)),
        name='summary', run_without_submitting=True)

    about = pe.Node(AboutSummary(version=config.environment.version,
                                 command=' '.join(sys.argv)),
                    name='about', run_without_submitting=True)

    ds_report_summary = pe.Node(DerivativesDataSink(base_directory=output_dir,
                                                    desc='summary',
                                                    datatype="figures"),
                                name='ds_report_summary',
                                run_without_submitting=True)

    ds_report_about = pe.Node(DerivativesDataSink(base_directory=output_dir,
                                                  desc='about',
                                                  datatype="figures"),
                              name='ds_report_about',
                              run_without_submitting=True)

    # If pre-run anatomical derivatives were supplied, try to collect them.
    anat_derivatives = config.execution.anat_derivatives
    if anat_derivatives:
        from smriprep.utils.bids import collect_derivatives
        std_spaces = spaces.get_spaces(nonstandard=False, dim=(3, ))
        anat_derivatives = collect_derivatives(
            anat_derivatives.absolute(),
            subject_id,
            std_spaces,
            config.workflow.run_reconall,
        )

    # Preprocessing of T1w (includes registration to MNI)
    anat_preproc_wf = init_anat_preproc_wf(
        bids_root=str(config.execution.bids_dir),
        debug=config.execution.debug is True,
        existing_derivatives=anat_derivatives,
        freesurfer=config.workflow.run_reconall,
        hires=config.workflow.hires,
        longitudinal=config.workflow.longitudinal,
        omp_nthreads=config.nipype.omp_nthreads,
        output_dir=output_dir,
        skull_strip_fixed_seed=config.workflow.skull_strip_fixed_seed,
        skull_strip_mode=config.workflow.skull_strip_t1w,
        skull_strip_template=Reference.from_string(
            config.workflow.skull_strip_template)[0],
        spaces=spaces,
        t1w=subject_data['t1w'],
    )

    workflow.connect([
        (inputnode, anat_preproc_wf, [('subjects_dir',
                                       'inputnode.subjects_dir')]),
        (bidssrc, bids_info, [(('t1w', fix_multi_T1w_source_name), 'in_file')]),
        (inputnode, summary, [('subjects_dir', 'subjects_dir')]),
        (bidssrc, summary, [('t1w', 't1w'), ('t2w', 't2w'), ('bold', 'bold')]),
        (bids_info, summary, [('subject', 'subject_id')]),
        # ``_prefix`` is defined elsewhere in this module.
        (bids_info, anat_preproc_wf, [(('subject', _prefix),
                                       'inputnode.subject_id')]),
        (bidssrc, anat_preproc_wf, [('t1w', 'inputnode.t1w'),
                                    ('t2w', 'inputnode.t2w'),
                                    ('roi', 'inputnode.roi'),
                                    ('flair', 'inputnode.flair')]),
        (bidssrc, ds_report_summary, [(('t1w', fix_multi_T1w_source_name),
                                       'source_file')]),
        (summary, ds_report_summary, [('out_report', 'in_file')]),
        (bidssrc, ds_report_about, [(('t1w', fix_multi_T1w_source_name),
                                     'source_file')]),
        (about, ds_report_about, [('out_report', 'in_file')]),
    ])

    # Overwrite ``out_path_base`` of smriprep's DataSinks
    for node in workflow.list_node_names():
        if node.split('.')[-1].startswith('ds_'):
            workflow.get_node(node).interface.out_path_base = 'fmriprep'

    if anat_only:
        return workflow

    # Append the functional section to the existing anatomical exerpt
    # That way we do not need to stream down the number of bold datasets
    anat_preproc_wf.__postdesc__ = (anat_preproc_wf.__postdesc__ or '') + """

Functional data preprocessing

: For each of the {num_bold} BOLD runs found per subject (across all
tasks and sessions), the following preprocessing was performed.
""".format(num_bold=len(subject_data['bold']))

    # One functional sub-workflow per BOLD series.
    for bold_file in subject_data['bold']:
        func_preproc_wf = init_func_preproc_wf(bold_file)

        workflow.connect([
            (anat_preproc_wf, func_preproc_wf, [
                ('outputnode.t1w_preproc', 'inputnode.t1w_preproc'),
                ('outputnode.t1w_mask', 'inputnode.t1w_mask'),
                ('outputnode.t1w_dseg', 'inputnode.t1w_dseg'),
                ('outputnode.t1w_aseg', 'inputnode.t1w_aseg'),
                ('outputnode.t1w_aparc', 'inputnode.t1w_aparc'),
                ('outputnode.t1w_tpms', 'inputnode.t1w_tpms'),
                ('outputnode.template', 'inputnode.template'),
                ('outputnode.anat2std_xfm', 'inputnode.anat2std_xfm'),
                ('outputnode.std2anat_xfm', 'inputnode.std2anat_xfm'),
                # Undefined if --fs-no-reconall, but this is safe
                ('outputnode.subjects_dir', 'inputnode.subjects_dir'),
                ('outputnode.subject_id', 'inputnode.subject_id'),
                ('outputnode.t1w2fsnative_xfm', 'inputnode.t1w2fsnative_xfm'),
                ('outputnode.fsnative2t1w_xfm', 'inputnode.fsnative2t1w_xfm'),
            ]),
        ])

    return workflow
def init_single_subject_wf(subject_id):
    """
    Organize the preprocessing pipeline for a single subject.

    It collects and reports information about the subject, and prepares
    sub-workflows to perform anatomical and functional preprocessing.
    Anatomical preprocessing is performed in a single workflow, regardless of
    the number of sessions.
    Functional preprocessing is performed using a separate workflow for each
    individual BOLD series.

    Workflow Graph
        .. workflow::
            :graph2use: orig
            :simple_form: yes

            from nibabies.workflows.tests import mock_config
            from nibabies.workflows.base import init_single_subject_wf
            with mock_config():
                wf = init_single_subject_wf('01')

    Parameters
    ----------
    subject_id : :obj:`str`
        Subject label for this single-subject workflow.

    Inputs
    ------
    subjects_dir : :obj:`str`
        FreeSurfer's ``$SUBJECTS_DIR``.

    """
    from niworkflows.engine.workflows import LiterateWorkflow as Workflow
    from niworkflows.interfaces.bids import BIDSInfo, BIDSDataGrabber
    from niworkflows.interfaces.nilearn import NILEARN_VERSION
    from niworkflows.utils.bids import collect_data
    from niworkflows.utils.spaces import Reference
    from .anatomical import init_infant_anat_wf
    from ..utils.misc import fix_multi_source_name

    name = "single_subject_%s_wf" % subject_id
    # Query the BIDS layout for all imaging data belonging to this subject,
    # honoring any task/echo filters configured on the command line.
    subject_data = collect_data(
        config.execution.layout,
        subject_id,
        config.execution.task_id,
        config.execution.echo_idx,
        bids_filters=config.execution.bids_filters,
    )[0]

    # Drop modalities the user asked to ignore so downstream workflows
    # never see them.
    if "flair" in config.workflow.ignore:
        subject_data["flair"] = []
    if "t2w" in config.workflow.ignore:
        subject_data["t2w"] = []

    anat_only = config.workflow.anat_only
    anat_derivatives = config.execution.anat_derivatives
    anat_modality = config.workflow.anat_modality
    spaces = config.workflow.spaces
    # Make sure we always go through these two checks
    if not anat_only and not subject_data["bold"]:
        task_id = config.execution.task_id
        raise RuntimeError(
            "No BOLD images found for participant {} and task {}. "
            "All workflows require BOLD images.".format(
                subject_id, task_id if task_id else "<all>"))

    # If pre-computed anatomical derivatives were supplied, validate that the
    # full expected set is present; fall back to recomputing (with a warning)
    # when they are incomplete.
    if anat_derivatives:
        from smriprep.utils.bids import collect_derivatives
        std_spaces = spaces.get_spaces(nonstandard=False, dim=(3, ))
        anat_derivatives = collect_derivatives(
            anat_derivatives.absolute(),
            subject_id,
            std_spaces,
            config.workflow.run_reconall,
        )
        if anat_derivatives is None:
            config.loggers.workflow.warning(f"""\
Attempted to access pre-existing anatomical derivatives at \
<{config.execution.anat_derivatives}>, however not all expectations of fMRIPrep \
were met (for participant <{subject_id}>, spaces <{', '.join(std_spaces)}>, \
reconall <{config.workflow.run_reconall}>).""")

    # Without usable derivatives, raw anatomical images are mandatory.
    if not anat_derivatives and not subject_data[anat_modality]:
        raise Exception(
            f"No {anat_modality} images found for participant {subject_id}. "
            "All workflows require T1w images.")

    workflow = Workflow(name=name)
    # Boilerplate methods text ("LiterateWorkflow" collects __desc__ and
    # __postdesc__ fragments into the citation-ready report).
    workflow.__desc__ = """
Results included in this manuscript come from preprocessing
performed using *fMRIPrep* {fmriprep_ver}
(@fmriprep1; @fmriprep2; RRID:SCR_016216),
which is based on *Nipype* {nipype_ver}
(@nipype1; @nipype2; RRID:SCR_002502).

""".format(
        fmriprep_ver=config.environment.version,
        nipype_ver=config.environment.nipype_version,
    )
    workflow.__postdesc__ = """
Many internal operations of *fMRIPrep* use
*Nilearn* {nilearn_ver} [@nilearn, RRID:SCR_001362],
mostly within the functional processing workflow.
For more details of the pipeline, see [the section corresponding
to workflows in *fMRIPrep*'s documentation]\
(https://nibabies.readthedocs.io/en/latest/workflows.html \
"FMRIPrep's documentation").


### Copyright Waiver

The above boilerplate text was automatically generated by fMRIPrep
with the express intention that users should copy and paste this
text into their manuscripts *unchanged*.
It is released under the [CC0]\
(https://creativecommons.org/publicdomain/zero/1.0/) license.

### References

""".format(nilearn_ver=NILEARN_VERSION)

    nibabies_dir = str(config.execution.nibabies_dir)

    # Graph source nodes: subject data grabber, BIDS metadata, and the
    # report-generating summary/about nodes with their datasinks.
    inputnode = pe.Node(niu.IdentityInterface(fields=["subjects_dir"]),
                        name="inputnode")

    bidssrc = pe.Node(
        BIDSDataGrabber(
            subject_data=subject_data,
            anat_only=anat_only,
            anat_derivatives=anat_derivatives,
            subject_id=subject_id,
        ),
        name="bidssrc",
    )

    bids_info = pe.Node(
        BIDSInfo(bids_dir=config.execution.bids_dir, bids_validate=False),
        name="bids_info",
    )

    summary = pe.Node(
        SubjectSummary(
            std_spaces=spaces.get_spaces(nonstandard=False),
            nstd_spaces=spaces.get_spaces(standard=False),
        ),
        name="summary",
        run_without_submitting=True,
    )

    about = pe.Node(
        AboutSummary(version=config.environment.version,
                     command=" ".join(sys.argv)),
        name="about",
        run_without_submitting=True,
    )

    ds_report_summary = pe.Node(
        DerivativesDataSink(
            base_directory=nibabies_dir,
            desc="summary",
            datatype="figures",
            dismiss_entities=("echo", ),
        ),
        name="ds_report_summary",
        run_without_submitting=True,
    )

    ds_report_about = pe.Node(
        DerivativesDataSink(
            base_directory=nibabies_dir,
            desc="about",
            datatype="figures",
            dismiss_entities=("echo", ),
        ),
        name="ds_report_about",
        run_without_submitting=True,
    )

    # Preprocessing of anatomical (includes registration to UNCInfant)
    anat_preproc_wf = init_infant_anat_wf(
        # NOTE(review): ``x or True`` is always truthy, so an explicit
        # ``ants_affine_init = False`` in the config is silently ignored —
        # confirm whether this was meant as a None-default instead.
        ants_affine_init=config.workflow.ants_affine_init or True,
        age_months=config.workflow.age_months,
        anat_modality=anat_modality,
        t1w=subject_data["t1w"],
        t2w=subject_data["t2w"],
        bids_root=config.execution.bids_dir,
        existing_derivatives=anat_derivatives,
        freesurfer=config.workflow.run_reconall,
        longitudinal=config.workflow.longitudinal,
        omp_nthreads=config.nipype.omp_nthreads,
        output_dir=nibabies_dir,
        segmentation_atlases=config.execution.segmentation_atlases_dir,
        skull_strip_mode=config.workflow.skull_strip_t1w,
        skull_strip_template=Reference.from_string(
            config.workflow.skull_strip_template)[0],
        sloppy=config.execution.sloppy,
        spaces=spaces,
    )

    # Connections that apply regardless of whether derivatives pre-exist.
    # fmt: off
    workflow.connect([
        (inputnode, anat_preproc_wf, [
            ('subjects_dir', 'inputnode.subjects_dir'),
        ]),
        (inputnode, summary, [
            ('subjects_dir', 'subjects_dir'),
        ]),
        (bidssrc, summary, [
            ('bold', 'bold'),
        ]),
        (bids_info, summary, [
            ('subject', 'subject_id'),
        ]),
        (bids_info, anat_preproc_wf, [
            # _prefix is a module-level helper (defined elsewhere in this
            # file) — presumably prepends "sub-"; verify against its def.
            (('subject', _prefix), 'inputnode.subject_id'),
        ]),
        (bidssrc, anat_preproc_wf, [
            ('t1w', 'inputnode.t1w'),
            ('t2w', 'inputnode.t2w'),
            # ('roi', 'inputnode.roi'),
            # ('flair', 'inputnode.flair'),
        ]),
        (summary, ds_report_summary, [
            ('out_report', 'in_file'),
        ]),
        (about, ds_report_about, [
            ('out_report', 'in_file'),
        ]),
    ])

    if not anat_derivatives:
        # Raw T1w images drive BIDS metadata and report source files.
        workflow.connect([
            (bidssrc, bids_info, [
                (('t1w', fix_multi_source_name), 'in_file'),
            ]),
            (bidssrc, summary, [
                ('t1w', 't1w'),
                ('t2w', 't2w'),
            ]),
            (bidssrc, ds_report_summary, [
                (('t1w', fix_multi_source_name), 'source_file'),
            ]),
            (bidssrc, ds_report_about, [
                (('t1w', fix_multi_source_name), 'source_file'),
            ]),
        ])
    else:
        # Pre-computed derivatives replace the raw anatomicals; BOLD files
        # then provide the BIDS entities for metadata extraction.
        workflow.connect([
            (bidssrc, bids_info, [
                (('bold', fix_multi_source_name), 'in_file'),
            ]),
            (anat_preproc_wf, summary, [
                ('outputnode.t1w_preproc', 't1w'),
            ]),
            (anat_preproc_wf, ds_report_summary, [
                ('outputnode.t1w_preproc', 'source_file'),
            ]),
            (anat_preproc_wf, ds_report_about, [
                ('outputnode.t1w_preproc', 'source_file'),
            ]),
        ])
    # fmt: on

    # Overwrite ``out_path_base`` of smriprep's DataSinks
    for node in workflow.list_node_names():
        if node.split(".")[-1].startswith("ds_"):
            workflow.get_node(node).interface.out_path_base = ""

    if anat_only:
        return workflow

    # Susceptibility distortion correction
    fmap_estimators = None
    if "fieldmap" not in config.workflow.ignore:
        from sdcflows.utils.wrangler import find_estimators
        # NOTE(review): init_fmap_preproc_wf is imported here but only used
        # (and re-imported) further below — this import appears redundant.
        from sdcflows.workflows.base import init_fmap_preproc_wf

        # SDC Step 1: Run basic heuristics to identify available data for
        # fieldmap estimation
        # For now, no fmapless
        fmap_estimators = find_estimators(
            layout=config.execution.layout,
            subject=subject_id,
            fmapless=False,  # config.workflow.use_syn,
            force_fmapless=False,  # config.workflow.force_syn,
        )

    # Append the functional section to the existing anatomical excerpt
    # That way we do not need to stream down the number of bold datasets
    anat_preproc_wf.__postdesc__ = ((anat_preproc_wf.__postdesc__ if hasattr(
        anat_preproc_wf, '__postdesc__') else "") + f"""

Functional data preprocessing

: For each of the {len(subject_data['bold'])} BOLD runs found per subject
 (across all tasks and sessions), the following preprocessing was performed.
""")

    # calculate reference image(s) for BOLD images
    # group all BOLD files based on same:
    # 1) session
    # 2) PE direction
    # 3) total readout time
    from niworkflows.workflows.epi.refmap import init_epi_reference_wf
    _, bold_groupings = group_bolds_ref(layout=config.execution.layout,
                                        subject=subject_id)
    if any(not x for x in bold_groupings):
        # NOTE(review): uses print() instead of config.loggers.workflow —
        # this message will bypass the logging configuration.
        print("No BOLD files found for one or more reference groupings")
        return workflow

    func_preproc_wfs = []
    for idx, bold_groupings_files in [(idx, bold_files) for idx, bold_files in enumerate(bold_groupings)][:0]:
        pass  # (placeholder removed below — see actual loop)
    for idx, bold_files in enumerate(bold_groupings):
        # One shared EPI reference per grouping (same session/PE/readout).
        bold_ref_wf = init_epi_reference_wf(
            auto_bold_nss=True,
            name=f'bold_reference_wf{idx}',
            omp_nthreads=config.nipype.omp_nthreads)
        bold_ref_wf.inputs.inputnode.in_files = bold_files
        # NOTE(review): the inner loop rebinds ``idx`` (shadowing the outer
        # grouping index); harmless here because enumerate re-assigns it each
        # outer iteration, but worth renaming for clarity.
        for idx, bold_file in enumerate(bold_files):
            func_preproc_wf = init_func_preproc_wf(
                bold_file, has_fieldmap=bool(fmap_estimators))
            # fmt: off
            workflow.connect([
                (bold_ref_wf, func_preproc_wf, [
                    ('outputnode.epi_ref_file', 'inputnode.bold_ref'),
                    (('outputnode.xfm_files', _select_iter_idx, idx),
                     'inputnode.bold_ref_xfm'),
                    (('outputnode.n_dummy', _select_iter_idx, idx),
                     'inputnode.n_dummy_scans'),
                ]),
                (anat_preproc_wf, func_preproc_wf, [
                    ('outputnode.anat_preproc', 'inputnode.anat_preproc'),
                    ('outputnode.anat_mask', 'inputnode.anat_mask'),
                    ('outputnode.anat_brain', 'inputnode.anat_brain'),
                    ('outputnode.anat_dseg', 'inputnode.anat_dseg'),
                    ('outputnode.anat_aseg', 'inputnode.anat_aseg'),
                    ('outputnode.anat_aparc', 'inputnode.anat_aparc'),
                    ('outputnode.anat_tpms', 'inputnode.anat_tpms'),
                    ('outputnode.template', 'inputnode.template'),
                    ('outputnode.anat2std_xfm', 'inputnode.anat2std_xfm'),
                    ('outputnode.std2anat_xfm', 'inputnode.std2anat_xfm'),
                    # Undefined if --fs-no-reconall, but this is safe
                    ('outputnode.subjects_dir', 'inputnode.subjects_dir'),
                    ('outputnode.subject_id', 'inputnode.subject_id'),
                    ('outputnode.anat2fsnative_xfm',
                     'inputnode.anat2fsnative_xfm'),
                    ('outputnode.fsnative2anat_xfm',
                     'inputnode.fsnative2anat_xfm'),
                ]),
            ])
            # fmt: on
            func_preproc_wfs.append(func_preproc_wf)

    if not fmap_estimators:
        config.loggers.workflow.warning(
            "Data for fieldmap estimation not present. Please note that these data "
            "will not be corrected for susceptibility distortions.")
        return workflow

    config.loggers.workflow.info(
        f"Fieldmap estimators found: {[e.method for e in fmap_estimators]}")

    from sdcflows.workflows.base import init_fmap_preproc_wf
    from sdcflows import fieldmaps as fm

    fmap_wf = init_fmap_preproc_wf(
        debug=bool(
            config.execution.debug),  # TODO: Add debug option for fieldmaps
        estimators=fmap_estimators,
        omp_nthreads=config.nipype.omp_nthreads,
        output_dir=nibabies_dir,
        subject=subject_id,
    )
    fmap_wf.__desc__ = f"""

Fieldmap data preprocessing

: A total of {len(fmap_estimators)} fieldmaps were found available within the input
BIDS structure for this particular subject.
"""

    # Every functional workflow consumes the same fieldmap outputs; the
    # functional side selects the matching fieldmap by ``fmap_id``.
    for func_preproc_wf in func_preproc_wfs:
        # fmt: off
        workflow.connect([
            (fmap_wf, func_preproc_wf, [
                ("outputnode.fmap", "inputnode.fmap"),
                ("outputnode.fmap_ref", "inputnode.fmap_ref"),
                ("outputnode.fmap_coeff", "inputnode.fmap_coeff"),
                ("outputnode.fmap_mask", "inputnode.fmap_mask"),
                ("outputnode.fmap_id", "inputnode.fmap_id"),
            ]),
        ])
        # fmt: on

    # Overwrite ``out_path_base`` of sdcflows's DataSinks
    for node in fmap_wf.list_node_names():
        if node.split(".")[-1].startswith("ds_"):
            fmap_wf.get_node(node).interface.out_path_base = ""

    # Step 3: Manually connect PEPOLAR
    for estimator in fmap_estimators:
        config.loggers.workflow.info(f"""\
Setting-up fieldmap "{estimator.bids_id}" ({estimator.method}) with \
<{', '.join(s.path.name for s in estimator.sources)}>""")
        # MAPPED/PHASEDIFF estimators are wired automatically by sdcflows.
        if estimator.method in (fm.EstimatorType.MAPPED,
                                fm.EstimatorType.PHASEDIFF):
            continue

        suffices = set(s.suffix for s in estimator.sources)

        # Plain EPI-only PEPOLAR: feed the source files/metadata directly
        # into the estimator's input node.
        if estimator.method == fm.EstimatorType.PEPOLAR and sorted(
                suffices) == ["epi"]:
            getattr(fmap_wf.inputs, f"in_{estimator.bids_id}").in_data = [
                str(s.path) for s in estimator.sources
            ]
            getattr(fmap_wf.inputs, f"in_{estimator.bids_id}").metadata = [
                s.metadata for s in estimator.sources
            ]
            continue

        if estimator.method == fm.EstimatorType.PEPOLAR:
            raise NotImplementedError(
                "Sophisticated PEPOLAR schemes (e.g., using DWI+EPI) are unsupported."
            )

    return workflow
def init_single_subject_wf(subject_id):
    """
    Set-up the preprocessing pipeline for a single subject.

    It collects and reports information about the subject, and prepares
    sub-workflows to perform anatomical and diffusion MRI preprocessing.

    Anatomical preprocessing is performed in a single workflow, regardless of
    the number of sessions.
    Diffusion MRI preprocessing is performed using a separate workflow for
    a full :abbr:`DWI (diffusion weighted imaging)` *entity*.
    A DWI *entity* may comprehend one or several runs (for instance, two
    opposed :abbr:`PE (phase-encoding)` directions).

    Workflow Graph
        .. workflow::
            :graph2use: orig
            :simple_form: yes

            from dmriprep.config.testing import mock_config
            from dmriprep.workflows.base import init_single_subject_wf
            with mock_config():
                wf = init_single_subject_wf("THP0005")

    Parameters
    ----------
    subject_id : str
        Subject label for this single-subject workflow

    Inputs
    ------
    subjects_dir : os.pathlike
        FreeSurfer's ``$SUBJECTS_DIR``

    """
    name = f"single_subject_{subject_id}_wf"
    # Query the BIDS layout for all imaging data belonging to this subject.
    subject_data = collect_data(config.execution.layout, subject_id)[0]

    # Drop modalities the user asked to ignore.
    if "flair" in config.workflow.ignore:
        subject_data["flair"] = []
    if "t2w" in config.workflow.ignore:
        subject_data["t2w"] = []

    anat_only = config.workflow.anat_only

    # Make sure we always go through these two checks
    if not anat_only and not subject_data["dwi"]:
        raise Exception(f"No DWI data found for participant {subject_id}. "
                        "All workflows require DWI images.")

    if not subject_data["t1w"]:
        raise Exception(f"No T1w images found for participant {subject_id}. "
                        "All workflows require T1w images.")

    workflow = Workflow(name=name)
    # Boilerplate methods text collected into the citation-ready report.
    workflow.__desc__ = f"""
Results included in this manuscript come from preprocessing
performed using *dMRIPrep* {config.environment.version}
(@dmriprep; RRID:SCR_017412),
which is based on *Nipype* {config.environment.nipype_version}
(@nipype1; @nipype2; RRID:SCR_002502).

"""
    workflow.__postdesc__ = """
For more details of the pipeline, see [the section corresponding
to workflows in *dMRIPrep*'s documentation]\
(https://nipreps.github.io/dmriprep/master/workflows.html \
"dMRIPrep's documentation").


### Copyright Waiver

The above boilerplate text was automatically generated by dMRIPrep
with the express intention that users should copy and paste this
text into their manuscripts *unchanged*.
It is released under the [CC0]\
(https://creativecommons.org/publicdomain/zero/1.0/) license.

### References

"""
    spaces = config.workflow.spaces
    output_dir = config.execution.output_dir

    # Graph source nodes: subject data grabber, BIDS metadata, and the
    # report-generating summary/about nodes with their datasinks.
    fsinputnode = pe.Node(niu.IdentityInterface(fields=["subjects_dir"]),
                          name="fsinputnode")

    bidssrc = pe.Node(BIDSDataGrabber(subject_data=subject_data,
                                      anat_only=anat_only),
                      name="bidssrc")

    bids_info = pe.Node(BIDSInfo(bids_dir=config.execution.bids_dir,
                                 bids_validate=False),
                        name="bids_info")

    summary = pe.Node(SubjectSummary(
        std_spaces=spaces.get_spaces(nonstandard=False),
        nstd_spaces=spaces.get_spaces(standard=False)),
                      name="summary",
                      run_without_submitting=True)

    about = pe.Node(AboutSummary(version=config.environment.version,
                                 command=" ".join(sys.argv)),
                    name="about",
                    run_without_submitting=True)

    ds_report_summary = pe.Node(DerivativesDataSink(
        base_directory=str(output_dir), desc="summary", datatype="figures"),
                                name="ds_report_summary",
                                run_without_submitting=True)

    ds_report_about = pe.Node(DerivativesDataSink(
        base_directory=str(output_dir), desc="about", datatype="figures"),
                              name="ds_report_about",
                              run_without_submitting=True)

    # If pre-computed anatomical derivatives were supplied, collect them so
    # the anatomical workflow can skip already-done steps.
    anat_derivatives = config.execution.anat_derivatives
    if anat_derivatives:
        from smriprep.utils.bids import collect_derivatives
        std_spaces = spaces.get_spaces(nonstandard=False, dim=(3, ))
        anat_derivatives = collect_derivatives(
            anat_derivatives.absolute(),
            subject_id,
            std_spaces,
            config.workflow.run_reconall,
        )

    # Preprocessing of T1w (includes registration to MNI)
    anat_preproc_wf = init_anat_preproc_wf(
        bids_root=str(config.execution.bids_dir),
        debug=config.execution.debug is True,
        existing_derivatives=anat_derivatives,
        freesurfer=config.workflow.run_reconall,
        hires=config.workflow.hires,
        longitudinal=config.workflow.longitudinal,
        omp_nthreads=config.nipype.omp_nthreads,
        output_dir=str(output_dir),
        skull_strip_fixed_seed=config.workflow.skull_strip_fixed_seed,
        skull_strip_mode="force",
        skull_strip_template=Reference.from_string(
            config.workflow.skull_strip_template)[0],
        spaces=spaces,
        t1w=subject_data["t1w"],
    )

    workflow.connect([
        (fsinputnode, anat_preproc_wf, [("subjects_dir",
                                         "inputnode.subjects_dir")]),
        (bidssrc, bids_info, [(("t1w", fix_multi_T1w_source_name), "in_file")
                              ]),
        (fsinputnode, summary, [("subjects_dir", "subjects_dir")]),
        (bidssrc, summary, [("t1w", "t1w"), ("t2w", "t2w"), ("dwi", "dwi")]),
        (bids_info, summary, [("subject", "subject_id")]),
        # _prefix is a module-level helper (defined elsewhere in this file) —
        # presumably prepends "sub-"; verify against its definition.
        (bids_info, anat_preproc_wf, [(("subject", _prefix),
                                       "inputnode.subject_id")]),
        (bidssrc, anat_preproc_wf, [("t1w", "inputnode.t1w"),
                                    ("t2w", "inputnode.t2w"),
                                    ("roi", "inputnode.roi"),
                                    ("flair", "inputnode.flair")]),
        (bidssrc, ds_report_summary, [(("t1w", fix_multi_T1w_source_name),
                                       "source_file")]),
        (summary, ds_report_summary, [("out_report", "in_file")]),
        (bidssrc, ds_report_about, [(("t1w", fix_multi_T1w_source_name),
                                     "source_file")]),
        (about, ds_report_about, [("out_report", "in_file")]),
    ])

    # Overwrite ``out_path_base`` of smriprep's DataSinks
    for node in workflow.list_node_names():
        if node.split(".")[-1].startswith("ds_"):
            workflow.get_node(node).interface.out_path_base = "dmriprep"

    if anat_only:
        return workflow

    # Append the dMRI section to the existing anatomical excerpt
    # That way we do not need to stream down the number of DWI datasets
    anat_preproc_wf.__postdesc__ = (anat_preproc_wf.__postdesc__ or "") + f"""

Diffusion data preprocessing

: For each of the {len(subject_data["dwi"])} DWI scans found per subject
 (across all sessions), the gradient table was vetted and converted into the *RASb*
format (i.e., given in RAS+ scanner coordinates, normalized b-vectors and scaled b-values),
and a *b=0* average for reference to the subsequent steps of preprocessing was calculated.
"""
    # Bundle each DWI run with its sidecar metadata and gradient files so a
    # single iterable drives the per-run preprocessing below.
    layout = config.execution.layout
    dwi_data = tuple([(dwi, layout.get_metadata(dwi), layout.get_bvec(dwi),
                       layout.get_bval(dwi)) for dwi in subject_data["dwi"]])

    inputnode = pe.Node(niu.IdentityInterface(fields=["dwi_data"]),
                        name="inputnode")
    # Iterate the pipeline once per DWI run.
    inputnode.iterables = [("dwi_data", dwi_data)]

    # JoinNode fans the per-run results back into lists, keyed on the
    # iterating inputnode above.
    referencenode = pe.JoinNode(niu.IdentityInterface(fields=[
        "dwi_file", "metadata", "dwi_reference", "dwi_mask", "gradients_rasb"
    ]),
                                name="referencenode",
                                joinsource="inputnode",
                                run_without_submitting=True)

    # _unpack is a module-level helper (defined elsewhere in this file) that
    # splits the (file, metadata, bvec, bval) tuple into separate outputs.
    split_info = pe.Node(niu.Function(
        function=_unpack,
        output_names=["dwi_file", "metadata", "bvec", "bval"]),
                         name="split_info",
                         run_without_submitting=True)

    early_b0ref_wf = init_early_b0ref_wf()
    workflow.connect([
        (inputnode, split_info, [("dwi_data", "in_tuple")]),
        (split_info, early_b0ref_wf, [("dwi_file", "inputnode.dwi_file"),
                                      ("bvec", "inputnode.in_bvec"),
                                      ("bval", "inputnode.in_bval")]),
        (split_info, referencenode, [("dwi_file", "dwi_file"),
                                     ("metadata", "metadata")]),
        (early_b0ref_wf, referencenode, [
            ("outputnode.dwi_reference", "dwi_reference"),
            ("outputnode.dwi_mask", "dwi_mask"),
            ("outputnode.gradients_rasb", "gradients_rasb"),
        ]),
    ])

    # Fieldmap estimation consumes the joined b=0 references/masks from all
    # runs at once.
    fmap_estimation_wf = init_fmap_estimation_wf(subject_data["dwi"],
                                                 debug=config.execution.debug)
    workflow.connect([
        (referencenode, fmap_estimation_wf,
         [("dwi_reference", "inputnode.dwi_reference"),
          ("dwi_mask", "inputnode.dwi_mask")]),
    ])
    return workflow