def init_anat_template_wf(longitudinal, omp_nthreads, num_t1w, name='anat_template_wf'):
    """
    Generate a canonically-oriented, structural average from all input T1w images.

    Workflow Graph
        .. workflow::
            :graph2use: orig
            :simple_form: yes

            from smriprep.workflows.anatomical import init_anat_template_wf
            wf = init_anat_template_wf(
                longitudinal=False, omp_nthreads=1, num_t1w=1)

    Parameters
    ----------
    longitudinal : bool
        Create unbiased structural average, regardless of number of inputs
        (may increase runtime)
    omp_nthreads : int
        Maximum number of threads an individual process may use
    num_t1w : int
        Number of T1w images
    name : str, optional
        Workflow name (default: anat_template_wf)

    Inputs
    ------
    t1w
        List of T1-weighted structural images

    Outputs
    -------
    t1w_ref
        Structural reference averaging input T1w images, defining the T1w space.
    t1w_realign_xfm
        List of affine transforms to realign input T1w images
    out_report
        Conformation report

    """
    workflow = Workflow(name=name)

    if num_t1w > 1:
        workflow.__desc__ = """\
A T1w-reference map was computed after registration of
{num_t1w} T1w images (after INU-correction) using
`mri_robust_template` [FreeSurfer {fs_ver}, @fs_template].
""".format(num_t1w=num_t1w, fs_ver=fs.Info().looseversion() or '<ver>')

    inputnode = pe.Node(niu.IdentityInterface(fields=['t1w']), name='inputnode')
    outputnode = pe.Node(niu.IdentityInterface(
        fields=['t1w_ref', 't1w_valid_list', 't1w_realign_xfm', 'out_report']),
        name='outputnode')

    # 0. Reorient T1w image(s) to RAS and resample to common voxel space
    t1w_ref_dimensions = pe.Node(TemplateDimensions(), name='t1w_ref_dimensions')
    t1w_conform = pe.MapNode(Conform(), iterfield='in_file', name='t1w_conform')

    workflow.connect([
        (inputnode, t1w_ref_dimensions, [('t1w', 't1w_list')]),
        (t1w_ref_dimensions, t1w_conform, [
            ('t1w_valid_list', 'in_file'),
            ('target_zooms', 'target_zooms'),
            ('target_shape', 'target_shape')]),
        (t1w_ref_dimensions, outputnode, [('out_report', 'out_report'),
                                          ('t1w_valid_list', 't1w_valid_list')]),
    ])

    if num_t1w == 1:
        get1st = pe.Node(niu.Select(index=[0]), name='get1st')
        outputnode.inputs.t1w_realign_xfm = [
            pkgr('smriprep', 'data/itkIdentityTransform.txt')]

        workflow.connect([
            (t1w_conform, get1st, [('out_file', 'inlist')]),
            (get1st, outputnode, [('out', 't1w_ref')]),
        ])

        return workflow

    t1w_conform_xfm = pe.MapNode(LTAConvert(in_lta='identity.nofile', out_lta=True),
                                 iterfield=['source_file', 'target_file'],
                                 name='t1w_conform_xfm')

    # 1. Template (only if several T1w images)
    # 1a. Correct for bias field: the bias field is an additive factor
    #     in log-transformed intensity units. Therefore, it is not a linear
    #     combination of fields and N4 fails with merged images.
    # 1b. Align and merge if several T1w images are provided
    n4_correct = pe.MapNode(
        N4BiasFieldCorrection(dimension=3, copy_header=True),
        iterfield='input_image', name='n4_correct',
        n_procs=1)  # n_procs=1 for reproducibility
    # StructuralReference is fs.RobustTemplate if > 1 volume, copying otherwise
    t1w_merge = pe.Node(
        StructuralReference(
            auto_detect_sensitivity=True,
            initial_timepoint=1,      # For deterministic behavior
            intensity_scaling=True,   # 7-DOF (rigid + intensity)
            subsample_threshold=200,
            fixed_timepoint=not longitudinal,
            no_iteration=not longitudinal,
            transform_outputs=True,
        ),
        mem_gb=2 * num_t1w - 1,
        name='t1w_merge')

    # 2. Reorient template to RAS, if needed (mri_robust_template may set to LIA)
    t1w_reorient = pe.Node(image.Reorient(), name='t1w_reorient')

    concat_affines = pe.MapNode(
        ConcatenateLTA(out_type='RAS2RAS', invert_out=True),
        iterfield=['in_lta1', 'in_lta2'],
        name='concat_affines')

    lta_to_itk = pe.MapNode(LTAConvert(out_itk=True), iterfield=['in_lta'],
                            name='lta_to_itk')

    def _set_threads(in_list, maximum):
        return min(len(in_list), maximum)

    workflow.connect([
        (t1w_ref_dimensions, t1w_conform_xfm, [('t1w_valid_list', 'source_file')]),
        (t1w_conform, t1w_conform_xfm, [('out_file', 'target_file')]),
        (t1w_conform, n4_correct, [('out_file', 'input_image')]),
        (t1w_conform, t1w_merge, [
            (('out_file', _set_threads, omp_nthreads), 'num_threads'),
            (('out_file', add_suffix, '_template'), 'out_file')]),
        (n4_correct, t1w_merge, [('output_image', 'in_files')]),
        (t1w_merge, t1w_reorient, [('out_file', 'in_file')]),
        # Combine orientation and template transforms
        (t1w_conform_xfm, concat_affines, [('out_lta', 'in_lta1')]),
        (t1w_merge, concat_affines, [('transform_outputs', 'in_lta2')]),
        (concat_affines, lta_to_itk, [('out_file', 'in_lta')]),
        # Output
        (t1w_reorient, outputnode, [('out_file', 't1w_ref')]),
        (lta_to_itk, outputnode, [('out_itk', 't1w_realign_xfm')]),
    ])

    return workflow
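
# A minimal, hypothetical usage sketch: building the T1w template workflow and feeding
# it two input images by hand. The file paths and working directory below are
# placeholders, not project conventions.
def _example_init_anat_template_wf():
    wf = init_anat_template_wf(longitudinal=False, omp_nthreads=4, num_t1w=2)
    wf.base_dir = '/tmp/anat_template_work'  # hypothetical working directory
    wf.get_node('inputnode').inputs.t1w = [
        '/data/sub-01/ses-01/anat/sub-01_ses-01_T1w.nii.gz',  # hypothetical path
        '/data/sub-01/ses-02/anat/sub-01_ses-02_T1w.nii.gz',  # hypothetical path
    ]
    return wf  # call wf.run() to execute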
def create_mtr_workflow(scan_directory: str,
                        patient_id: str = None,
                        scan_id: str = None,
                        reorient: str = 'RAI',
                        split_mton_flag=False,
                        use_iacl_struct=False) -> pe.Workflow:
    '''
    Registers the MT-on/MT-off images to a target image and estimates the MTR map

    :param scan_directory: root directory of the scan data (also used as the workflow base directory)
    :param patient_id: patient identifier, used to build paths and output names
    :param scan_id: scan identifier, used to build paths and output names
    :param reorient: target orientation code (e.g. 'RAI'); set to None to skip reorientation
    :param split_mton_flag: if True, the MT-on input is a 4D file containing both MT-on and MT-off volumes and is split
    :param use_iacl_struct: if True, write outputs using the IACL directory structure
    :return: A :class:`nipype.pipeline.engine.Workflow` object
    :rtype: nipype.pipeline.engine.Workflow
    '''
    name = 'mtr'
    if patient_id is not None and scan_id is not None:
        scan_directory = os.path.join(scan_directory, patient_id, 'pipeline')
        name += '_' + scan_id

    wf = pe.Workflow(name, scan_directory)

    input_node = pe.Node(util.IdentityInterface(
        fields=['mton_file', 'mtoff_file', 'target_file', 'brainmask_file'],
        mandatory_inputs=False),
        name='input_node')

    mtfile_node = pe.Node(
        util.IdentityInterface(fields=['mton_file', 'mtoff_file']),
        name='mtfile_node')

    if split_mton_flag:
        split_mt = pe.Node(fsl.Split(), 'split_mt')
        split_mt.inputs.dimension = 't'
        wf.connect(input_node, 'mton_file', split_mt, 'in_file')

        split_mt_files = pe.Node(util.Split(), 'split_mt_files')
        split_mt_files.inputs.splits = [1, 1]
        split_mt_files.inputs.squeeze = True
        wf.connect(split_mt, 'out_files', split_mt_files, 'inlist')
        wf.connect(split_mt_files, 'out1', mtfile_node, 'mton_file')  # TODO: Check if MTON is first
        wf.connect(split_mt_files, 'out2', mtfile_node, 'mtoff_file')
    else:
        wf.connect(input_node, 'mton_file', mtfile_node, 'mton_file')
        wf.connect(input_node, 'mtoff_file', mtfile_node, 'mtoff_file')

    # Reorient
    if reorient is not None:
        reorient_mton_to_target = pe.Node(image.Reorient(), name='reorient_mton_to_target')
        reorient_mton_to_target.inputs.orientation = reorient
        wf.connect(mtfile_node, 'mton_file', reorient_mton_to_target, 'in_file')

        reorient_mtoff_to_target = pe.Node(image.Reorient(), name='reorient_mtoff_to_target')
        reorient_mtoff_to_target.inputs.orientation = reorient
        wf.connect(mtfile_node, 'mtoff_file', reorient_mtoff_to_target, 'in_file')

    affine_reg_to_target = pe.Node(ants.Registration(), name='affine_reg_to_target')
    affine_reg_to_target.inputs.dimension = 3
    affine_reg_to_target.inputs.interpolation = 'Linear'
    affine_reg_to_target.inputs.metric = ['MI', 'MI']
    affine_reg_to_target.inputs.metric_weight = [1.0, 1.0]
    affine_reg_to_target.inputs.radius_or_number_of_bins = [32, 32]
    affine_reg_to_target.inputs.sampling_strategy = ['Regular', 'Regular']
    affine_reg_to_target.inputs.sampling_percentage = [0.25, 0.25]
    affine_reg_to_target.inputs.transforms = ['Rigid', 'Affine']
    affine_reg_to_target.inputs.transform_parameters = [(0.1, ), (0.1, )]
    affine_reg_to_target.inputs.number_of_iterations = [[100, 50, 25], [100, 50, 25]]
    affine_reg_to_target.inputs.convergence_threshold = [1e-6, 1e-6]
    affine_reg_to_target.inputs.convergence_window_size = [10, 10]
    affine_reg_to_target.inputs.smoothing_sigmas = [[4, 2, 1], [4, 2, 1]]
    affine_reg_to_target.inputs.sigma_units = ['vox', 'vox']
    affine_reg_to_target.inputs.shrink_factors = [[4, 2, 1], [4, 2, 1]]
    affine_reg_to_target.inputs.write_composite_transform = True
    affine_reg_to_target.inputs.initial_moving_transform_com = 1
    affine_reg_to_target.inputs.output_warped_image = True

    # The MT-off image is registered to the target; the resulting transform is then
    # applied to the MT-on image.
    # TODO: Confirm that the MT-off (rather than MT-on) image should be the one registered
    wf.connect(input_node, 'target_file', affine_reg_to_target, 'fixed_image')
    if reorient is not None:
        wf.connect(reorient_mtoff_to_target, 'out_file', affine_reg_to_target, 'moving_image')
    else:
        wf.connect(mtfile_node, 'mtoff_file', affine_reg_to_target, 'moving_image')

    transform_mton = pe.Node(ants.ApplyTransforms(), name='transform_mton')
    transform_mton.inputs.input_image_type = 3
    wf.connect(input_node, 'target_file', transform_mton, 'reference_image')
    wf.connect(affine_reg_to_target, 'composite_transform', transform_mton, 'transforms')
    if reorient is not None:
        wf.connect(reorient_mton_to_target, 'out_file', transform_mton, 'input_image')
    else:
        wf.connect(mtfile_node, 'mton_file', transform_mton, 'input_image')

    estimate = pe.Node(EstimateMTR(), name='estimate_mtr')
    wf.connect(transform_mton, 'output_image', estimate, 'mton_file')
    wf.connect(affine_reg_to_target, 'warped_image', estimate, 'mtoff_file')
    wf.connect(input_node, 'brainmask_file', estimate, 'brainmask_file')

    # Set up base filename for copying outputs
    if use_iacl_struct:
        out_file_base = os.path.join(scan_directory, patient_id, scan_id,
                                     patient_id + '_' + scan_id)
    else:
        if patient_id is not None:
            out_file_base = patient_id + '_' + scan_id if scan_id is not None else patient_id
        else:
            out_file_base = 'out'
        out_file_base = os.path.join(scan_directory, out_file_base)

    export_mtr = pe.Node(io.ExportFile(), name='export_mtr')
    export_mtr.inputs.check_extension = True
    export_mtr.inputs.clobber = True
    export_mtr.inputs.out_file = out_file_base + '_MTR.nii.gz'
    wf.connect(estimate, 'mtr_file', export_mtr, 'in_file')

    return wf
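
# A minimal, hypothetical usage sketch: wiring the MTR workflow's input_node by hand.
# The study layout and file names below are placeholders, not project conventions.
def _example_create_mtr_workflow():
    wf = create_mtr_workflow('/data/mtr_study', patient_id='P001', scan_id='01')
    inputs = wf.get_node('input_node').inputs
    inputs.mton_file = '/data/mtr_study/P001/P001_01_MTON.nii.gz'            # hypothetical
    inputs.mtoff_file = '/data/mtr_study/P001/P001_01_MTOFF.nii.gz'          # hypothetical
    inputs.target_file = '/data/mtr_study/P001/P001_01_T1.nii.gz'            # hypothetical
    inputs.brainmask_file = '/data/mtr_study/P001/P001_01_brainmask.nii.gz'  # hypothetical
    return wf  # call wf.run() to execute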
def run(self, do_skullstrip=True, n_ants_jobs=1, n_pipeline_jobs=1):
    """Run queued registration jobs.

    Args:
        do_skullstrip (bool, optional): whether to skullstrip images prior to
            registration. Defaults to True.
        n_ants_jobs (int, optional): number of parallel threads for ANTs
            registration. Defaults to 1.
        n_pipeline_jobs (int, optional): number of parallel processing jobs;
            this should be at least equal to n_ants_jobs. Defaults to 1.
    """
    if n_pipeline_jobs == 1:
        n_ants_jobs = 1

    if not os.path.exists(self.strOutputDir):
        os.makedirs(self.strOutputDir)
    strJobListPath = os.path.join(self.strOutputDir, 'joblist.csv')
    self.dfConfig.to_csv(strJobListPath)

    datanode = Node(utility.csv.CSVReader(in_file=os.path.abspath(strJobListPath),
                                          header=True),
                    name='0_datanode')

    augment = Workflow('augmentation',
                       base_dir=os.path.join(self.strOutputDir, 'working_dir'))

    reorientFunc = MapNode(image.Reorient(), name='0_reorient_func', iterfield=['in_file'])
    augment.connect(datanode, 'func', reorientFunc, 'in_file')
    reorientAnat = MapNode(image.Reorient(), name='0_reorient_anat', iterfield=['in_file'])
    augment.connect(datanode, 'anat', reorientAnat, 'in_file')
    reorientTargetFunc = MapNode(image.Reorient(), name='0_reorient_targetfunc', iterfield=['in_file'])
    augment.connect(datanode, 'target_func', reorientTargetFunc, 'in_file')
    reorientTargetAnat = MapNode(image.Reorient(), name='0_reorient_targetanat', iterfield=['in_file'])
    augment.connect(datanode, 'target_anat', reorientTargetAnat, 'in_file')

    meanFunc = MapNode(fsl.MeanImage(), name='1_mean_func', iterfield=['in_file'])
    augment.connect(reorientFunc, 'out_file', meanFunc, 'in_file')
    meanTargetFunc = MapNode(fsl.MeanImage(), name='1_mean_targetfunc', iterfield=['in_file'])
    augment.connect(reorientTargetFunc, 'out_file', meanTargetFunc, 'in_file')

    if do_skullstrip:
        # Skull strip the anatomical images with ROBEX
        skullstripSourceAnat = MapNode(Robex(), name='1_source_anat_skullstrip', iterfield=['in_file'])
        augment.connect(reorientAnat, 'out_file', skullstripSourceAnat, 'in_file')
        skullstripTargetAnat = MapNode(Robex(), name='1_target_anat_skullstrip', iterfield=['in_file'])
        augment.connect(reorientTargetAnat, 'out_file', skullstripTargetAnat, 'in_file')

        # Skull strip the functional image with FSL BET and AFNI Automask
        skullstripSourceFunc = make_func_mask_workflow(
            base_dir=os.path.join(self.strOutputDir, 'working_dir'))
        augment.connect(meanFunc, 'out_file', skullstripSourceFunc, 'inputnode.mean_file')

    # First, perform a quick registration of the skull-stripped mean functional image to the
    # skull-stripped anatomical image. Use the SyNQuick tool, which does a
    # rigid -> affine -> SyN registration with preset parameters.
    func2Anat = MapNode(ants.RegistrationSynQuick(dimension=3, num_threads=n_ants_jobs),
                        name='2_func2Anat',
                        mem_gb=16,
                        n_procs=n_ants_jobs,
                        iterfield=['fixed_image', 'moving_image'])
    if do_skullstrip:
        augment.connect(skullstripSourceAnat, 'out_file', func2Anat, 'fixed_image')
        augment.connect(skullstripSourceFunc, 'outputnode.masked_file', func2Anat, 'moving_image')
    else:
        augment.connect(reorientAnat, 'out_file', func2Anat, 'fixed_image')
        # Use the mean functional image as the moving image (the reoriented functional is 4D)
        augment.connect(meanFunc, 'out_file', func2Anat, 'moving_image')

    # Now register the source anatomical image to the target anatomical image. Use a more
    # precise registration for this step. These parameters come from the
    # antsRegistrationSyN.sh script included in ANTs.
    anat2Anat = MapNode(
        ants.Registration(metric=['MI', 'MI', 'CC'],
                          metric_weight=[1, 1, 1],
                          transforms=['Rigid', 'Affine', 'SyN'],
                          smoothing_sigmas=[[3, 2, 1, 0]] * 3,
                          shrink_factors=[[8, 4, 2, 1]] * 3,
                          dimension=3,
                          initial_moving_transform_com=1,
                          radius_or_number_of_bins=[32, 32, 4],
                          sampling_strategy=['Regular', 'Regular', None],
                          sampling_percentage=[0.25, 0.25, None],
                          use_histogram_matching=True,
                          collapse_output_transforms=True,
                          write_composite_transform=True,
                          transform_parameters=[(0.1, ), (0.1, ), (0.1, 3, 0)],
                          number_of_iterations=[[1000, 500, 250, 100],
                                                [1000, 500, 250, 100],
                                                [100, 70, 50, 20]],
                          sigma_units=['vox'] * 3,  # one entry per registration stage
                          winsorize_upper_quantile=0.995,
                          winsorize_lower_quantile=0.005,
                          num_threads=n_ants_jobs,
                          verbose=False),
        name='3_anat2Anat',
        iterfield=['fixed_image', 'moving_image', 'output_warped_image'],
        mem_gb=16,
        n_procs=n_ants_jobs)
    if do_skullstrip:
        augment.connect(skullstripSourceAnat, 'out_file', anat2Anat, 'moving_image')
        augment.connect(skullstripTargetAnat, 'out_file', anat2Anat, 'fixed_image')
    else:
        augment.connect(reorientAnat, 'out_file', anat2Anat, 'moving_image')
        augment.connect(reorientTargetAnat, 'out_file', anat2Anat, 'fixed_image')
    augment.connect(datanode, 'output_anat', anat2Anat, 'output_warped_image')

    # Finally, apply the func-to-anat transforms, then the anat-to-anat transform,
    # to the source functional image.
    concat = MapNode(utility.Merge(3), name='4_concat_transforms',
                     iterfield=['in1', 'in2', 'in3'])
    # ANTs applies transforms in reverse order. The first transform applied is the
    # affine func-to-anat,
    augment.connect(func2Anat, 'out_matrix', concat, 'in3')
    # then the nonlinear func-to-anat,
    augment.connect(func2Anat, 'forward_warp_field', concat, 'in2')
    # and lastly the composite anat-to-anat.
    augment.connect(anat2Anat, 'composite_transform', concat, 'in1')

    transform = MapNode(
        ants.ApplyTransforms(input_image_type=3,
                             interpolation='BSpline',
                             dimension=3,
                             interpolation_parameters=(5, ),
                             num_threads=n_ants_jobs),
        name='4_apply_transforms',
        iterfield=['input_image', 'transforms', 'output_image', 'reference_image'],
        mem_gb=16,
        n_procs=n_ants_jobs)
    augment.connect(concat, 'out', transform, 'transforms')
    augment.connect(reorientFunc, 'out_file', transform, 'input_image')
    augment.connect(meanTargetFunc, 'out_file', transform, 'reference_image')
    augment.connect(datanode, 'output_func', transform, 'output_image')

    if n_pipeline_jobs == 1:
        augment.run()
    else:
        augment.run(plugin='MultiProc',
                    plugin_args={'n_procs': n_pipeline_jobs})
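
# Hedged illustration of the transform ordering used above: ANTs applies the transform
# list right-to-left, so the Merge node orders [anat-to-anat composite, func-to-anat warp,
# func-to-anat affine]. The standalone sketch below shows the same ordering with
# hypothetical file names; it only builds the interface and does not run it.
def _example_transform_ordering(n_threads=1):
    from nipype.interfaces import ants
    return ants.ApplyTransforms(
        dimension=3,
        input_image_type=3,                     # time series
        interpolation='BSpline',
        interpolation_parameters=(5,),
        num_threads=n_threads,
        input_image='func.nii.gz',              # hypothetical moving 4D image
        reference_image='target_mean.nii.gz',   # hypothetical reference
        transforms=[
            'anat2anat_Composite.h5',           # applied last: anat-to-anat
            'func2anat_1Warp.nii.gz',           # applied second: nonlinear func-to-anat
            'func2anat_0GenericAffine.mat',     # applied first: affine func-to-anat
        ],
    )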
def init_t2w_template_wf(longitudinal, omp_nthreads, num_t2w, name="anat_t2w_template_wf"):
    """
    Adapts :py:func:`~smriprep.workflows.anatomical.init_anat_template_wf` for a T2w image reference
    """
    from pkg_resources import resource_filename as pkgr
    from nipype.interfaces import freesurfer as fs, image, ants
    from niworkflows.engine.workflows import LiterateWorkflow as Workflow
    from niworkflows.interfaces.freesurfer import (
        StructuralReference,
        PatchedLTAConvert as LTAConvert,
    )
    from niworkflows.interfaces.images import TemplateDimensions, Conform, ValidateImage
    from niworkflows.interfaces.nitransforms import ConcatenateXFMs
    from niworkflows.utils.misc import add_suffix

    wf = Workflow(name=name)

    inputnode = pe.Node(niu.IdentityInterface(fields=["t2w"]), name="inputnode")
    outputnode = pe.Node(
        niu.IdentityInterface(fields=[
            "t2w_ref", "t2w_valid_list", "t2w_realign_xfm", "out_report"
        ]),
        name="outputnode",
    )

    # 0. Reorient T2w image(s) to RAS and resample to common voxel space
    # (TemplateDimensions exposes "t1w_*"-named fields regardless of modality)
    t2w_ref_dimensions = pe.Node(TemplateDimensions(), name='t2w_ref_dimensions')
    t2w_conform = pe.MapNode(Conform(), iterfield='in_file', name='t2w_conform')

    wf.connect([
        (inputnode, t2w_ref_dimensions, [('t2w', 't1w_list')]),
        (t2w_ref_dimensions, t2w_conform, [
            ('t1w_valid_list', 'in_file'),
            ('target_zooms', 'target_zooms'),
            ('target_shape', 'target_shape')]),
        (t2w_ref_dimensions, outputnode, [('out_report', 'out_report'),
                                          ('t1w_valid_list', 't2w_valid_list')]),
    ])

    if num_t2w == 1:
        get1st = pe.Node(niu.Select(index=[0]), name='get1st')
        outputnode.inputs.t2w_realign_xfm = [
            pkgr('smriprep', 'data/itkIdentityTransform.txt')]

        wf.connect([
            (t2w_conform, get1st, [('out_file', 'inlist')]),
            (get1st, outputnode, [('out', 't2w_ref')]),
        ])

        return wf

    wf.__desc__ = f"""\
A T2w-reference map was computed after registration of
{num_t2w} T2w images (after INU-correction) using
`mri_robust_template` [FreeSurfer {fs.Info().looseversion() or "<ver>"}, @fs_template].
"""

    t2w_conform_xfm = pe.MapNode(LTAConvert(in_lta='identity.nofile', out_lta=True),
                                 iterfield=['source_file', 'target_file'],
                                 name='t2w_conform_xfm')

    # 1a. Correct for bias field: the bias field is an additive factor
    #     in log-transformed intensity units. Therefore, it is not a linear
    #     combination of fields and N4 fails with merged images.
    # 1b. Align and merge if several T2w images are provided
    n4_correct = pe.MapNode(
        ants.N4BiasFieldCorrection(dimension=3, copy_header=True),
        iterfield='input_image', name='n4_correct',
        n_procs=1)  # n_procs=1 for reproducibility
    # StructuralReference is fs.RobustTemplate if > 1 volume, copying otherwise
    t2w_merge = pe.Node(
        StructuralReference(
            auto_detect_sensitivity=True,
            initial_timepoint=1,      # For deterministic behavior
            intensity_scaling=True,   # 7-DOF (rigid + intensity)
            subsample_threshold=200,
            fixed_timepoint=not longitudinal,
            no_iteration=not longitudinal,
            transform_outputs=True,
        ),
        mem_gb=2 * num_t2w - 1,
        name='t2w_merge')

    # 2. Reorient template to RAS, if needed (mri_robust_template may set to LIA)
    t2w_reorient = pe.Node(image.Reorient(), name='t2w_reorient')

    merge_xfm = pe.MapNode(niu.Merge(2), name='merge_xfm',
                           iterfield=['in1', 'in2'],
                           run_without_submitting=True)
    concat_xfms = pe.MapNode(ConcatenateXFMs(inverse=True), name="concat_xfms",
                             iterfield=['in_xfms'],
                             run_without_submitting=True)

    def _set_threads(in_list, maximum):
        return min(len(in_list), maximum)

    wf.connect([
        (t2w_ref_dimensions, t2w_conform_xfm, [('t1w_valid_list', 'source_file')]),
        (t2w_conform, t2w_conform_xfm, [('out_file', 'target_file')]),
        (t2w_conform, n4_correct, [('out_file', 'input_image')]),
        (t2w_conform, t2w_merge, [
            (('out_file', _set_threads, omp_nthreads), 'num_threads'),
            (('out_file', add_suffix, '_template'), 'out_file')]),
        (n4_correct, t2w_merge, [('output_image', 'in_files')]),
        (t2w_merge, t2w_reorient, [('out_file', 'in_file')]),
        # Combine orientation and template transforms
        (t2w_conform_xfm, merge_xfm, [('out_lta', 'in1')]),
        (t2w_merge, merge_xfm, [('transform_outputs', 'in2')]),
        (merge_xfm, concat_xfms, [('out', 'in_xfms')]),
        # Output
        (t2w_reorient, outputnode, [('out_file', 't2w_ref')]),
        (concat_xfms, outputnode, [('out_xfm', 't2w_realign_xfm')]),
    ])

    return wf
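
# A minimal, hypothetical usage sketch of the single-image case, where the workflow
# short-circuits after conformation and emits the packaged identity transform as
# t2w_realign_xfm. The input path is a placeholder.
def _example_init_t2w_template_wf_single():
    wf = init_t2w_template_wf(longitudinal=False, omp_nthreads=1, num_t2w=1)
    wf.base_dir = '/tmp/t2w_template_work'  # hypothetical working directory
    wf.get_node('inputnode').inputs.t2w = [
        '/data/sub-01/anat/sub-01_T2w.nii.gz',  # hypothetical path
    ]
    return wf  # call wf.run() to execute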