def N4BiasFieldCorrect(filename, output_filename):
    """Run ANTs N4 bias-field correction on one image.

    Parameters
    ----------
    filename : str
        Path to the input image.
    output_filename : str
        Path where the corrected image will be written.

    Returns
    -------
    None
        The corrected image is written to ``output_filename`` as a side effect.
    """
    n4_node = N4BiasFieldCorrection()
    n4_node.inputs.input_image = filename
    n4_node.inputs.output_image = output_filename
    n4_node.run()
    return None
import os
import json
import copy

from nipype import Workflow, Node
from nipype.interfaces import afni, fsl
from nipype.interfaces.spm import Smooth
from nipype.interfaces.utility import IdentityInterface
from nipype.interfaces.io import SelectFiles, DataSink
from nipype.algorithms.rapidart import ArtifactDetect
from nipype.interfaces.ants import N4BiasFieldCorrection
import nipype.interfaces.mrtrix3 as mrt

gen5tt = mrt.Generate5tt()
n4 = N4BiasFieldCorrection()

# Variables USE parse argument
# TODO: replace these hard-coded paths with command-line arguments.
bids_dir = '~/tmp/BIDS'  # experiment_dir
output_dir = '~/tmp/derivatives'
working_dir = '/tmp/'
participant_label = 'sub-HC10'

# ----------------------------------------------------------------
# T1w processing
# check https://github.com/llevitis/APPIAN
# Initiate a node to Reorient to RPI with AFNI
# NOTE(review): the original statement was truncated ("Node(afni.)") and
# `afni` was never imported; afni.Resample with orientation='RPI' matches
# the comment's stated intent — confirm against the upstream script.
reorient = Node(afni.Resample(orientation='RPI', outputtype='NIFTI_GZ'),
                name='reorient')
def init_anat_template_wf(longitudinal, omp_nthreads, num_t1w,
                          name='anat_template_wf'):
    """
    Generate a canonically-oriented, structural average from all input T1w images.

    Workflow Graph
        .. workflow::
            :graph2use: orig
            :simple_form: yes

            from smriprep.workflows.anatomical import init_anat_template_wf
            wf = init_anat_template_wf(
                longitudinal=False, omp_nthreads=1, num_t1w=1)

    Parameters
    ----------
    longitudinal : :obj:`bool`
        Create unbiased structural average, regardless of number of inputs
        (may increase runtime)
    omp_nthreads : :obj:`int`
        Maximum number of threads an individual process may use
    num_t1w : :obj:`int`
        Number of T1w images
    name : :obj:`str`, optional
        Workflow name (default: anat_template_wf)

    Inputs
    ------
    t1w
        List of T1-weighted structural images

    Outputs
    -------
    t1w_ref
        Structural reference averaging input T1w images, defining the T1w space.
    t1w_realign_xfm
        List of affine transforms to realign input T1w images
    out_report
        Conformation report

    """
    workflow = Workflow(name=name)

    if num_t1w > 1:
        # Boilerplate text describing the multi-image averaging step.
        workflow.__desc__ = """\
A T1w-reference map was computed after registration of
{num_t1w} T1w images (after INU-correction) using
`mri_robust_template` [FreeSurfer {fs_ver}, @fs_template].
""".format(num_t1w=num_t1w, fs_ver=fs.Info().looseversion() or '<ver>')

    inputnode = pe.Node(niu.IdentityInterface(fields=['t1w']), name='inputnode')
    outputnode = pe.Node(niu.IdentityInterface(
        fields=['t1w_ref', 't1w_valid_list', 't1w_realign_xfm', 'out_report']),
        name='outputnode')

    # 0. Reorient T1w image(s) to RAS and resample to common voxel space
    t1w_ref_dimensions = pe.Node(TemplateDimensions(), name='t1w_ref_dimensions')
    t1w_conform = pe.MapNode(Conform(), iterfield='in_file', name='t1w_conform')

    workflow.connect([
        (inputnode, t1w_ref_dimensions, [('t1w', 't1w_list')]),
        (t1w_ref_dimensions, t1w_conform, [
            ('t1w_valid_list', 'in_file'),
            ('target_zooms', 'target_zooms'),
            ('target_shape', 'target_shape')]),
        (t1w_ref_dimensions, outputnode, [('out_report', 'out_report'),
                                          ('t1w_valid_list', 't1w_valid_list')]),
    ])

    if num_t1w == 1:
        # Single input: no averaging needed — pass the conformed image through
        # and emit a packaged identity transform as the "realignment".
        get1st = pe.Node(niu.Select(index=[0]), name='get1st')
        outputnode.inputs.t1w_realign_xfm = [pkgr(
            'smriprep', 'data/itkIdentityTransform.txt')]

        workflow.connect([
            (t1w_conform, get1st, [('out_file', 'inlist')]),
            (get1st, outputnode, [('out', 't1w_ref')]),
        ])
        return workflow

    # Several T1w images: compute per-image conformation transforms so they
    # can later be composed with the template-building transforms.
    t1w_conform_xfm = pe.MapNode(
        LTAConvert(in_lta='identity.nofile', out_lta=True),
        iterfield=['source_file', 'target_file'],
        name='t1w_conform_xfm')

    # 1. Template (only if several T1w images)
    # 1a. Correct for bias field: the bias field is an additive factor
    #     in log-transformed intensity units. Therefore, it is not a linear
    #     combination of fields and N4 fails with merged images.
    # 1b. Align and merge if several T1w images are provided
    n4_correct = pe.MapNode(
        N4BiasFieldCorrection(dimension=3, copy_header=True),
        iterfield='input_image', name='n4_correct',
        n_procs=1)  # n_procs=1 for reproducibility
    # StructuralReference is fs.RobustTemplate if > 1 volume, copying otherwise
    t1w_merge = pe.Node(
        StructuralReference(auto_detect_sensitivity=True,
                            initial_timepoint=1,      # For deterministic behavior
                            intensity_scaling=True,   # 7-DOF (rigid + intensity)
                            subsample_threshold=200,
                            fixed_timepoint=not longitudinal,
                            no_iteration=not longitudinal,
                            transform_outputs=True,
                            ),
        mem_gb=2 * num_t1w - 1,  # memory scales with the number of inputs
        name='t1w_merge')

    # 2. Reorient template to RAS, if needed (mri_robust_template may set to LIA)
    t1w_reorient = pe.Node(image.Reorient(), name='t1w_reorient')

    concat_affines = pe.MapNode(
        ConcatenateLTA(out_type='RAS2RAS', invert_out=True),
        iterfield=['in_lta1', 'in_lta2'],
        name='concat_affines')

    lta_to_itk = pe.MapNode(LTAConvert(out_itk=True), iterfield=['in_lta'],
                            name='lta_to_itk')

    def _set_threads(in_list, maximum):
        # Cap robust-template threads at the number of inputs.
        return min(len(in_list), maximum)

    workflow.connect([
        (t1w_ref_dimensions, t1w_conform_xfm, [('t1w_valid_list', 'source_file')]),
        (t1w_conform, t1w_conform_xfm, [('out_file', 'target_file')]),
        (t1w_conform, n4_correct, [('out_file', 'input_image')]),
        (t1w_conform, t1w_merge, [
            (('out_file', _set_threads, omp_nthreads), 'num_threads'),
            (('out_file', add_suffix, '_template'), 'out_file')]),
        (n4_correct, t1w_merge, [('output_image', 'in_files')]),
        (t1w_merge, t1w_reorient, [('out_file', 'in_file')]),
        # Combine orientation and template transforms
        (t1w_conform_xfm, concat_affines, [('out_lta', 'in_lta1')]),
        (t1w_merge, concat_affines, [('transform_outputs', 'in_lta2')]),
        (concat_affines, lta_to_itk, [('out_file', 'in_lta')]),
        # Output
        (t1w_reorient, outputnode, [('out_file', 't1w_ref')]),
        (lta_to_itk, outputnode, [('out_itk', 't1w_realign_xfm')]),
    ])

    return workflow
def init_infant_brain_extraction_wf(
    ants_affine_init=False,
    bspline_fitting_distance=200,
    debug=False,
    in_template="MNIInfant",
    template_specs=None,
    interim_checkpoints=True,
    mem_gb=3.0,
    mri_scheme="T2w",
    name="infant_brain_extraction_wf",
    atropos_model=None,
    omp_nthreads=None,
    output_dir=None,
    use_float=True,
):
    """
    Build an atlas-based brain extraction pipeline for infant T2w MRI data.

    Parameters
    ----------
    ants_affine_init : :obj:`bool`, optional
        Set-up a pre-initialization step with ``antsAI``
        to account for mis-oriented images.

    """
    inputnode = pe.Node(niu.IdentityInterface(fields=["in_files", "in_mask"]),
                        name="inputnode")
    outputnode = pe.Node(niu.IdentityInterface(
        fields=["out_corrected", "out_brain", "out_mask"]), name="outputnode")

    template_specs = template_specs or {}
    # Find a suitable target template in TemplateFlow
    tpl_target_path = get_template(in_template, suffix=mri_scheme,
                                   **template_specs)
    if not tpl_target_path:
        raise RuntimeError(
            f"An instance of template <tpl-{in_template}> with MR scheme '{mri_scheme}'"
            " could not be found.")

    # tpl_brainmask_path = get_template(
    #     in_template, desc="brain", suffix="probseg", **template_specs
    # )
    # if not tpl_brainmask_path:

    # ignore probseg for the time being
    tpl_brainmask_path = get_template(
        in_template, desc="brain", suffix="mask", **template_specs)

    tpl_regmask_path = get_template(
        in_template, desc="BrainCerebellumExtraction", suffix="mask",
        **template_specs)

    # validate images
    val_tmpl = pe.Node(ValidateImage(), name='val_tmpl')
    val_tmpl.inputs.in_file = _pop(tpl_target_path)

    val_target = pe.Node(ValidateImage(), name='val_target')

    # Resample both target and template to a controlled, isotropic resolution
    res_tmpl = pe.Node(RegridToZooms(zooms=HIRES_ZOOMS), name="res_tmpl")  # testing
    res_target = pe.Node(RegridToZooms(zooms=HIRES_ZOOMS), name="res_target")  # testing
    gauss_tmpl = pe.Node(niu.Function(function=_gauss_filter), name="gauss_tmpl")

    # Spatial normalization step
    lap_tmpl = pe.Node(ImageMath(operation="Laplacian", op2="0.4 1"),
                       name="lap_tmpl")
    lap_target = pe.Node(ImageMath(operation="Laplacian", op2="0.4 1"),
                         name="lap_target")

    # Merge image nodes
    mrg_target = pe.Node(niu.Merge(2), name="mrg_target")
    mrg_tmpl = pe.Node(niu.Merge(2), name="mrg_tmpl")

    # Normalize the Laplacian channels to [0, 1] before merging.
    norm_lap_tmpl = pe.Node(niu.Function(function=_trunc), name="norm_lap_tmpl")
    norm_lap_tmpl.inputs.dtype = "float32"
    norm_lap_tmpl.inputs.out_max = 1.0
    norm_lap_tmpl.inputs.percentile = (0.01, 99.99)
    norm_lap_tmpl.inputs.clip_max = None

    norm_lap_target = pe.Node(niu.Function(function=_trunc),
                              name="norm_lap_target")
    norm_lap_target.inputs.dtype = "float32"
    norm_lap_target.inputs.out_max = 1.0
    norm_lap_target.inputs.percentile = (0.01, 99.99)
    norm_lap_target.inputs.clip_max = None

    # Set up initial spatial normalization
    ants_params = "testing" if debug else "precise"
    norm = pe.Node(
        Registration(from_file=pkgr_fn(
            "niworkflows.data", f"antsBrainExtraction_{ants_params}.json")),
        name="norm",
        n_procs=omp_nthreads,
        mem_gb=mem_gb,
    )
    norm.inputs.float = use_float

    # main workflow
    wf = pe.Workflow(name)

    # Create a buffer interface as a cache for the actual inputs to registration
    buffernode = pe.Node(
        niu.IdentityInterface(fields=["hires_target", "smooth_target"]),
        name="buffernode")

    # truncate target intensity for N4 correction
    clip_target = pe.Node(
        niu.Function(function=_trunc),
        name="clip_target",
    )
    clip_tmpl = pe.Node(
        niu.Function(function=_trunc),
        name="clip_tmpl",
    )
    # clip_tmpl.inputs.in_file = _pop(tpl_target_path)

    # INU correction of the target image
    init_n4 = pe.Node(
        N4BiasFieldCorrection(
            dimension=3,
            save_bias=False,
            copy_header=True,
            n_iterations=[50] * (4 - debug),  # fewer iterations when debugging
            convergence_threshold=1e-7,
            shrink_factor=4,
            bspline_fitting_distance=bspline_fitting_distance,
        ),
        n_procs=omp_nthreads,
        name="init_n4",
    )
    clip_inu = pe.Node(
        niu.Function(function=_trunc),
        name="clip_inu",
    )
    gauss_target = pe.Node(niu.Function(function=_gauss_filter),
                           name="gauss_target")
    wf.connect([
        # truncation, resampling, and initial N4
        (inputnode, val_target, [(("in_files", _pop), "in_file")]),
        # (inputnode, res_target, [(("in_files", _pop), "in_file")]),
        (val_target, res_target, [("out_file", "in_file")]),
        (res_target, clip_target, [("out_file", "in_file")]),
        (val_tmpl, clip_tmpl, [("out_file", "in_file")]),
        (clip_tmpl, res_tmpl, [("out", "in_file")]),
        (clip_target, init_n4, [("out", "input_image")]),
        (init_n4, clip_inu, [("output_image", "in_file")]),
        (clip_inu, gauss_target, [("out", "in_file")]),
        (clip_inu, buffernode, [("out", "hires_target")]),
        (gauss_target, buffernode, [("out", "smooth_target")]),
        (res_tmpl, gauss_tmpl, [("out_file", "in_file")]),
        # (clip_tmpl, gauss_tmpl, [("out", "in_file")]),
    ])

    # Graft a template registration-mask if present
    if tpl_regmask_path:
        hires_mask = pe.Node(ApplyTransforms(
            input_image=_pop(tpl_regmask_path),
            transforms="identity",
            interpolation="NearestNeighbor",
            float=True),
            name="hires_mask", mem_gb=1)
        wf.connect([
            (res_tmpl, hires_mask, [("out_file", "reference_image")]),
        ])

    # Project the template brain mask into target space via the inverse
    # registration transforms, then binarize it.
    map_brainmask = pe.Node(
        ApplyTransforms(interpolation="Gaussian", float=True),
        name="map_brainmask",
        mem_gb=1)
    map_brainmask.inputs.input_image = str(tpl_brainmask_path)

    thr_brainmask = pe.Node(Binarize(thresh_low=0.80), name="thr_brainmask")

    bspline_grid = pe.Node(niu.Function(function=_bspline_distance),
                           name="bspline_grid")

    # Refine INU correction
    final_n4 = pe.Node(
        N4BiasFieldCorrection(
            dimension=3,
            save_bias=True,
            copy_header=True,
            n_iterations=[50] * 5,
            convergence_threshold=1e-7,
            rescale_intensities=True,
            shrink_factor=4,
        ),
        n_procs=omp_nthreads,
        name="final_n4",
    )
    final_mask = pe.Node(ApplyMask(), name="final_mask")

    if atropos_model is None:
        atropos_model = tuple(ATROPOS_MODELS[mri_scheme].values())

    atropos_wf = init_atropos_wf(
        use_random_seed=False,
        omp_nthreads=omp_nthreads,
        mem_gb=mem_gb,
        in_segmentation_model=atropos_model,
    )
    # if tpl_regmask_path:
    #     atropos_wf.get_node('inputnode').inputs.in_mask_dilated = tpl_regmask_path

    # NOTE(review): atropos_wf/sel_wm are built but their connections below are
    # commented out, so the ATROPOS refinement is currently inert.
    sel_wm = pe.Node(niu.Select(index=atropos_model[-1] - 1), name='sel_wm',
                     run_without_submitting=True)

    wf.connect([
        (inputnode, map_brainmask, [(("in_files", _pop), "reference_image")]),
        (inputnode, final_n4, [(("in_files", _pop), "input_image")]),
        (inputnode, bspline_grid, [(("in_files", _pop), "in_file")]),
        # (bspline_grid, final_n4, [("out", "bspline_fitting_distance")]),
        (bspline_grid, final_n4, [("out", "args")]),
        # merge laplacian and original images
        (buffernode, lap_target, [("smooth_target", "op1")]),
        (buffernode, mrg_target, [("hires_target", "in1")]),
        (lap_target, norm_lap_target, [("output_image", "in_file")]),
        (norm_lap_target, mrg_target, [("out", "in2")]),
        # Template massaging
        (res_tmpl, lap_tmpl, [("out_file", "op1")]),
        (res_tmpl, mrg_tmpl, [("out_file", "in1")]),
        (lap_tmpl, norm_lap_tmpl, [("output_image", "in_file")]),
        (norm_lap_tmpl, mrg_tmpl, [("out", "in2")]),
        # spatial normalization
        (mrg_target, norm, [("out", "moving_image")]),
        (mrg_tmpl, norm, [("out", "fixed_image")]),
        (norm, map_brainmask, [
            ("reverse_transforms", "transforms"),
            ("reverse_invert_flags", "invert_transform_flags")]),
        (map_brainmask, thr_brainmask, [("output_image", "in_file")]),
        # take a second pass of N4
        (map_brainmask, final_n4, [("output_image", "weight_image")]),
        (final_n4, final_mask, [("output_image", "in_file")]),
        (thr_brainmask, final_mask, [("out_mask", "in_mask")]),
        (final_n4, outputnode, [("output_image", "out_corrected")]),
        (thr_brainmask, outputnode, [("out_mask", "out_mask")]),
        (final_mask, outputnode, [("out_file", "out_brain")]),
    ])

    # wf.disconnect([
    #     (get_brainmask, apply_mask, [('output_image', 'mask_file')]),
    #     (copy_xform, outputnode, [('out_mask', 'out_mask')]),
    # ])
    # wf.connect([
    #     (init_n4, atropos_wf, [
    #         ('output_image', 'inputnode.in_files')]),  # intensity image
    #     (thr_brainmask, atropos_wf, [
    #         ('out_mask', 'inputnode.in_mask')]),
    #     (atropos_wf, sel_wm, [('outputnode.out_tpms', 'inlist')]),
    #     (sel_wm, final_n4, [('out', 'weight_image')]),
    # ])
    # wf.connect([
    #     (atropos_wf, outputnode, [
    #         ('outputnode.out_mask', 'out_mask'),
    #         ('outputnode.out_segm', 'out_segm'),
    #         ('outputnode.out_tpms', 'out_tpms')]),
    # ])

    if tpl_regmask_path:
        wf.connect([
            (hires_mask, norm, [("output_image", "fixed_image_masks")]),
            # (hires_mask, atropos_wf, [
            #     ("output_image", "inputnode.in_mask_dilated")]),
        ])

    if interim_checkpoints:
        # Render a before/after SVG report of the final registration.
        final_apply = pe.Node(
            ApplyTransforms(interpolation="BSpline", float=True),
            name="final_apply",
            mem_gb=1)
        final_report = pe.Node(SimpleBeforeAfter(
            before_label=f"tpl-{in_template}",
            after_label="target",
            out_report="final_report.svg"),
            name="final_report")
        wf.connect([
            (inputnode, final_apply, [(("in_files", _pop), "reference_image")]),
            (res_tmpl, final_apply, [("out_file", "input_image")]),
            (norm, final_apply, [
                ("reverse_transforms", "transforms"),
                ("reverse_invert_flags", "invert_transform_flags")]),
            (final_apply, final_report, [("output_image", "before")]),
            (outputnode, final_report, [("out_corrected", "after"),
                                        ("out_mask", "wm_seg")]),
        ])

    if output_dir:
        from nipype.interfaces.io import DataSink
        # output_dir is treated as a pathlib.Path (uses .parent/.name).
        ds_final_inu = pe.Node(DataSink(base_directory=str(output_dir.parent)),
                               name="ds_final_inu")
        ds_final_msk = pe.Node(DataSink(base_directory=str(output_dir.parent)),
                               name="ds_final_msk")
        ds_report = pe.Node(DataSink(base_directory=str(output_dir.parent)),
                            name="ds_report")

        wf.connect([
            (outputnode, ds_final_inu, [
                ("out_corrected", f"{output_dir.name}.@inu_corrected")]),
            (outputnode, ds_final_msk, [
                ("out_mask", f"{output_dir.name}.@brainmask")]),
            (final_report, ds_report, [
                ("out_report", f"{output_dir.name}.@report")]),
        ])

    if not ants_affine_init:
        return wf

    # Initialize transforms with antsAI
    lowres_tmpl = pe.Node(RegridToZooms(zooms=LOWRES_ZOOMS), name="lowres_tmpl")
    lowres_target = pe.Node(RegridToZooms(zooms=LOWRES_ZOOMS),
                            name="lowres_target")

    init_aff = pe.Node(
        AI(
            metric=("Mattes", 32, "Regular", 0.25),
            transform=("Affine", 0.1),
            search_factor=(15, 0.1),
            principal_axes=False,
            convergence=(10, 1e-6, 10),
            search_grid=(40, (0, 40, 40)),
            verbose=True,
        ),
        name="init_aff",
        n_procs=omp_nthreads,
    )
    wf.connect([
        (gauss_tmpl, lowres_tmpl, [("out", "in_file")]),
        (lowres_tmpl, init_aff, [("out_file", "fixed_image")]),
        (gauss_target, lowres_target, [("out", "in_file")]),
        (lowres_target, init_aff, [("out_file", "moving_image")]),
        (init_aff, norm, [("output_transform", "initial_moving_transform")]),
    ])

    if tpl_regmask_path:
        lowres_mask = pe.Node(ApplyTransforms(
            input_image=_pop(tpl_regmask_path),
            transforms="identity",
            interpolation="MultiLabel",
            float=True),
            name="lowres_mask", mem_gb=1)
        wf.connect([
            (lowres_tmpl, lowres_mask, [("out_file", "reference_image")]),
            (lowres_mask, init_aff, [("output_image", "fixed_image_mask")]),
        ])

    if interim_checkpoints:
        # Report the quality of the antsAI initialization as well.
        init_apply = pe.Node(
            ApplyTransforms(interpolation="BSpline", float=True),
            name="init_apply",
            mem_gb=1)
        init_report = pe.Node(SimpleBeforeAfter(
            before_label=f"tpl-{in_template}",
            after_label="target",
            out_report="init_report.svg"),
            name="init_report")
        wf.connect([
            (lowres_target, init_apply, [("out_file", "input_image")]),
            (res_tmpl, init_apply, [("out_file", "reference_image")]),
            (init_aff, init_apply, [("output_transform", "transforms")]),
            (init_apply, init_report, [("output_image", "after")]),
            (res_tmpl, init_report, [("out_file", "before")]),
        ])

        if output_dir:
            ds_init_report = pe.Node(
                DataSink(base_directory=str(output_dir.parent)),
                name="ds_init_report")
            wf.connect(init_report, "out_report", ds_init_report,
                       f"{output_dir.name}.@init_report")

    return wf
def preprocess(data_dir, subject, atlas_dir, output_dir):
    """Register a subject's DWI b0 image to MNI152 space and normalize it.

    Pipeline (all intermediates live in a temporary directory):
    reorient to standard -> robust FOV crop -> BET (first pass) -> N4 bias
    field correction -> FLIRT registration to the MNI152 atlas -> BET
    (second pass, with mask) -> z-score intensity normalization.

    Parameters
    ----------
    data_dir : str
        Root directory holding ``<subject>/DWI_b0.nii.gz`` raw images.
    subject : str
        Subject identifier (sub-directory name).
    atlas_dir : str
        Directory containing ``mni152_downsample.nii.gz``.
    output_dir : str
        Root directory where ``<subject>/DWI_b0.nii.gz``, the brain mask and
        the FLIRT transform matrix are written.

    Returns
    -------
    None
        All results are written to ``output_dir`` as side effects.
    """
    out_b0 = os.path.join(output_dir, subject, 'DWI_b0.nii.gz')
    # Guard clauses: skip subjects already processed or missing raw data
    # (replaces the original deeply nested if/else with dead `else: pass`).
    if os.path.exists(out_b0):
        return
    if not os.path.exists(os.path.join(data_dir, subject, 'DWI_b0.nii.gz')):
        return

    with tempfile.TemporaryDirectory() as temp_dir:
        # reorient to MNI standard direction
        reorient = fsl.utils.Reorient2Std()
        reorient.inputs.in_file = os.path.join(data_dir, subject,
                                               'DWI_b0.nii.gz')
        reorient.inputs.out_file = os.path.join(temp_dir,
                                                'DWI_b0_reorient.nii.gz')
        reorient.run()

        # robust fov to remove neck and lower head automatically
        rf = fsl.utils.RobustFOV()
        rf.inputs.in_file = os.path.join(temp_dir, 'DWI_b0_reorient.nii.gz')
        rf.inputs.out_roi = os.path.join(temp_dir, 'DWI_b0_RF.nii.gz')
        rf.run()

        # skull stripping first run
        btr1 = fsl.BET()
        btr1.inputs.in_file = os.path.join(temp_dir, 'DWI_b0_RF.nii.gz')
        btr1.inputs.robust = True
        btr1.inputs.frac = 0.5
        btr1.inputs.out_file = os.path.join(temp_dir,
                                            'BET_b0_first_run.nii.gz')
        btr1.run()
        print('BET pre-stripping...')

        # N4 bias field correction
        n4 = N4BiasFieldCorrection()
        n4.inputs.dimension = 3
        n4.inputs.input_image = os.path.join(temp_dir,
                                             'BET_b0_first_run.nii.gz')
        n4.inputs.bspline_fitting_distance = 300
        n4.inputs.shrink_factor = 3
        n4.inputs.n_iterations = [50, 50, 30, 20]
        n4.inputs.output_image = os.path.join(temp_dir,
                                              'BET_b0_first_run_n4.nii.gz')
        n4.run()
        print('N4 Bias Field Correction running...')

        # registration of T2(DWI_b0) to MNI152
        flt = fsl.FLIRT(bins=640, cost_func='mutualinfo', interp='spline',
                        searchr_x=[-180, 180], searchr_y=[-180, 180],
                        searchr_z=[-180, 180], dof=12)
        flt.inputs.in_file = os.path.join(temp_dir,
                                          'BET_b0_first_run_n4.nii.gz')
        # os.path.join instead of string concatenation for the atlas path
        flt.inputs.reference = os.path.join(atlas_dir,
                                            'mni152_downsample.nii.gz')
        flt.inputs.out_file = os.path.join(temp_dir,
                                           'BET_b0_first_run_r.nii.gz')
        flt.inputs.out_matrix_file = os.path.join(output_dir, subject,
                                                  'B0_r_transform.mat')
        flt.run()
        print('FSL registration...')

        # second pass of BET skull stripping; mask=True also writes
        # DWI_b0_mask.nii.gz next to out_file in the output directory
        btr2 = fsl.BET()
        btr2.inputs.in_file = os.path.join(temp_dir,
                                           'BET_b0_first_run_r.nii.gz')
        btr2.inputs.robust = True
        btr2.inputs.frac = 0.35
        btr2.inputs.mask = True
        btr2.inputs.out_file = out_b0
        btr2.run()
        print('BET skull stripping...')

        # copy mask file to output folder
        shutil.copy2(
            os.path.join(output_dir, subject, 'DWI_b0_mask.nii.gz'),
            os.path.join(temp_dir, 'DWI_b0_mask.nii.gz'))

        # z score normalization (in place, masked to the brain)
        DWI_b0_final = nib.load(out_b0)
        mask = nib.load(os.path.join(temp_dir, 'DWI_b0_mask.nii.gz'))
        DWI_b0_norm = zscore_normalize(DWI_b0_final, mask)
        nib.save(DWI_b0_norm, out_b0)

        print('.........................')
        print('patient %s registration done' % subject)
def correct_bias(in_file, out_file):
    """Apply ANTs N4 bias-field correction to *in_file*.

    Parameters
    ----------
    in_file : str
        Path to the image to correct.
    out_file : str
        Path where the corrected image is written.

    Returns
    -------
    str
        Path of the corrected output image, as reported by the interface.
    """
    n4 = N4BiasFieldCorrection()
    n4.inputs.input_image = in_file
    n4.inputs.output_image = out_file
    result = n4.run()
    return result.outputs.output_image
def init_brain_extraction_wf(
    name="brain_extraction_wf",
    in_template="OASIS30ANTs",
    template_spec=None,
    use_float=True,
    normalization_quality="precise",
    omp_nthreads=None,
    mem_gb=3.0,
    bids_suffix="T1w",
    atropos_refine=True,
    atropos_use_random_seed=True,
    atropos_model=None,
    use_laplacian=True,
    bspline_fitting_distance=200,
):
    """
    Build a workflow for atlas-based brain extraction on anatomical MRI data.

    This is a Nipype implementation of atlas-based brain extraction inspired by
    the official ANTs' ``antsBrainExtraction.sh`` workflow (only for 3D images).

    The workflow follows the following structure:

      1. Step 1 performs several clerical tasks (preliminary INU correction,
         calculating the Laplacian of inputs, affine initialization) and the
         core spatial normalization.
      2. Maps the brain mask into target space using the normalization
         calculated in 1.
      3. Superstep 1b: binarization of the brain mask
      4. Maps the WM (white matter) probability map from the template, if such
         prior exists. Combines the BS (brainstem) probability map before
         mapping if the WM and BS are given separately (as it is the case for
         ``OASIS30ANTs``.)
      5. Run a second N4 INU correction round, using the prior mapped into
         individual step in step 4 if available.
      6. Superstep 6: apply ATROPOS on the INU-corrected result of step 5, and
         massage its outputs
      7. Superstep 7: use results from 4 to refine the brain mask
      8. If exist, use priors from step 4, calculate the overlap of the
         posteriors estimated in step 4 to select that overlapping the most
         with the WM+BS prior from the template. Combine that posterior with
         the refined brain mask and pass it on to the next step.
      9. Apply a final N4 using the refined brain mask (or the map calculated
         in step 8 if priors were found) as weights map for the algorithm.

    Workflow Graph
        .. workflow::
            :graph2use: orig
            :simple_form: yes

            from niworkflows.anat.ants import init_brain_extraction_wf
            wf = init_brain_extraction_wf()

    Parameters
    ----------
    in_template : str
        Name of the skull-stripping template ('OASIS30ANTs', 'NKI', or path).
        The brain template from which regions will be projected
        Anatomical template created using e.g. LPBA40 data set with
        ``buildtemplateparallel.sh`` in ANTs.
        The workflow will automatically search for a brain probability mask
        created using e.g. LPBA40 data set which have brain masks defined,
        and warped to anatomical template and averaged resulting in a
        probability image.
    use_float : bool
        Whether single precision should be used
    normalization_quality : str
        Use more precise or faster registration parameters
        (default: ``precise``, other possible values: ``testing``)
    omp_nthreads : int
        Maximum number of threads an individual process may use
    mem_gb : float
        Estimated peak memory consumption of the most hungry nodes
        in the workflow
    bids_suffix : str
        Sequence type of the first input image. For a list of acceptable values
        see https://bids-specification.readthedocs.io/en/latest/\
04-modality-specific-files/01-magnetic-resonance-imaging-data.html#anatomy-imaging-data
    atropos_refine : bool
        Enables or disables the whole ATROPOS sub-workflow
    atropos_use_random_seed : bool
        Whether ATROPOS should generate a random seed based on the
        system's clock
    atropos_model : tuple or None
        Allows to specify a particular segmentation model, overwriting
        the defaults based on ``bids_suffix``
    use_laplacian : bool
        Enables or disables alignment of the Laplacian as an additional
        criterion for image registration quality (default: True)
    bspline_fitting_distance : float
        The size of the b-spline mesh grid elements, in mm (default: 200)
    name : str, optional
        Workflow name (default: antsBrainExtraction)

    Inputs
    ------
    in_files : list
        List of input anatomical images to be brain-extracted,
        typically T1-weighted.
        If a list of anatomical images is provided, subsequently
        specified images are used during the segmentation process.
        However, only the first image is used in the registration
        of priors.
        Our suggestion would be to specify the T1w as the first image.
    in_mask : list, optional
        Mask used for registration to limit the metric
        computation to a specific region.

    Outputs
    -------
    out_file : str
        Skull-stripped and :abbr:`INU (intensity non-uniformity)`-corrected
        ``in_files``
    out_mask : str
        Calculated brain mask
    bias_corrected : str
        The ``in_files`` input images, after :abbr:`INU (intensity
        non-uniformity)` correction, before skull-stripping.
    bias_image : str
        The :abbr:`INU (intensity non-uniformity)` field estimated for each
        input in ``in_files``
    out_segm : str
        Output segmentation by ATROPOS
    out_tpms : str
        Output :abbr:`TPMs (tissue probability maps)` by ATROPOS

    """
    from packaging.version import parse as parseversion, Version
    from templateflow.api import get as get_template

    wf = pe.Workflow(name)

    template_spec = template_spec or {}

    # suffix passed via spec takes precedence
    template_spec["suffix"] = template_spec.get("suffix", bids_suffix)

    tpl_target_path, common_spec = get_template_specs(
        in_template, template_spec=template_spec)

    # Get probabilistic brain mask if available
    tpl_mask_path = get_template(
        in_template, label="brain", suffix="probseg", **common_spec
    ) or get_template(in_template, desc="brain", suffix="mask", **common_spec)

    if omp_nthreads is None or omp_nthreads < 1:
        omp_nthreads = cpu_count()

    inputnode = pe.Node(niu.IdentityInterface(fields=["in_files", "in_mask"]),
                        name="inputnode")

    # Try to find a registration mask, set if available
    tpl_regmask_path = get_template(
        in_template, desc="BrainCerebellumExtraction", suffix="mask",
        **common_spec)
    if tpl_regmask_path:
        inputnode.inputs.in_mask = str(tpl_regmask_path)

    outputnode = pe.Node(
        niu.IdentityInterface(fields=[
            "out_file",
            "out_mask",
            "bias_corrected",
            "bias_image",
            "out_segm",
            "out_tpms",
        ]),
        name="outputnode",
    )

    # Truncate intensity outliers before the first N4 pass.
    trunc = pe.MapNode(
        ImageMath(operation="TruncateImageIntensity", op2="0.01 0.999 256",
                  copy_header=True),
        name="truncate_images",
        iterfield=["op1"],
    )

    inu_n4 = pe.MapNode(
        N4BiasFieldCorrection(
            dimension=3,
            save_bias=False,
            copy_header=True,
            n_iterations=[50] * 4,
            convergence_threshold=1e-7,
            shrink_factor=4,
            bspline_fitting_distance=bspline_fitting_distance,
        ),
        n_procs=omp_nthreads,
        name="inu_n4",
        iterfield=["input_image"],
    )

    # Low-resolution (4mm) copies of template and target for antsAI.
    res_tmpl = pe.Node(
        RegridToZooms(in_file=tpl_target_path, zooms=(4, 4, 4), smooth=True),
        name="res_tmpl",
    )
    res_target = pe.Node(RegridToZooms(zooms=(4, 4, 4), smooth=True),
                         name="res_target")

    # NOTE(review): these four nodes are re-defined below inside the
    # `if use_laplacian:` branch; only the later definitions are connected —
    # confirm whether these earlier ones are dead code.
    lap_tmpl = pe.Node(ImageMath(operation="Laplacian", op2="1.5 1",
                                 copy_header=True), name="lap_tmpl")
    lap_tmpl.inputs.op1 = tpl_target_path
    lap_target = pe.Node(
        ImageMath(operation="Laplacian", op2="1.5 1", copy_header=True),
        name="lap_target",
    )
    mrg_tmpl = pe.Node(niu.Merge(2), name="mrg_tmpl")
    mrg_tmpl.inputs.in1 = tpl_target_path
    mrg_target = pe.Node(niu.Merge(2), name="mrg_target")

    # Initialize transforms with antsAI
    init_aff = pe.Node(
        AI(
            metric=("Mattes", 32, "Regular", 0.25),
            transform=("Affine", 0.1),
            search_factor=(15, 0.1),
            principal_axes=False,
            convergence=(10, 1e-6, 10),
            verbose=True,
        ),
        name="init_aff",
        n_procs=omp_nthreads,
    )

    # Tolerate missing ANTs at construction time
    try:
        init_aff.inputs.search_grid = (40, (0, 40, 40))
    except ValueError:
        warn("antsAI's option --search-grid was added in ANTS 2.3.0 "
             f"({init_aff.interface.version} found.)")

    # Set up spatial normalization
    settings_file = ("antsBrainExtraction_%s.json" if use_laplacian
                     else "antsBrainExtractionNoLaplacian_%s.json")
    norm = pe.Node(
        Registration(from_file=pkgr_fn("niworkflows.data",
                                       settings_file % normalization_quality)),
        name="norm",
        n_procs=omp_nthreads,
        mem_gb=mem_gb,
    )
    norm.inputs.float = use_float
    # The "fixed_image_mask" trait was pluralized in ANTs >= 2.2.0.
    fixed_mask_trait = "fixed_image_mask"
    if norm.interface.version and parseversion(
            norm.interface.version) >= Version("2.2.0"):
        fixed_mask_trait += "s"

    map_brainmask = pe.Node(
        ApplyTransforms(interpolation="Gaussian"),
        name="map_brainmask",
        mem_gb=1,
    )
    map_brainmask.inputs.input_image = str(tpl_mask_path)

    thr_brainmask = pe.Node(
        ThresholdImage(
            dimension=3,
            th_low=0.5,
            th_high=1.0,
            inside_value=1,
            outside_value=0,
            copy_header=True,
        ),
        name="thr_brainmask",
    )

    # Refine INU correction
    inu_n4_final = pe.MapNode(
        N4BiasFieldCorrection(
            dimension=3,
            save_bias=True,
            copy_header=True,
            n_iterations=[50] * 5,
            convergence_threshold=1e-7,
            shrink_factor=4,
            bspline_fitting_distance=bspline_fitting_distance,
        ),
        n_procs=omp_nthreads,
        name="inu_n4_final",
        iterfield=["input_image"],
    )
    try:
        inu_n4_final.inputs.rescale_intensities = True
    except ValueError:
        warn(
            "N4BiasFieldCorrection's --rescale-intensities option was added in ANTS 2.1.0 "
            f"({inu_n4_final.interface.version} found.) Please consider upgrading.",
            UserWarning,
        )

    # Apply mask
    apply_mask = pe.MapNode(ApplyMask(), iterfield=["in_file"],
                            name="apply_mask")

    # fmt: off
    wf.connect([
        (inputnode, trunc, [("in_files", "op1")]),
        (inputnode, inu_n4_final, [("in_files", "input_image")]),
        (inputnode, init_aff, [("in_mask", "fixed_image_mask")]),
        (inputnode, norm, [("in_mask", fixed_mask_trait)]),
        (inputnode, map_brainmask, [(("in_files", _pop), "reference_image")]),
        (trunc, inu_n4, [("output_image", "input_image")]),
        (inu_n4, res_target, [(("output_image", _pop), "in_file")]),
        (res_tmpl, init_aff, [("out_file", "fixed_image")]),
        (res_target, init_aff, [("out_file", "moving_image")]),
        (init_aff, norm, [("output_transform", "initial_moving_transform")]),
        (norm, map_brainmask, [
            ("reverse_transforms", "transforms"),
            ("reverse_invert_flags", "invert_transform_flags"),
        ]),
        (map_brainmask, thr_brainmask, [("output_image", "input_image")]),
        (map_brainmask, inu_n4_final, [("output_image", "weight_image")]),
        (inu_n4_final, apply_mask, [("output_image", "in_file")]),
        (thr_brainmask, apply_mask, [("output_image", "in_mask")]),
        (thr_brainmask, outputnode, [("output_image", "out_mask")]),
        (inu_n4_final, outputnode, [("output_image", "bias_corrected"),
                                    ("bias_image", "bias_image")]),
        (apply_mask, outputnode, [("out_file", "out_file")]),
    ])
    # fmt: on

    # If a WM prior exists in the template, use it (plus brainstem, when
    # available) as the weight image of the final N4 pass instead of the
    # mapped brain mask.
    wm_tpm = (get_template(in_template, label="WM", suffix="probseg",
                           **common_spec) or None)
    if wm_tpm:
        map_wmmask = pe.Node(
            ApplyTransforms(interpolation="Gaussian"),
            name="map_wmmask",
            mem_gb=1,
        )

        # Add the brain stem if it is found.
        bstem_tpm = (get_template(in_template, label="BS", suffix="probseg",
                                  **common_spec) or None)
        if bstem_tpm:
            full_wm = pe.Node(niu.Function(function=_imsum), name="full_wm")
            full_wm.inputs.op1 = str(wm_tpm)
            full_wm.inputs.op2 = str(bstem_tpm)
            # fmt: off
            wf.connect([(full_wm, map_wmmask, [("out", "input_image")])])
            # fmt: on
        else:
            map_wmmask.inputs.input_image = str(wm_tpm)
        # fmt: off
        wf.disconnect([
            (map_brainmask, inu_n4_final, [("output_image", "weight_image")]),
        ])
        wf.connect([
            (inputnode, map_wmmask, [(("in_files", _pop), "reference_image")]),
            (norm, map_wmmask, [
                ("reverse_transforms", "transforms"),
                ("reverse_invert_flags", "invert_transform_flags"),
            ]),
            (map_wmmask, inu_n4_final, [("output_image", "weight_image")]),
        ])
        # fmt: on

    if use_laplacian:
        lap_tmpl = pe.Node(
            ImageMath(operation="Laplacian", op2="1.5 1", copy_header=True),
            name="lap_tmpl",
        )
        lap_tmpl.inputs.op1 = tpl_target_path
        lap_target = pe.Node(
            ImageMath(operation="Laplacian", op2="1.5 1", copy_header=True),
            name="lap_target",
        )
        mrg_tmpl = pe.Node(niu.Merge(2), name="mrg_tmpl")
        mrg_tmpl.inputs.in1 = tpl_target_path
        mrg_target = pe.Node(niu.Merge(2), name="mrg_target")
        # fmt: off
        wf.connect([
            (inu_n4, lap_target, [(("output_image", _pop), "op1")]),
            (lap_tmpl, mrg_tmpl, [("output_image", "in2")]),
            (inu_n4, mrg_target, [("output_image", "in1")]),
            (lap_target, mrg_target, [("output_image", "in2")]),
            (mrg_tmpl, norm, [("out", "fixed_image")]),
            (mrg_target, norm, [("out", "moving_image")]),
        ])
        # fmt: on
    else:
        norm.inputs.fixed_image = tpl_target_path
        # fmt: off
        wf.connect([
            (inu_n4, norm, [(("output_image", _pop), "moving_image")]),
        ])
        # fmt: on

    if atropos_refine:
        atropos_model = atropos_model or list(
            ATROPOS_MODELS[bids_suffix].values())
        atropos_wf = init_atropos_wf(
            use_random_seed=atropos_use_random_seed,
            omp_nthreads=omp_nthreads,
            mem_gb=mem_gb,
            in_segmentation_model=atropos_model,
            bspline_fitting_distance=bspline_fitting_distance,
            wm_prior=bool(wm_tpm),
        )
        # Re-route the outputs through the ATROPOS sub-workflow.
        # fmt: off
        wf.disconnect([
            (thr_brainmask, outputnode, [("output_image", "out_mask")]),
            (inu_n4_final, outputnode, [("output_image", "bias_corrected"),
                                        ("bias_image", "bias_image")]),
            (apply_mask, outputnode, [("out_file", "out_file")]),
        ])
        wf.connect([
            (inputnode, atropos_wf, [("in_files", "inputnode.in_files")]),
            (inu_n4_final, atropos_wf, [("output_image",
                                         "inputnode.in_corrected")]),
            (thr_brainmask, atropos_wf, [("output_image",
                                          "inputnode.in_mask")]),
            (atropos_wf, outputnode, [
                ("outputnode.out_file", "out_file"),
                ("outputnode.bias_corrected", "bias_corrected"),
                ("outputnode.bias_image", "bias_image"),
                ("outputnode.out_mask", "out_mask"),
                ("outputnode.out_segm", "out_segm"),
                ("outputnode.out_tpms", "out_tpms"),
            ]),
        ])
        # fmt: on
        if wm_tpm:
            # fmt: off
            wf.connect([
                (map_wmmask, atropos_wf, [("output_image",
                                           "inputnode.wm_prior")]),
            ])
            # fmt: on

    return wf
def main():
    """Run an N4 bias-field correction workflow on a single T1w image.

    Command-line arguments:
        t1      -- input T1w file (resolved relative to the current directory)
        bspline -- b-spline fitting distance (mm) passed to N4
        niter   -- iterations per resolution level; N4 runs ``[niter] * 5``

    The image is skull-stripped with the ANTs-based brain-extraction
    workflow, the resulting mask is fed to N4, and the corrected and
    original images are written by a DataSink under ``<cwd>/n4_wf``.
    """
    parser = argparse.ArgumentParser()
    parser.add_argument("t1", help="Input T1 file", type=str)
    parser.add_argument("bspline", help="Bspline distance for N4", type=int)
    parser.add_argument("niter",
                        help="Number of iterations, will be multiplied "
                             "by [niter]*5", type=int)
    args = parser.parse_args()
    bspline = args.bspline
    niter = args.niter

    # Resolve the input image relative to the working directory.
    workdir = os.getcwd()
    t1 = os.path.join(workdir, args.t1)

    # Initialize workflow
    wf = Workflow(name="bias_field")
    wf.base_dir = workdir

    # Input node of list type: the brain-extraction workflow expects a list.
    input_node = pe.Node(niu.IdentityInterface(fields=['in_files']),
                         name='inputnode')
    input_node.inputs.in_files = [t1]

    # Input node of value type: N4 expects a single file.
    single_file_buf = pe.Node(niu.IdentityInterface(fields=['input_file']),
                              name='inputfile')
    single_file_buf.inputs.input_file = t1

    # Initial skullstrip
    ants_wf = init_brain_extraction_wf(in_template='OASIS30ANTs',
                                       atropos_use_random_seed=False,
                                       normalization_quality='precise')

    # Apply N4 bias field correction, parameterized by the CLI arguments.
    n4 = pe.Node(N4BiasFieldCorrection(dimension=3,
                                       save_bias=True,
                                       copy_header=True,
                                       convergence_threshold=1e-7,
                                       shrink_factor=4,
                                       bspline_fitting_distance=bspline,
                                       n_iterations=[niter] * 5),
                 name='n4')

    outputnode = pe.Node(
        niu.IdentityInterface(fields=['corrected_t1', 'orig_t1']),
        name='out')
    datasink = pe.Node(DataSink(base_directory=workdir, container='n4_wf'),
                       name='sink')
    # Shorten nipype's auto-generated folder names in the sink.
    datasink.inputs.substitutions = [('_bspline_fitting_distance_',
                                      'bspline-'),
                                     ('n_iterations_', 'niter-')]

    # nipype connections are conventionally (src, dest, [(out, in), ...]) tuples.
    wf.connect([
        (input_node, ants_wf, [('in_files', 'inputnode.in_files')]),
        (single_file_buf, n4, [('input_file', 'input_image')]),
        (ants_wf, n4, [('outputnode.out_mask', 'mask_image')]),
        (n4, outputnode, [('output_image', 'corrected_t1')]),
        (single_file_buf, outputnode, [('input_file', 'orig_t1')]),
        (outputnode, datasink,
         [('corrected_t1', 'corrected_img.@corrected'),
          ('orig_t1', 'corrected_img.@orig')]),
    ])
    wf.run()
def init_infant_brain_extraction_wf(
    age_months=None,
    ants_affine_init=False,
    bspline_fitting_distance=200,
    sloppy=False,
    skull_strip_template="UNCInfant",
    template_specs=None,
    interim_checkpoints=True,
    mem_gb=3.0,
    mri_scheme="T1w",
    name="infant_brain_extraction_wf",
    atropos_model=None,
    omp_nthreads=None,
    output_dir=None,
    use_float=True,
    use_t2w=False,
):
    """
    Build an atlas-based brain extraction pipeline for infant T1w/T2w MRI data.

    The T2w image is registered to the template, the template brain mask is
    projected back onto the T2w, and the mask is then carried over to the T1w
    via a within-subject T2w->T1w registration.  The T1w is INU-corrected
    (N4) using the mapped mask as a weight image.

    Pros/Cons of available templates
    --------------------------------
    * MNIInfant
      + More cohorts available for finer-grain control
      + T1w/T2w images available
      - Template masks are poor

    * UNCInfant
      + Accurate masks
      - No T2w image available

    Parameters
    ----------
    ants_affine_init : :obj:`bool`, optional
        Set-up a pre-initialization step with ``antsAI``
        to account for mis-oriented images.

    """
    # handle template specifics
    template_specs = template_specs or {}
    if skull_strip_template == 'MNIInfant':
        template_specs['resolution'] = 2 if sloppy else 1

    if not template_specs.get('cohort'):
        if age_months is None:
            raise KeyError(
                f"Age or cohort for {skull_strip_template} must be provided!")
        template_specs['cohort'] = cohort_by_months(skull_strip_template,
                                                    age_months)

    inputnode = pe.Node(
        niu.IdentityInterface(fields=["t1w", "t2w", "in_mask"]),
        name="inputnode")
    outputnode = pe.Node(niu.IdentityInterface(
        fields=["t1w_corrected", "t1w_corrected_brain", "t1w_mask"]),
        name="outputnode")

    # NOTE(review): the T1w-only path is not implemented yet; a T2w image
    # is mandatory for the registration chain built below.
    if not use_t2w:
        raise RuntimeError("A T2w image is currently required.")

    tpl_target_path = get_template(
        skull_strip_template,
        suffix='T1w',  # no T2w template
        desc=None,
        **template_specs,
    )
    if not tpl_target_path:
        # NOTE(review): `{'T1w' or mri_scheme}` always evaluates to 'T1w'
        # ('T1w' is truthy) -- presumably `mri_scheme or 'T1w'` was intended.
        raise RuntimeError(
            f"An instance of template <tpl-{skull_strip_template}> with MR scheme "
            f"'{'T1w' or mri_scheme}' could not be found.")

    # Probabilistic brain mask preferred; binary mask as a fall-back.
    tpl_brainmask_path = get_template(skull_strip_template,
                                      label="brain",
                                      suffix="probseg",
                                      **template_specs) or get_template(
                                          skull_strip_template,
                                          desc="brain",
                                          suffix="mask",
                                          **template_specs)

    tpl_regmask_path = get_template(skull_strip_template,
                                    label="BrainCerebellumExtraction",
                                    suffix="mask",
                                    **template_specs)

    # validate images
    val_tmpl = pe.Node(ValidateImage(), name='val_tmpl')
    val_t1w = val_tmpl.clone("val_t1w")
    val_t2w = val_tmpl.clone("val_t2w")
    val_tmpl.inputs.in_file = _pop(tpl_target_path)

    # NOTE(review): `gauss_tmpl` is created but never wired into the graph
    # below -- confirm whether it is dead code or a missing connection.
    gauss_tmpl = pe.Node(niu.Function(function=_gauss_filter),
                         name="gauss_tmpl")

    # Spatial normalization step: Laplacian sharpening used as a second
    # registration channel for template, T1w and T2w.
    lap_tmpl = pe.Node(ImageMath(operation="Laplacian", op2="0.4 1"),
                       name="lap_tmpl")
    lap_t1w = lap_tmpl.clone("lap_t1w")
    lap_t2w = lap_tmpl.clone("lap_t2w")

    # Merge image nodes (channel 1: clipped image, channel 2: Laplacian)
    mrg_tmpl = pe.Node(niu.Merge(2), name="mrg_tmpl")
    mrg_t2w = mrg_tmpl.clone("mrg_t2w")
    mrg_t1w = mrg_tmpl.clone("mrg_t1w")

    # Normalize the Laplacian channels into a [0, 1] float range.
    norm_lap_tmpl = pe.Node(niu.Function(function=_trunc),
                            name="norm_lap_tmpl")
    norm_lap_tmpl.inputs.dtype = "float32"
    norm_lap_tmpl.inputs.out_max = 1.0
    norm_lap_tmpl.inputs.percentile = (0.01, 99.99)
    norm_lap_tmpl.inputs.clip_max = None

    norm_lap_t1w = norm_lap_tmpl.clone('norm_lap_t1w')
    norm_lap_t2w = norm_lap_t1w.clone('norm_lap_t2w')

    # Set up initial spatial normalization
    ants_params = "testing" if sloppy else "precise"
    norm = pe.Node(
        Registration(from_file=pkgr_fn(
            "niworkflows.data", f"antsBrainExtraction_{ants_params}.json")),
        name="norm",
        n_procs=omp_nthreads,
        mem_gb=mem_gb,
    )
    norm.inputs.float = use_float
    if tpl_regmask_path:
        norm.inputs.fixed_image_masks = tpl_regmask_path

    # Set up T2w -> T1w within-subject registration
    norm_subj = pe.Node(
        Registration(
            from_file=pkgr_fn("nibabies.data", "within_subject_t1t2.json")),
        name="norm_subj",
        n_procs=omp_nthreads,
        mem_gb=mem_gb,
    )
    norm_subj.inputs.float = use_float

    # main workflow
    wf = pe.Workflow(name)
    # Create a buffer interface as a cache for the actual inputs to registration
    # NOTE(review): `buffernode` is never connected below -- confirm intent.
    buffernode = pe.Node(
        niu.IdentityInterface(fields=["hires_target", "smooth_target"]),
        name="buffernode")

    # truncate target intensity for N4 correction
    clip_tmpl = pe.Node(niu.Function(function=_trunc), name="clip_tmpl")
    clip_t2w = clip_tmpl.clone('clip_t2w')
    clip_t1w = clip_tmpl.clone('clip_t1w')

    # INU correction of the t1w
    init_t2w_n4 = pe.Node(
        N4BiasFieldCorrection(
            dimension=3,
            save_bias=False,
            copy_header=True,
            # one fewer level when sloppy (True == 1 in arithmetic)
            n_iterations=[50] * (4 - sloppy),
            convergence_threshold=1e-7,
            shrink_factor=4,
            bspline_fitting_distance=bspline_fitting_distance,
        ),
        n_procs=omp_nthreads,
        name="init_t2w_n4",
    )
    init_t1w_n4 = init_t2w_n4.clone("init_t1w_n4")

    clip_t2w_inu = pe.Node(niu.Function(function=_trunc), name="clip_t2w_inu")
    clip_t1w_inu = clip_t2w_inu.clone("clip_t1w_inu")

    map_mask_t2w = pe.Node(ApplyTransforms(interpolation="Gaussian",
                                           float=True),
                           name="map_mask_t2w",
                           mem_gb=1)
    map_mask_t1w = map_mask_t2w.clone("map_mask_t1w")

    # map template brainmask to t2w space
    map_mask_t2w.inputs.input_image = str(tpl_brainmask_path)

    thr_t2w_mask = pe.Node(Binarize(thresh_low=0.80), name="thr_t2w_mask")
    thr_t1w_mask = thr_t2w_mask.clone('thr_t1w_mask')

    bspline_grid = pe.Node(niu.Function(function=_bspline_distance),
                           name="bspline_grid")

    # Refine INU correction
    final_n4 = pe.Node(
        N4BiasFieldCorrection(
            dimension=3,
            bspline_fitting_distance=bspline_fitting_distance,
            save_bias=True,
            copy_header=True,
            n_iterations=[50] * 5,
            convergence_threshold=1e-7,
            rescale_intensities=True,
            shrink_factor=4,
        ),
        n_procs=omp_nthreads,
        name="final_n4",
    )
    final_mask = pe.Node(ApplyMask(), name="final_mask")

    if atropos_model is None:
        atropos_model = tuple(ATROPOS_MODELS[mri_scheme].values())

    atropos_wf = init_atropos_wf(
        use_random_seed=False,
        omp_nthreads=omp_nthreads,
        mem_gb=mem_gb,
        in_segmentation_model=atropos_model,
    )
    # if tpl_regmask_path:
    #     atropos_wf.get_node('inputnode').inputs.in_mask_dilated = tpl_regmask_path

    # Select the WM tissue-probability map (last class in the Atropos model).
    sel_wm = pe.Node(niu.Select(index=atropos_model[-1] - 1),
                     name='sel_wm',
                     run_without_submitting=True)

    wf.connect([
        # 1. massage template
        (val_tmpl, clip_tmpl, [("out_file", "in_file")]),
        (clip_tmpl, lap_tmpl, [("out", "op1")]),
        (clip_tmpl, mrg_tmpl, [("out", "in1")]),
        (lap_tmpl, norm_lap_tmpl, [("output_image", "in_file")]),
        (norm_lap_tmpl, mrg_tmpl, [("out", "in2")]),
        # 2. massage T2w
        (inputnode, val_t2w, [('t2w', 'in_file')]),
        (val_t2w, clip_t2w, [('out_file', 'in_file')]),
        (clip_t2w, init_t2w_n4, [('out', 'input_image')]),
        (init_t2w_n4, clip_t2w_inu, [("output_image", "in_file")]),
        (clip_t2w_inu, lap_t2w, [('out', 'op1')]),
        (clip_t2w_inu, mrg_t2w, [('out', 'in1')]),
        (lap_t2w, norm_lap_t2w, [("output_image", "in_file")]),
        (norm_lap_t2w, mrg_t2w, [("out", "in2")]),
        # 3. normalize T2w to target template (UNC)
        (mrg_t2w, norm, [("out", "moving_image")]),
        (mrg_tmpl, norm, [("out", "fixed_image")]),
        # 4. map template brainmask to T2w space
        (inputnode, map_mask_t2w, [('t2w', 'reference_image')]),
        (norm, map_mask_t2w, [("reverse_transforms", "transforms"),
                              ("reverse_invert_flags",
                               "invert_transform_flags")]),
        (map_mask_t2w, thr_t2w_mask, [("output_image", "in_file")]),
        # 5. massage T1w
        (inputnode, val_t1w, [("t1w", "in_file")]),
        (val_t1w, clip_t1w, [("out_file", "in_file")]),
        (clip_t1w, init_t1w_n4, [("out", "input_image")]),
        (init_t1w_n4, clip_t1w_inu, [("output_image", "in_file")]),
        (clip_t1w_inu, lap_t1w, [('out', 'op1')]),
        (clip_t1w_inu, mrg_t1w, [('out', 'in1')]),
        (lap_t1w, norm_lap_t1w, [("output_image", "in_file")]),
        (norm_lap_t1w, mrg_t1w, [("out", "in2")]),
        # 6. normalize within subject T1w to T2w
        (mrg_t1w, norm_subj, [("out", "moving_image")]),
        (mrg_t2w, norm_subj, [("out", "fixed_image")]),
        (thr_t2w_mask, norm_subj, [("out_mask", "fixed_image_mask")]),
        # 7. map mask to T1w space
        (thr_t2w_mask, map_mask_t1w, [("out_mask", "input_image")]),
        (inputnode, map_mask_t1w, [("t1w", "reference_image")]),
        (norm_subj, map_mask_t1w, [
            ("reverse_transforms", "transforms"),
            ("reverse_invert_flags", "invert_transform_flags"),
        ]),
        (map_mask_t1w, thr_t1w_mask, [("output_image", "in_file")]),
        # 8. T1w INU
        (inputnode, final_n4, [("t1w", "input_image")]),
        (inputnode, bspline_grid, [("t1w", "in_file")]),
        (bspline_grid, final_n4, [("out", "args")]),
        (map_mask_t1w, final_n4, [("output_image", "weight_image")]),
        (final_n4, final_mask, [("output_image", "in_file")]),
        (thr_t1w_mask, final_mask, [("out_mask", "in_mask")]),
        # 9. Outputs
        (final_n4, outputnode, [("output_image", "t1w_corrected")]),
        (thr_t1w_mask, outputnode, [("out_mask", "t1w_mask")]),
        (final_mask, outputnode, [("out_file", "t1w_corrected_brain")]),
    ])

    if ants_affine_init:
        # antsAI settings; 'random'/'search' variants tweak metric/grid.
        ants_kwargs = dict(
            metric=("Mattes", 32, "Regular", 0.2),
            transform=("Affine", 0.1),
            search_factor=(20, 0.12),
            principal_axes=False,
            convergence=(10, 1e-6, 10),
            search_grid=(40, (0, 40, 40)),
            verbose=True,
        )
        if ants_affine_init == 'random':
            ants_kwargs['metric'] = ("Mattes", 32, "Random", 0.2)
        if ants_affine_init == 'search':
            ants_kwargs['search_grid'] = (20, (20, 40, 40))

        init_aff = pe.Node(
            AI(**ants_kwargs),
            name="init_aff",
            n_procs=omp_nthreads,
        )
        if tpl_regmask_path:
            init_aff.inputs.fixed_image_mask = _pop(tpl_regmask_path)

        wf.connect([
            (clip_tmpl, init_aff, [("out", "fixed_image")]),
            (clip_t2w_inu, init_aff, [("out", "moving_image")]),
            (init_aff, norm, [("output_transform",
                               "initial_moving_transform")]),
        ])
    return wf
#register_template_to_cropped_axial = pe.Node(interface=fsl.FLIRT(), name = 'register_template_to_cropped_axial') #register_template_to_cropped_axial.inputs.in_file = '/data/dgutman/Dropbox/DOG_PROJECT/Erin_9dogtemplate.nii.gz' #dog_preproc_wf.connect(crop_axial_image,'roi_file', register_template_to_cropped_axial,'reference') ### before I apply the registered high dof mask... I actually need to threshold it... ### FSL will include ANY value that is > 0 in the mask and so there are so 1e-15 values so the mask is includnig extra stuff #threshold_template_mask = pe.Node(interface=fsl.ImageMaths(op_string=' -thr 1.0'), name="threshold_template_mask") #dog_preproc_wf.connect(highdof_register_template_to_cropped_axial,'out_file',threshold_template_mask,'in_file') ## I am debating if I should dilate the mask one value first... thoughts???... #dog_preproc_wf.connect(crop_axial_image,'roi_file',apply_template_mask,'in_file') #dog_preproc_wf.connect(threshold_template_mask,'out_file',apply_template_mask,'mask_file') axial_n4bias_node = pe.Node(interface=N4BiasFieldCorrection(), name='n4bias_node') axial_n4bias_node.inputs.dimension = 3 axial_n4bias_node.inputs.bspline_fitting_distance = 300 axial_n4bias_node.inputs.shrink_factor = 3 axial_n4bias_node.inputs.n_iterations = [50, 50, 30, 20] axial_n4bias_node.inputs.convergence_threshold = 1e-6 #dog_preproc_wf.connect(apply_template_mask,'out_file',axial_n4bias_node,'input_image') ## now I am going to apply the template masked image...which in this case is the 12 DOF warped image.. ### so this workflow is quite botched... the mask I am trying to apply isn't registered to the axial image ### I am going to crop the BET'ed axial images so I have a smaller working volume for registration #dog_preproc_wf.connect(threshold_template_mask,'out_file',apply_final_mask,'mask_file')
def init_brain_extraction_wf(name='brain_extraction_wf',
                             in_template='OASIS30ANTs',
                             template_spec=None,
                             use_float=True,
                             normalization_quality='precise',
                             omp_nthreads=None,
                             mem_gb=3.0,
                             bids_suffix='T1w',
                             atropos_refine=True,
                             atropos_use_random_seed=True,
                             atropos_model=None,
                             use_laplacian=True,
                             bspline_fitting_distance=200):
    """
    A Nipype implementation of the official ANTs' ``antsBrainExtraction.sh``
    workflow (only for 3D images).

    The official workflow is built as follows (and this implementation
    follows the same organization):

      1. Step 1 performs several clerical tasks (adding padding, calculating
         the Laplacian of inputs, affine initialization) and the core
         spatial normalization.
      2. Maps the brain mask into target space using the normalization
         calculated in 1.
      3. Superstep 1b: smart binarization of the brain mask
      4. Superstep 6: apply ATROPOS and massage its outputs
      5. Superstep 7: use results from 4 to refine the brain mask

    .. workflow::
        :graph2use: orig
        :simple_form: yes

        from niworkflows.anat import init_brain_extraction_wf
        wf = init_brain_extraction_wf()

    **Parameters**

        in_template : str
            Name of the skull-stripping template
            ('OASIS30ANTs', 'NKI', or path).
            The brain template from which regions will be projected
            Anatomical template created using e.g. LPBA40 data set with
            ``buildtemplateparallel.sh`` in ANTs.
            The workflow will automatically search for a brain probability
            mask created using e.g. LPBA40 data set which have brain masks
            defined, and warped to anatomical template and averaged resulting
            in a probability image.
        use_float : bool
            Whether single precision should be used
        normalization_quality : str
            Use more precise or faster registration parameters
            (default: ``precise``, other possible values: ``testing``)
        omp_nthreads : int
            Maximum number of threads an individual process may use
        mem_gb : float
            Estimated peak memory consumption of the most hungry nodes
            in the workflow
        bids_suffix : str
            Sequence type of the first input image. For a list of acceptable
            values see
            https://bids-specification.readthedocs.io/en/latest/\
04-modality-specific-files/01-magnetic-resonance-imaging-data.html#anatomy-imaging-data
        atropos_refine : bool
            Enables or disables the whole ATROPOS sub-workflow
        atropos_use_random_seed : bool
            Whether ATROPOS should generate a random seed based on the
            system's clock
        atropos_model : tuple or None
            Allows to specify a particular segmentation model, overwriting
            the defaults based on ``bids_suffix``
        use_laplacian : bool
            Enables or disables alignment of the Laplacian as an additional
            criterion for image registration quality (default: True)
        bspline_fitting_distance : float
            The size of the b-spline mesh grid elements, in mm (default: 200)
        name : str, optional
            Workflow name (default: antsBrainExtraction)

    **Inputs**

        in_files
            List of input anatomical images to be brain-extracted,
            typically T1-weighted.
            If a list of anatomical images is provided, subsequently
            specified images are used during the segmentation process.
            However, only the first image is used in the registration
            of priors.
            Our suggestion would be to specify the T1w as the first image.
        in_mask
            (optional) Mask used for registration to limit the metric
            computation to a specific region.

    **Outputs**

        out_file
            Skull-stripped and :abbr:`INU (intensity non-uniformity)`-corrected
            ``in_files``
        out_mask
            Calculated brain mask
        bias_corrected
            The ``in_files`` input images, after
            :abbr:`INU (intensity non-uniformity)` correction, before
            skull-stripping.
        bias_image
            The :abbr:`INU (intensity non-uniformity)` field estimated for
            each input in ``in_files``
        out_segm
            Output segmentation by ATROPOS
        out_tpms
            Output :abbr:`TPMs (tissue probability maps)` by ATROPOS

    """
    from templateflow.api import get as get_template
    wf = pe.Workflow(name)

    template_spec = template_spec or {}

    # suffix passed via spec takes precedence
    template_spec['suffix'] = template_spec.get('suffix', bids_suffix)

    tpl_target_path, common_spec = get_template_specs(
        in_template, template_spec=template_spec)

    # Get probabilistic brain mask if available
    tpl_mask_path = get_template(
        in_template, label='brain', suffix='probseg', **common_spec) or \
        get_template(in_template, desc='brain', suffix='mask', **common_spec)

    if omp_nthreads is None or omp_nthreads < 1:
        omp_nthreads = cpu_count()

    inputnode = pe.Node(niu.IdentityInterface(fields=['in_files', 'in_mask']),
                        name='inputnode')

    # Try to find a registration mask, set if available
    tpl_regmask_path = get_template(in_template,
                                    desc='BrainCerebellumExtraction',
                                    suffix='mask',
                                    **common_spec)
    if tpl_regmask_path:
        inputnode.inputs.in_mask = str(tpl_regmask_path)

    outputnode = pe.Node(niu.IdentityInterface(fields=[
        'out_file', 'out_mask', 'bias_corrected', 'bias_image', 'out_segm',
        'out_tpms'
    ]), name='outputnode')

    # Re-stamp the original header geometry onto derived outputs.
    copy_xform = pe.Node(CopyXForm(
        fields=['out_file', 'out_mask', 'bias_corrected', 'bias_image']),
        name='copy_xform',
        run_without_submitting=True)

    # Clip intensity outliers before the quick (registration-only) N4 pass.
    trunc = pe.MapNode(ImageMath(operation='TruncateImageIntensity',
                                 op2='0.01 0.999 256'),
                       name='truncate_images',
                       iterfield=['op1'])
    inu_n4 = pe.MapNode(N4BiasFieldCorrection(
        dimension=3,
        save_bias=False,
        copy_header=True,
        n_iterations=[50] * 4,
        convergence_threshold=1e-7,
        shrink_factor=4,
        bspline_fitting_distance=bspline_fitting_distance),
        n_procs=omp_nthreads,
        name='inu_n4',
        iterfield=['input_image'])

    # Downsample template and target for the fast affine initialization.
    res_tmpl = pe.Node(ResampleImageBySpacing(out_spacing=(4, 4, 4),
                                              apply_smoothing=True),
                       name='res_tmpl')
    res_tmpl.inputs.input_image = tpl_target_path
    res_target = pe.Node(ResampleImageBySpacing(out_spacing=(4, 4, 4),
                                                apply_smoothing=True),
                         name='res_target')

    lap_tmpl = pe.Node(ImageMath(operation='Laplacian', op2='1.5 1'),
                       name='lap_tmpl')
    lap_tmpl.inputs.op1 = tpl_target_path
    lap_target = pe.Node(ImageMath(operation='Laplacian', op2='1.5 1'),
                         name='lap_target')
    mrg_tmpl = pe.Node(niu.Merge(2), name='mrg_tmpl')
    mrg_tmpl.inputs.in1 = tpl_target_path
    mrg_target = pe.Node(niu.Merge(2), name='mrg_target')

    # Initialize transforms with antsAI
    init_aff = pe.Node(AI(metric=('Mattes', 32, 'Regular', 0.25),
                          transform=('Affine', 0.1),
                          search_factor=(15, 0.1),
                          principal_axes=False,
                          convergence=(10, 1e-6, 10),
                          verbose=True),
                       name='init_aff',
                       n_procs=omp_nthreads)

    # Tolerate missing ANTs at construction time
    _ants_version = Registration().version
    if _ants_version and parseversion(_ants_version) >= Version('2.3.0'):
        init_aff.inputs.search_grid = (40, (0, 40, 40))

    # Set up spatial normalization
    settings_file = 'antsBrainExtraction_%s.json' if use_laplacian \
        else 'antsBrainExtractionNoLaplacian_%s.json'
    norm = pe.Node(
        Registration(from_file=pkgr_fn('niworkflows.data',
                                       settings_file % normalization_quality)),
        name='norm',
        n_procs=omp_nthreads,
        mem_gb=mem_gb)
    norm.inputs.float = use_float
    # ANTs >= 2.2.0 pluralized the fixed-mask trait name.
    fixed_mask_trait = 'fixed_image_mask'
    if _ants_version and parseversion(_ants_version) >= Version('2.2.0'):
        fixed_mask_trait += 's'

    map_brainmask = pe.Node(ApplyTransforms(interpolation='Gaussian',
                                            float=True),
                            name='map_brainmask',
                            mem_gb=1)
    map_brainmask.inputs.input_image = str(tpl_mask_path)

    thr_brainmask = pe.Node(ThresholdImage(dimension=3,
                                           th_low=0.5,
                                           th_high=1.0,
                                           inside_value=1,
                                           outside_value=0),
                            name='thr_brainmask')

    # Morphological dilation, radius=2
    dil_brainmask = pe.Node(ImageMath(operation='MD', op2='2'),
                            name='dil_brainmask')
    # Get largest connected component
    get_brainmask = pe.Node(ImageMath(operation='GetLargestComponent'),
                            name='get_brainmask')

    # Refine INU correction
    inu_n4_final = pe.MapNode(N4BiasFieldCorrection(
        dimension=3,
        save_bias=True,
        copy_header=True,
        n_iterations=[50] * 5,
        convergence_threshold=1e-7,
        shrink_factor=4,
        bspline_fitting_distance=bspline_fitting_distance),
        n_procs=omp_nthreads,
        name='inu_n4_final',
        iterfield=['input_image'])
    if _ants_version and parseversion(_ants_version) >= Version('2.1.0'):
        inu_n4_final.inputs.rescale_intensities = True
    else:
        warn(
            """\
Found ANTs version %s, which is too old. Please consider upgrading to 2.1.0 or \
greater so that the --rescale-intensities option is available with \
N4BiasFieldCorrection.""" % _ants_version, DeprecationWarning)

    # Apply mask
    apply_mask = pe.MapNode(ApplyMask(),
                            iterfield=['in_file'],
                            name='apply_mask')

    wf.connect([
        (inputnode, trunc, [('in_files', 'op1')]),
        (inputnode, copy_xform, [(('in_files', _pop), 'hdr_file')]),
        (inputnode, inu_n4_final, [('in_files', 'input_image')]),
        (inputnode, init_aff, [('in_mask', 'fixed_image_mask')]),
        (inputnode, norm, [('in_mask', fixed_mask_trait)]),
        (inputnode, map_brainmask, [(('in_files', _pop), 'reference_image')]),
        (trunc, inu_n4, [('output_image', 'input_image')]),
        (inu_n4, res_target, [(('output_image', _pop), 'input_image')]),
        (res_tmpl, init_aff, [('output_image', 'fixed_image')]),
        (res_target, init_aff, [('output_image', 'moving_image')]),
        (init_aff, norm, [('output_transform', 'initial_moving_transform')]),
        (norm, map_brainmask,
         [('reverse_transforms', 'transforms'),
          ('reverse_invert_flags', 'invert_transform_flags')]),
        (map_brainmask, thr_brainmask, [('output_image', 'input_image')]),
        (thr_brainmask, dil_brainmask, [('output_image', 'op1')]),
        (dil_brainmask, get_brainmask, [('output_image', 'op1')]),
        (inu_n4_final, apply_mask, [('output_image', 'in_file')]),
        (get_brainmask, apply_mask, [('output_image', 'mask_file')]),
        (get_brainmask, copy_xform, [('output_image', 'out_mask')]),
        (apply_mask, copy_xform, [('out_file', 'out_file')]),
        (inu_n4_final, copy_xform, [('output_image', 'bias_corrected'),
                                    ('bias_image', 'bias_image')]),
        (copy_xform, outputnode, [('out_file', 'out_file'),
                                  ('out_mask', 'out_mask'),
                                  ('bias_corrected', 'bias_corrected'),
                                  ('bias_image', 'bias_image')]),
    ])

    if use_laplacian:
        # Two-channel registration: intensity image + Laplacian.
        lap_tmpl = pe.Node(ImageMath(operation='Laplacian', op2='1.5 1'),
                           name='lap_tmpl')
        lap_tmpl.inputs.op1 = tpl_target_path
        lap_target = pe.Node(ImageMath(operation='Laplacian', op2='1.5 1'),
                             name='lap_target')
        mrg_tmpl = pe.Node(niu.Merge(2), name='mrg_tmpl')
        mrg_tmpl.inputs.in1 = tpl_target_path
        mrg_target = pe.Node(niu.Merge(2), name='mrg_target')
        wf.connect([
            (inu_n4, lap_target, [(('output_image', _pop), 'op1')]),
            (lap_tmpl, mrg_tmpl, [('output_image', 'in2')]),
            (inu_n4, mrg_target, [('output_image', 'in1')]),
            (lap_target, mrg_target, [('output_image', 'in2')]),
            (mrg_tmpl, norm, [('out', 'fixed_image')]),
            (mrg_target, norm, [('out', 'moving_image')]),
        ])
    else:
        norm.inputs.fixed_image = tpl_target_path
        wf.connect([
            (inu_n4, norm, [(('output_image', _pop), 'moving_image')]),
        ])

    if atropos_refine:
        atropos_model = atropos_model or list(
            ATROPOS_MODELS[bids_suffix].values())
        atropos_wf = init_atropos_wf(
            use_random_seed=atropos_use_random_seed,
            omp_nthreads=omp_nthreads,
            mem_gb=mem_gb,
            in_segmentation_model=atropos_model,
        )
        # Select the WM tissue-probability map (last class in the model)
        # and feed it to the final N4 pass as a weight image.
        sel_wm = pe.Node(niu.Select(index=atropos_model[-1] - 1),
                         name='sel_wm',
                         run_without_submitting=True)

        # Re-route the mask through the refined ATROPOS output.
        wf.disconnect([
            (get_brainmask, apply_mask, [('output_image', 'mask_file')]),
            (copy_xform, outputnode, [('out_mask', 'out_mask')]),
        ])
        wf.connect([
            (inu_n4, atropos_wf, [('output_image', 'inputnode.in_files')]),
            (thr_brainmask, atropos_wf, [('output_image',
                                          'inputnode.in_mask')]),
            (get_brainmask, atropos_wf, [('output_image',
                                          'inputnode.in_mask_dilated')]),
            (atropos_wf, sel_wm, [('outputnode.out_tpms', 'inlist')]),
            (sel_wm, inu_n4_final, [('out', 'weight_image')]),
            (atropos_wf, apply_mask, [('outputnode.out_mask', 'mask_file')]),
            (atropos_wf, outputnode, [('outputnode.out_mask', 'out_mask'),
                                      ('outputnode.out_segm', 'out_segm'),
                                      ('outputnode.out_tpms', 'out_tpms')]),
        ])
    return wf
# NOTE(review): this is a second, older definition of
# `init_brain_extraction_wf` (path-based templates instead of TemplateFlow).
# If both live in the same module, this one shadows the earlier definition.
def init_brain_extraction_wf(name='brain_extraction_wf',
                             in_template='OASIS',
                             use_float=True,
                             normalization_quality='precise',
                             omp_nthreads=None,
                             mem_gb=3.0,
                             modality='T1',
                             atropos_refine=True,
                             atropos_use_random_seed=True,
                             atropos_model=None):
    """
    A Nipype implementation of the official ANTs' ``antsBrainExtraction.sh``
    workflow (only for 3D images).

    The official workflow is built as follows (and this implementation
    follows the same organization):

      1. Step 1 performs several clerical tasks (adding padding, calculating
         the Laplacian of inputs, affine initialization) and the core
         spatial normalization.
      2. Maps the brain mask into target space using the normalization
         calculated in 1.
      3. Superstep 1b: smart binarization of the brain mask
      4. Superstep 6: apply ATROPOS and massage its outputs
      5. Superstep 7: use results from 4 to refine the brain mask

    .. workflow::
        :graph2use: orig
        :simple_form: yes

        from niworkflows.anat import init_brain_extraction_wf
        wf = init_brain_extraction_wf()

    **Parameters**

        in_template : str
            Name of the skull-stripping template ('OASIS', 'NKI', or path).
            The brain template from which regions will be projected
            Anatomical template created using e.g. LPBA40 data set with
            ``buildtemplateparallel.sh`` in ANTs.
            The workflow will automatically search for a brain probability
            mask created using e.g. LPBA40 data set which have brain masks
            defined, and warped to anatomical template and averaged resulting
            in a probability image.
        use_float : bool
            Whether single precision should be used
        normalization_quality : str
            Use more precise or faster registration parameters
            (default: ``precise``, other possible values: ``testing``)
        omp_nthreads : int
            Maximum number of threads an individual process may use
        mem_gb : float
            Estimated peak memory consumption of the most hungry nodes
            in the workflow
        modality : str
            Sequence type of the first input image ('T1', 'T2', or 'FLAIR')
        atropos_refine : bool
            Enables or disables the whole ATROPOS sub-workflow
        atropos_use_random_seed : bool
            Whether ATROPOS should generate a random seed based on the
            system's clock
        atropos_model : tuple or None
            Allows to specify a particular segmentation model, overwriting
            the defaults based on ``modality``
        name : str, optional
            Workflow name (default: antsBrainExtraction)

    **Inputs**

        in_files
            List of input anatomical images to be brain-extracted,
            typically T1-weighted.
            If a list of anatomical images is provided, subsequently
            specified images are used during the segmentation process.
            However, only the first image is used in the registration
            of priors.
            Our suggestion would be to specify the T1w as the first image.
        in_mask
            (optional) Mask used for registration to limit the metric
            computation to a specific region.

    **Outputs**

        bias_corrected
            The ``in_files`` input images, after
            :abbr:`INU (intensity non-uniformity)` correction.
        out_mask
            Calculated brain mask
        bias_image
            The :abbr:`INU (intensity non-uniformity)` field estimated for
            each input in ``in_files``
        out_segm
            Output segmentation by ATROPOS
        out_tpms
            Output :abbr:`TPMs (tissue probability maps)` by ATROPOS

    """
    wf = pe.Workflow(name)

    template_path = None
    if in_template in TEMPLATE_MAP:
        template_path = get_dataset(in_template)
    else:
        # Not a known keyword: treat as a filesystem path to a template.
        template_path = in_template

    # e.g. 'T1' -> 'T1w', otherwise just uppercase the modality label.
    mod = ('%sw' % modality[:2].upper()
           if modality.upper().startswith('T') else modality.upper())

    # Append template modality
    potential_targets = list(Path(template_path).glob('*_%s.nii.gz' % mod))
    if not potential_targets:
        raise ValueError('No %s template was found under "%s".' %
                         (mod, template_path))

    tpl_target_path = str(potential_targets[0])
    target_basename = '_'.join(tpl_target_path.split('_')[:-1])

    # Get probabilistic brain mask if available
    tpl_mask_path = '%s_class-brainmask_probtissue.nii.gz' % target_basename
    # Fall-back to a binary mask just in case
    if not os.path.exists(tpl_mask_path):
        tpl_mask_path = '%s_brainmask.nii.gz' % target_basename

    if not os.path.exists(tpl_mask_path):
        raise ValueError(
            'Probability map for the brain mask associated to this template '
            '"%s" not found.' % tpl_mask_path)

    if omp_nthreads is None or omp_nthreads < 1:
        omp_nthreads = cpu_count()

    inputnode = pe.Node(niu.IdentityInterface(fields=['in_files', 'in_mask']),
                        name='inputnode')

    # Try to find a registration mask, set if available
    tpl_regmask_path = ('%s_label-BrainCerebellumRegistration_roi.nii.gz'
                        % target_basename)
    if os.path.exists(tpl_regmask_path):
        inputnode.inputs.in_mask = tpl_regmask_path

    # NOTE(review): 'out_tpms' is connected into this node below (ATROPOS
    # branch) but is missing from the declared fields -- confirm upstream.
    outputnode = pe.Node(niu.IdentityInterface(
        fields=['bias_corrected', 'out_mask', 'bias_image', 'out_segm']),
        name='outputnode')

    # Clip intensity outliers before N4.
    trunc = pe.MapNode(ImageMath(operation='TruncateImageIntensity',
                                 op2='0.01 0.999 256'),
                       name='truncate_images',
                       iterfield=['op1'])
    inu_n4 = pe.MapNode(N4BiasFieldCorrection(dimension=3,
                                              save_bias=True,
                                              copy_header=True,
                                              n_iterations=[50] * 4,
                                              convergence_threshold=1e-7,
                                              shrink_factor=4,
                                              bspline_fitting_distance=200),
                        n_procs=omp_nthreads,
                        name='inu_n4',
                        iterfield=['input_image'])

    # Downsample template and target for the fast affine initialization.
    res_tmpl = pe.Node(ResampleImageBySpacing(out_spacing=(4, 4, 4),
                                              apply_smoothing=True),
                       name='res_tmpl')
    res_tmpl.inputs.input_image = tpl_target_path
    res_target = pe.Node(ResampleImageBySpacing(out_spacing=(4, 4, 4),
                                                apply_smoothing=True),
                         name='res_target')

    lap_tmpl = pe.Node(ImageMath(operation='Laplacian', op2='1.5 1'),
                       name='lap_tmpl')
    lap_tmpl.inputs.op1 = tpl_target_path
    lap_target = pe.Node(ImageMath(operation='Laplacian', op2='1.5 1'),
                         name='lap_target')
    mrg_tmpl = pe.Node(niu.Merge(2), name='mrg_tmpl')
    mrg_tmpl.inputs.in1 = tpl_target_path
    mrg_target = pe.Node(niu.Merge(2), name='mrg_target')

    # Initialize transforms with antsAI
    init_aff = pe.Node(AI(metric=('Mattes', 32, 'Regular', 0.2),
                          transform=('Affine', 0.1),
                          search_factor=(20, 0.12),
                          principal_axes=False,
                          convergence=(10, 1e-6, 10),
                          verbose=True),
                       name='init_aff',
                       n_procs=omp_nthreads)

    if parseversion(Registration().version) > Version('2.2.0'):
        init_aff.inputs.search_grid = (40, (0, 40, 40))

    # Set up spatial normalization
    norm = pe.Node(Registration(from_file=pkgr_fn(
        'niworkflows.data',
        'antsBrainExtraction_%s.json' % normalization_quality)),
        name='norm',
        n_procs=omp_nthreads,
        mem_gb=mem_gb)
    norm.inputs.float = use_float
    # ANTs >= 2.2.0 pluralized the fixed-mask trait name.
    fixed_mask_trait = 'fixed_image_mask'
    if parseversion(Registration().version) >= Version('2.2.0'):
        fixed_mask_trait += 's'

    map_brainmask = pe.Node(ApplyTransforms(interpolation='Gaussian',
                                            float=True),
                            name='map_brainmask',
                            mem_gb=1)
    map_brainmask.inputs.input_image = tpl_mask_path

    thr_brainmask = pe.Node(ThresholdImage(dimension=3,
                                           th_low=0.5,
                                           th_high=1.0,
                                           inside_value=1,
                                           outside_value=0),
                            name='thr_brainmask')

    # Morphological dilation, radius=2
    dil_brainmask = pe.Node(ImageMath(operation='MD', op2='2'),
                            name='dil_brainmask')
    # Get largest connected component
    get_brainmask = pe.Node(ImageMath(operation='GetLargestComponent'),
                            name='get_brainmask')

    # Apply mask
    apply_mask = pe.MapNode(ApplyMask(),
                            iterfield=['in_file'],
                            name='apply_mask')

    wf.connect([
        (inputnode, trunc, [('in_files', 'op1')]),
        (inputnode, init_aff, [('in_mask', 'fixed_image_mask')]),
        (inputnode, norm, [('in_mask', fixed_mask_trait)]),
        (inputnode, map_brainmask, [(('in_files', _pop), 'reference_image')]),
        (trunc, inu_n4, [('output_image', 'input_image')]),
        (inu_n4, res_target, [(('output_image', _pop), 'input_image')]),
        (inu_n4, lap_target, [(('output_image', _pop), 'op1')]),
        (res_tmpl, init_aff, [('output_image', 'fixed_image')]),
        (res_target, init_aff, [('output_image', 'moving_image')]),
        (inu_n4, mrg_target, [('output_image', 'in1')]),
        (lap_tmpl, mrg_tmpl, [('output_image', 'in2')]),
        (lap_target, mrg_target, [('output_image', 'in2')]),
        (init_aff, norm, [('output_transform', 'initial_moving_transform')]),
        (mrg_tmpl, norm, [('out', 'fixed_image')]),
        (mrg_target, norm, [('out', 'moving_image')]),
        (norm, map_brainmask,
         [('reverse_invert_flags', 'invert_transform_flags'),
          ('reverse_transforms', 'transforms')]),
        (map_brainmask, thr_brainmask, [('output_image', 'input_image')]),
        (thr_brainmask, dil_brainmask, [('output_image', 'op1')]),
        (dil_brainmask, get_brainmask, [('output_image', 'op1')]),
        (inu_n4, apply_mask, [('output_image', 'in_file')]),
        (get_brainmask, apply_mask, [('output_image', 'mask_file')]),
        (get_brainmask, outputnode, [('output_image', 'out_mask')]),
        (apply_mask, outputnode, [('out_file', 'bias_corrected')]),
        (inu_n4, outputnode, [('bias_image', 'bias_image')]),
    ])

    if atropos_refine:
        atropos_wf = init_atropos_wf(
            use_random_seed=atropos_use_random_seed,
            omp_nthreads=omp_nthreads,
            mem_gb=mem_gb,
            in_segmentation_model=atropos_model
            or list(ATROPOS_MODELS[modality].values()))

        # Re-route the mask through the refined ATROPOS output.
        wf.disconnect([
            (get_brainmask, outputnode, [('output_image', 'out_mask')]),
            (get_brainmask, apply_mask, [('output_image', 'mask_file')]),
        ])
        wf.connect([
            (inu_n4, atropos_wf, [('output_image', 'inputnode.in_files')]),
            (get_brainmask, atropos_wf, [('output_image',
                                          'inputnode.in_mask')]),
            (atropos_wf, outputnode, [('outputnode.out_mask', 'out_mask')]),
            (atropos_wf, apply_mask, [('outputnode.out_mask', 'mask_file')]),
            (atropos_wf, outputnode, [('outputnode.out_segm', 'out_segm'),
                                      ('outputnode.out_tpms', 'out_tpms')])
        ])
    return wf
def skullstrip_flash(input, path_output, name_output, min_val=100, max_val=1000,
                     flood_fill=False, cleanup=False):
    """Compute a brain mask for a partial-coverage T2*-weighted anatomical image.

    The mask is used to remove the sagittal sinus during segmentation. The
    latest echo should be used here to get the largest difference between
    venous and tissue compartments. The brain mask is generated by a simple
    thresholding operation. An intensity gradient in posterior-anterior
    direction is accounted for by thresholding each slice independently: the
    threshold is the local minimum of the slice's intensity histogram within
    a predefined range.

    Parameters
    ----------
    input : str
        Input path of the T2*-weighted anatomy (uncompressed NIfTI).
    path_output : str
        Path where the output mask is saved.
    name_output : str
        Basename of the output file ("_mask.nii" is appended).
    min_val : int, optional
        Minimum threshold of the intensity histogram.
    max_val : int, optional
        Maximum threshold of the intensity histogram.
    flood_fill : bool, optional
        Apply flood filling to the binary mask.
    cleanup : bool, optional
        Delete intermediate files.

    created by Daniel Haenelt
    Date created: 05-05-2019
    Last modified: 05-05-2019
    """
    import os
    import numpy as np
    import nibabel as nb
    from nipype.interfaces.ants import N4BiasFieldCorrection
    from scipy.signal import argrelextrema
    # FIX: scipy.ndimage.morphology is a deprecated namespace (removed in
    # recent SciPy); the public location is scipy.ndimage.
    from scipy.ndimage import binary_fill_holes

    # prepare path and filename
    # NOTE(review): this assumes an uncompressed ".nii" input; a ".nii.gz"
    # file would still carry a ".nii" suffix after one splitext — confirm
    # callers only pass ".nii" files.
    path = os.path.dirname(input)
    fname = os.path.splitext(os.path.basename(input))[0]

    # bias field correction (writes "b<fname>.nii" next to the input)
    n4 = N4BiasFieldCorrection()
    n4.inputs.dimension = 3
    n4.inputs.input_image = os.path.join(path, fname + ".nii")
    n4.inputs.bias_image = os.path.join(path, "n4bias.nii")
    n4.inputs.output_image = os.path.join(path, "b" + fname + ".nii")
    n4.run()

    # load bias-corrected data
    mask = nb.load(os.path.join(path, "b" + fname + ".nii"))
    mask_array = mask.get_fdata()

    # threshold each axial slice independently to account for the
    # posterior-anterior intensity gradient
    for i in range(np.shape(mask_array)[2]):
        # flatten slice
        temp = np.reshape(mask_array[:, :, i], np.size(mask_array[:, :, i]))

        # intensity histogram; drop the rightmost bin edge so edges align
        # one-to-one with the counts (renamed from `bin`/shadowed builtin)
        hist, edge = np.histogram(temp, 100)
        edge = edge[:-1]

        # local minimum of the histogram restricted to [min_val, max_val]
        hist_min = argrelextrema(hist, np.less)
        edge_min = edge[hist_min]
        edge_min[edge_min < min_val] = 0
        edge_min[edge_min > max_val] = 0
        edge_min[edge_min == 0] = np.nan
        # NOTE(review): if no local minimum falls inside the range this is an
        # all-NaN reduction (NaN result + RuntimeWarning) and the slice is
        # left unthresholded — confirm that is the intended behavior.
        edge_min = np.nanmin(edge_min)

        # binarize slice at the detected threshold
        mask_array[:, :, i][mask_array[:, :, i] < edge_min] = 0
        mask_array[:, :, i][mask_array[:, :, i] != 0] = 1

    # optional flood filling of the binary mask
    if flood_fill:
        mask_array = binary_fill_holes(mask_array, structure=np.ones((2, 2, 2)))

    # write output
    output = nb.Nifti1Image(mask_array, mask.affine, mask.header)
    nb.save(output, os.path.join(path_output, name_output + "_mask.nii"))

    # clean intermediate files
    if cleanup:
        os.remove(os.path.join(path, "n4bias.nii"))
        os.remove(os.path.join(path, "b" + fname + ".nii"))
def sdc_t2b(name='SDC_T2B', icorr=True, num_threads=1):
    """
    The T2w-registration based method (T2B) implements an SDC by nonlinear
    registration of the anatomically correct *T2w* image to the *b0* image
    of the *dMRI* dataset. The implementation here tries to reproduce the one
    included in ExploreDTI `(Leemans et al., 2009)
    <http://www.exploredti.com/ref/ExploreDTI_ISMRM_2009.pdf>`_, which is
    also used by `(Irfanoglu et al., 2012)
    <http://dx.doi.org/10.1016/j.neuroimage.2012.02.054>`_.

    :param str name: a unique name for the workflow.
    :param bool icorr: whether to modulate the unwarped DWIs by the Jacobian
        determinant of the displacement field (intensity correction).
    :param int num_threads: number of threads passed to the elastix
        registration node.

    :inputs:

        * in_t2w: the reference T2w image

    :outputs:

        * outputnode.corrected_image: the dMRI image after correction

    Example::

    >>> t2b = sdc_t2b()
    >>> t2b.inputs.inputnode.in_dwi = 'dwi_brain.nii'
    >>> t2b.inputs.inputnode.in_bval = 'dwi.bval'
    >>> t2b.inputs.inputnode.in_mask = 'b0_mask.nii'
    >>> t2b.inputs.inputnode.in_t2w = 't2w_brain.nii'
    >>> t2b.inputs.inputnode.in_param = 'parameters.txt'
    >>> t2b.run() # doctest: +SKIP

    """
    inputnode = pe.Node(niu.IdentityInterface(
        fields=['in_dwi', 'in_bval', 'in_t2w', 'dwi_mask', 't2w_mask',
                'in_param', 'in_surf']), name='inputnode')
    # BUG FIX: 'jacobian' was missing from the output fields although it is
    # wired below from warp_prop.jacdet_map; nipype rejects connections to
    # undefined IdentityInterface fields.
    outputnode = pe.Node(niu.IdentityInterface(
        fields=['dwi', 'dwi_mask', 'jacobian', 'out_surf']), name='outputnode')

    # average of the b=0 volumes, used as moving image for the registration
    avg_b0 = pe.Node(niu.Function(
        input_names=['in_dwi', 'in_bval'], output_names=['out_file'],
        function=b0_average), name='AverageB0')
    # INU-correct both images so intensities are comparable for registration
    n4_b0 = pe.Node(N4BiasFieldCorrection(dimension=3), name='BiasB0')
    n4_t2 = pe.Node(N4BiasFieldCorrection(dimension=3), name='BiasT2')

    getparam = pe.Node(nio.JSONFileGrabber(defaults={'enc_dir': 'y'}),
                       name='GetEncDir')
    # BUG FIX: the num_threads argument was accepted but ignored
    # (hard-coded num_threads=1).
    reg = pe.Node(nex.Registration(num_threads=num_threads), name='Elastix')
    tfx_b0 = pe.Node(nex.EditTransform(), name='tfm_b0')
    split_dwi = pe.Node(fsl.utils.Split(dimension='t'), name='split_dwi')
    warp = pe.MapNode(nex.ApplyWarp(), iterfield=['moving_image'],
                      name='UnwarpDWIs')
    warp_prop = pe.Node(nex.AnalyzeWarp(), name='DisplFieldAnalysis')
    # cache holding the (possibly intensity-corrected) unwarped DWIs
    warpbuff = pe.Node(niu.IdentityInterface(fields=['unwarped']),
                       name='UnwarpedCache')
    mskdwis = pe.MapNode(fs.ApplyMask(), iterfield='in_file', name='MaskDWIs')
    thres = pe.MapNode(Threshold(thresh=0.0), iterfield=['in_file'],
                       name='RemoveNegs')
    merge_dwi = pe.Node(fsl.utils.Merge(dimension='t'), name='merge_dwis')
    # nearest-neighbour interpolation for the (integer) mask
    tfx_msk = pe.Node(nex.EditTransform(
        interpolation='nearest', output_type='unsigned char'),
        name='MSKInterpolator')
    corr_msk = pe.Node(nex.ApplyWarp(), name='UnwarpMsk')
    closmsk = pe.Node(fsl.maths.MathsCommand(
        nan2zeros=True, args='-kernel sphere 3 -dilM -kernel sphere 2 -ero'),
        name='MaskClosing')
    swarp = pe.MapNode(nex.PointsWarp(), iterfield=['points_file'],
                       name='UnwarpSurfs')

    wf = pe.Workflow(name=name)
    wf.connect([
        (inputnode, avg_b0, [('in_dwi', 'in_dwi'),
                             ('in_bval', 'in_bval')]),
        (inputnode, getparam, [('in_param', 'in_file')]),
        (inputnode, split_dwi, [('in_dwi', 'in_file')]),
        (inputnode, corr_msk, [('dwi_mask', 'moving_image')]),
        (inputnode, swarp, [('in_surf', 'points_file')]),
        (inputnode, reg, [('t2w_mask', 'fixed_mask'),
                          ('dwi_mask', 'moving_mask')]),
        (inputnode, n4_t2, [('in_t2w', 'input_image'),
                            ('t2w_mask', 'mask_image')]),
        (inputnode, n4_b0, [('dwi_mask', 'mask_image')]),
        (avg_b0, n4_b0, [('out_file', 'input_image')]),
        (getparam, reg, [
            (('enc_dir', _default_params), 'parameters')]),
        (n4_t2, reg, [('output_image', 'fixed_image')]),
        (n4_b0, reg, [('output_image', 'moving_image')]),
        (reg, tfx_b0, [
            (('transform', _get_last), 'transform_file')]),
        (avg_b0, tfx_b0, [('out_file', 'reference_image')]),
        (tfx_b0, warp_prop, [('output_file', 'transform_file')]),
        (tfx_b0, warp, [('output_file', 'transform_file')]),
        (split_dwi, warp, [('out_files', 'moving_image')]),
        (warpbuff, mskdwis, [('unwarped', 'in_file')]),
        (closmsk, mskdwis, [('out_file', 'mask_file')]),
        (mskdwis, thres, [('out_file', 'in_file')]),
        (thres, merge_dwi, [('out_file', 'in_files')]),
        (reg, tfx_msk, [
            (('transform', _get_last), 'transform_file')]),
        (tfx_b0, swarp, [('output_file', 'transform_file')]),
        (avg_b0, tfx_msk, [('out_file', 'reference_image')]),
        (tfx_msk, corr_msk, [('output_file', 'transform_file')]),
        (corr_msk, closmsk, [('warped_file', 'in_file')]),
        (merge_dwi, outputnode, [('merged_file', 'dwi')]),
        (closmsk, outputnode, [('out_file', 'dwi_mask')]),
        (warp_prop, outputnode, [('jacdet_map', 'jacobian')]),
        (swarp, outputnode, [('warped_file', 'out_surf')])
    ])

    if icorr:
        # modulate the unwarped DWIs by the Jacobian determinant of the
        # displacement field (masked to the closed brain mask)
        jac_mask = pe.Node(fs.ApplyMask(), name='mask_jac')
        mult = pe.MapNode(MultiImageMaths(op_string='-mul %s'),
                          iterfield=['in_file'], name='ModulateDWIs')
        wf.connect([
            (closmsk, jac_mask, [('out_file', 'mask_file')]),
            (warp_prop, jac_mask, [('jacdet_map', 'in_file')]),
            (warp, mult, [('warped_file', 'in_file')]),
            (jac_mask, mult, [('out_file', 'operand_files')]),
            (mult, warpbuff, [('out_file', 'unwarped')])
        ])
    else:
        wf.connect([
            (warp, warpbuff, [('warped_file', 'unwarped')])
        ])
    return wf
def init_atropos_wf(
    name="atropos_wf",
    use_random_seed=True,
    omp_nthreads=None,
    mem_gb=3.0,
    padding=10,
    in_segmentation_model=tuple(ATROPOS_MODELS["T1w"].values()),
    bspline_fitting_distance=200,
    wm_prior=False,
):
    """
    Create an ANTs' ATROPOS workflow for brain tissue segmentation.

    Re-interprets supersteps 6 and 7 of ``antsBrainExtraction.sh``,
    which refine the mask previously computed with the spatial
    normalization to the template.
    The workflow also executes steps 8 and 9 of the brain extraction
    workflow.

    Workflow Graph
        .. workflow::
            :graph2use: orig
            :simple_form: yes

            from niworkflows.anat.ants import init_atropos_wf
            wf = init_atropos_wf()

    Parameters
    ----------
    name : str, optional
        Workflow name (default: "atropos_wf").
    use_random_seed : bool
        Whether ATROPOS should generate a random seed based on the
        system's clock
    omp_nthreads : int
        Maximum number of threads an individual process may use
    mem_gb : float
        Estimated peak memory consumption of the most hungry nodes
        in the workflow
    padding : int
        Pad images with zeros before processing
    in_segmentation_model : tuple
        A k-means segmentation is run to find gray or white matter
        around the edge of the initial brain mask warped from the
        template.
        This produces a segmentation image with :math:`$K$` classes,
        ordered by mean intensity in increasing order.
        With this option, you can control  :math:`$K$` and tell the
        script which classes represent CSF, gray and white matter.
        Format (K, csfLabel, gmLabel, wmLabel).
        Examples:
        ``(3,1,2,3)`` for T1 with K=3, CSF=1, GM=2, WM=3 (default),
        ``(3,3,2,1)`` for T2 with K=3, CSF=3, GM=2, WM=1,
        ``(3,1,3,2)`` for FLAIR with K=3, CSF=1 GM=3, WM=2,
        ``(4,4,2,3)`` uses K=4, CSF=4, GM=2, WM=3.
    bspline_fitting_distance : float
        The size of the b-spline mesh grid elements, in mm (default: 200)
    wm_prior : :obj:`bool`
        Whether the WM posterior obtained with ATROPOS should be regularized
        with a prior map (typically, mapped from the template).
        When ``wm_prior`` is ``True`` the input field ``wm_prior`` of the
        input node must be connected.

    Inputs
    ------
    in_files : list
        The original anatomical images passed in to the brain-extraction
        workflow.
    in_corrected : list
        :abbr:`INU (intensity non-uniformity)`-corrected files.
    in_mask : str
        Brain mask calculated previously.
    wm_prior : :obj:`str`
        Path to the WM prior probability map, aligned with the individual data.

    Outputs
    -------
    out_file : :obj:`str`
        Path of the corrected and brain-extracted result, using the ATROPOS
        refinement.
    bias_corrected : :obj:`str`
        Path of the corrected and result, using the ATROPOS refinement.
    bias_image : :obj:`str`
        Path of the estimated INU bias field, using the ATROPOS refinement.
    out_mask : str
        Refined brain mask
    out_segm : str
        Output segmentation
    out_tpms : str
        Output :abbr:`TPMs (tissue probability maps)`

    """
    wf = pe.Workflow(name)

    out_fields = [
        "bias_corrected", "bias_image", "out_mask", "out_segm", "out_tpms"
    ]

    inputnode = pe.Node(
        niu.IdentityInterface(
            fields=["in_files", "in_corrected", "in_mask", "wm_prior"]),
        name="inputnode",
    )
    outputnode = pe.Node(niu.IdentityInterface(fields=["out_file"] + out_fields),
                         name="outputnode")

    # Re-stamps outputs with the header/affine of the first input file, since
    # several ANTs ImageMath operations below do not preserve them.
    copy_xform = pe.Node(CopyXForm(fields=out_fields), name="copy_xform",
                         run_without_submitting=True)

    # Morphological dilation, radius=2
    dil_brainmask = pe.Node(ImageMath(operation="MD", op2="2", copy_header=True),
                            name="dil_brainmask")
    # Get largest connected component
    get_brainmask = pe.Node(
        ImageMath(operation="GetLargestComponent", copy_header=True),
        name="get_brainmask",
    )

    # Run atropos (core node)
    atropos = pe.Node(
        Atropos(
            convergence_threshold=0.0,
            dimension=3,
            initialization="KMeans",
            likelihood_model="Gaussian",
            mrf_radius=[1, 1, 1],
            mrf_smoothing_factor=0.1,
            n_iterations=3,
            number_of_tissue_classes=in_segmentation_model[0],
            save_posteriors=True,
            use_random_seed=use_random_seed,
        ),
        name="01_atropos",
        n_procs=omp_nthreads,
        mem_gb=mem_gb,
    )

    # massage outputs
    # Pad by `padding` voxels so morphological operations near the image
    # border behave; de-padded again at steps 23-27 below.
    pad_segm = pe.Node(
        ImageMath(operation="PadImage", op2=f"{padding}", copy_header=False),
        name="02_pad_segm",
    )
    pad_mask = pe.Node(
        ImageMath(operation="PadImage", op2=f"{padding}", copy_header=False),
        name="03_pad_mask",
    )

    # Split segmentation in binary masks
    sel_labels = pe.Node(
        niu.Function(function=_select_labels,
                     output_names=["out_wm", "out_gm", "out_csf"]),
        name="04_sel_labels",
    )
    sel_labels.inputs.labels = list(reversed(in_segmentation_model[1:]))

    # Select largest components (GM, WM)
    # ImageMath ${DIMENSION} ${EXTRACTION_WM} GetLargestComponent ${EXTRACTION_WM}
    get_wm = pe.Node(ImageMath(operation="GetLargestComponent"),
                     name="05_get_wm")
    get_gm = pe.Node(ImageMath(operation="GetLargestComponent"),
                     name="06_get_gm")

    # Fill holes and calculate intersection
    # ImageMath ${DIMENSION} ${EXTRACTION_TMP} FillHoles ${EXTRACTION_GM} 2
    # MultiplyImages ${DIMENSION} ${EXTRACTION_GM} ${EXTRACTION_TMP} ${EXTRACTION_GM}
    fill_gm = pe.Node(ImageMath(operation="FillHoles", op2="2"),
                      name="07_fill_gm")
    mult_gm = pe.Node(
        MultiplyImages(dimension=3, output_product_image="08_mult_gm.nii.gz"),
        name="08_mult_gm",
    )

    # MultiplyImages ${DIMENSION} ${EXTRACTION_WM} ${ATROPOS_WM_CLASS_LABEL} ${EXTRACTION_WM}
    # ImageMath ${DIMENSION} ${EXTRACTION_TMP} ME ${EXTRACTION_CSF} 10
    relabel_wm = pe.Node(
        MultiplyImages(
            dimension=3,
            second_input=in_segmentation_model[-1],
            output_product_image="09_relabel_wm.nii.gz",
        ),
        name="09_relabel_wm",
    )
    me_csf = pe.Node(ImageMath(operation="ME", op2="10"), name="10_me_csf")

    # ImageMath ${DIMENSION} ${EXTRACTION_GM} addtozero ${EXTRACTION_GM} ${EXTRACTION_TMP}
    # MultiplyImages ${DIMENSION} ${EXTRACTION_GM} ${ATROPOS_GM_CLASS_LABEL} ${EXTRACTION_GM}
    # ImageMath ${DIMENSION} ${EXTRACTION_SEGMENTATION} addtozero ${EXTRACTION_WM} ${EXTRACTION_GM}
    add_gm = pe.Node(ImageMath(operation="addtozero"), name="11_add_gm")
    relabel_gm = pe.Node(
        MultiplyImages(
            dimension=3,
            second_input=in_segmentation_model[-2],
            output_product_image="12_relabel_gm.nii.gz",
        ),
        name="12_relabel_gm",
    )
    add_gm_wm = pe.Node(ImageMath(operation="addtozero"),
                        name="13_add_gm_wm")

    # Superstep 7
    # Split segmentation in binary masks
    sel_labels2 = pe.Node(
        niu.Function(function=_select_labels,
                     output_names=["out_gm", "out_wm"]),
        name="14_sel_labels2",
    )
    sel_labels2.inputs.labels = in_segmentation_model[2:]

    # ImageMath ${DIMENSION} ${EXTRACTION_MASK} addtozero ${EXTRACTION_MASK} ${EXTRACTION_TMP}
    add_7 = pe.Node(ImageMath(operation="addtozero"), name="15_add_7")
    # ImageMath ${DIMENSION} ${EXTRACTION_MASK} ME ${EXTRACTION_MASK} 2
    me_7 = pe.Node(ImageMath(operation="ME", op2="2"), name="16_me_7")
    # ImageMath ${DIMENSION} ${EXTRACTION_MASK} GetLargestComponent ${EXTRACTION_MASK}
    comp_7 = pe.Node(ImageMath(operation="GetLargestComponent"),
                     name="17_comp_7")
    # ImageMath ${DIMENSION} ${EXTRACTION_MASK} MD ${EXTRACTION_MASK} 4
    md_7 = pe.Node(ImageMath(operation="MD", op2="4"), name="18_md_7")
    # ImageMath ${DIMENSION} ${EXTRACTION_MASK} FillHoles ${EXTRACTION_MASK} 2
    fill_7 = pe.Node(ImageMath(operation="FillHoles", op2="2"),
                     name="19_fill_7")
    # ImageMath ${DIMENSION} ${EXTRACTION_MASK} addtozero ${EXTRACTION_MASK} \
    # ${EXTRACTION_MASK_PRIOR_WARPED}
    add_7_2 = pe.Node(ImageMath(operation="addtozero"), name="20_add_7_2")
    # ImageMath ${DIMENSION} ${EXTRACTION_MASK} MD ${EXTRACTION_MASK} 5
    md_7_2 = pe.Node(ImageMath(operation="MD", op2="5"), name="21_md_7_2")
    # ImageMath ${DIMENSION} ${EXTRACTION_MASK} ME ${EXTRACTION_MASK} 5
    me_7_2 = pe.Node(ImageMath(operation="ME", op2="5"), name="22_me_7_2")

    # De-pad
    depad_mask = pe.Node(ImageMath(operation="PadImage", op2="-%d" % padding),
                         name="23_depad_mask")
    depad_segm = pe.Node(ImageMath(operation="PadImage", op2="-%d" % padding),
                         name="24_depad_segm")
    depad_gm = pe.Node(ImageMath(operation="PadImage", op2="-%d" % padding),
                       name="25_depad_gm")
    depad_wm = pe.Node(ImageMath(operation="PadImage", op2="-%d" % padding),
                       name="26_depad_wm")
    depad_csf = pe.Node(ImageMath(operation="PadImage", op2="-%d" % padding),
                        name="27_depad_csf")

    msk_conform = pe.Node(niu.Function(function=_conform_mask),
                          name="msk_conform")
    merge_tpms = pe.Node(niu.Merge(in_segmentation_model[0]),
                         name="merge_tpms")

    # Selects the WM posterior to drive the final N4 pass; without a prior,
    # the index is taken straight from the segmentation model tuple.
    sel_wm = pe.Node(niu.Select(), name="sel_wm", run_without_submitting=True)
    if not wm_prior:
        sel_wm.inputs.index = in_segmentation_model[-1] - 1

    copy_xform_wm = pe.Node(CopyXForm(fields=["wm_map"]),
                            name="copy_xform_wm", run_without_submitting=True)

    # Refine INU correction
    inu_n4_final = pe.MapNode(
        N4BiasFieldCorrection(
            dimension=3,
            save_bias=True,
            copy_header=True,
            n_iterations=[50] * 5,
            convergence_threshold=1e-7,
            shrink_factor=4,
            bspline_fitting_distance=bspline_fitting_distance,
        ),
        n_procs=omp_nthreads,
        name="inu_n4_final",
        iterfield=["input_image"],
    )

    # --rescale-intensities is only available from ANTs 2.1.0; older
    # interfaces raise when the trait is set, hence the warning fallback.
    try:
        inu_n4_final.inputs.rescale_intensities = True
    except ValueError:
        warn(
            "N4BiasFieldCorrection's --rescale-intensities option was added in ANTS 2.1.0 "
            f"({inu_n4_final.interface.version} found.) Please consider upgrading.",
            UserWarning,
        )

    # Apply mask
    apply_mask = pe.MapNode(ApplyMask(), iterfield=["in_file"],
                            name="apply_mask")

    # fmt: off
    wf.connect([
        (inputnode, dil_brainmask, [("in_mask", "op1")]),
        (inputnode, copy_xform, [(("in_files", _pop), "hdr_file")]),
        (inputnode, copy_xform_wm, [(("in_files", _pop), "hdr_file")]),
        (inputnode, pad_mask, [("in_mask", "op1")]),
        (inputnode, atropos, [("in_corrected", "intensity_images")]),
        (inputnode, inu_n4_final, [("in_files", "input_image")]),
        (inputnode, msk_conform, [(("in_files", _pop), "in_reference")]),
        (dil_brainmask, get_brainmask, [("output_image", "op1")]),
        (get_brainmask, atropos, [("output_image", "mask_image")]),
        (atropos, pad_segm, [("classified_image", "op1")]),
        (pad_segm, sel_labels, [("output_image", "in_segm")]),
        (sel_labels, get_wm, [("out_wm", "op1")]),
        (sel_labels, get_gm, [("out_gm", "op1")]),
        (get_gm, fill_gm, [("output_image", "op1")]),
        (get_gm, mult_gm, [("output_image", "first_input")]),
        (fill_gm, mult_gm, [("output_image", "second_input")]),
        (get_wm, relabel_wm, [("output_image", "first_input")]),
        (sel_labels, me_csf, [("out_csf", "op1")]),
        (mult_gm, add_gm, [("output_product_image", "op1")]),
        (me_csf, add_gm, [("output_image", "op2")]),
        (add_gm, relabel_gm, [("output_image", "first_input")]),
        (relabel_wm, add_gm_wm, [("output_product_image", "op1")]),
        (relabel_gm, add_gm_wm, [("output_product_image", "op2")]),
        (add_gm_wm, sel_labels2, [("output_image", "in_segm")]),
        (sel_labels2, add_7, [("out_wm", "op1"), ("out_gm", "op2")]),
        (add_7, me_7, [("output_image", "op1")]),
        (me_7, comp_7, [("output_image", "op1")]),
        (comp_7, md_7, [("output_image", "op1")]),
        (md_7, fill_7, [("output_image", "op1")]),
        (fill_7, add_7_2, [("output_image", "op1")]),
        (pad_mask, add_7_2, [("output_image", "op2")]),
        (add_7_2, md_7_2, [("output_image", "op1")]),
        (md_7_2, me_7_2, [("output_image", "op1")]),
        (me_7_2, depad_mask, [("output_image", "op1")]),
        (add_gm_wm, depad_segm, [("output_image", "op1")]),
        (relabel_wm, depad_wm, [("output_product_image", "op1")]),
        (relabel_gm, depad_gm, [("output_product_image", "op1")]),
        (sel_labels, depad_csf, [("out_csf", "op1")]),
        (depad_csf, merge_tpms, [("output_image", "in1")]),
        (depad_gm, merge_tpms, [("output_image", "in2")]),
        (depad_wm, merge_tpms, [("output_image", "in3")]),
        (depad_mask, msk_conform, [("output_image", "in_mask")]),
        (msk_conform, copy_xform, [("out", "out_mask")]),
        (depad_segm, copy_xform, [("output_image", "out_segm")]),
        (merge_tpms, copy_xform, [("out", "out_tpms")]),
        (atropos, sel_wm, [("posteriors", "inlist")]),
        (sel_wm, copy_xform_wm, [("out", "wm_map")]),
        (copy_xform_wm, inu_n4_final, [("wm_map", "weight_image")]),
        (inu_n4_final, copy_xform, [("output_image", "bias_corrected"),
                                    ("bias_image", "bias_image")]),
        (copy_xform, apply_mask, [("bias_corrected", "in_file"),
                                  ("out_mask", "in_mask")]),
        (apply_mask, outputnode, [("out_file", "out_file")]),
        (copy_xform, outputnode, [
            ("bias_corrected", "bias_corrected"),
            ("bias_image", "bias_image"),
            ("out_mask", "out_mask"),
            ("out_segm", "out_segm"),
            ("out_tpms", "out_tpms"),
        ]),
    ])
    # fmt: on

    if wm_prior:
        from nipype.algorithms.metrics import FuzzyOverlap

        def _argmax(in_dice):
            import numpy as np
            return np.argmax(in_dice)

        # Pick the ATROPOS posterior that best overlaps the WM prior, instead
        # of trusting the label order of the segmentation model.
        match_wm = pe.Node(
            niu.Function(function=_matchlen),
            name="match_wm",
            run_without_submitting=True,
        )
        overlap = pe.Node(FuzzyOverlap(), name="overlap",
                          run_without_submitting=True)

        apply_wm_prior = pe.Node(niu.Function(function=_improd),
                                 name="apply_wm_prior")

        # fmt: off
        wf.disconnect([
            (copy_xform_wm, inu_n4_final, [("wm_map", "weight_image")]),
        ])
        wf.connect([
            (inputnode, apply_wm_prior, [("in_mask", "in_mask"),
                                         ("wm_prior", "op2")]),
            (inputnode, match_wm, [("wm_prior", "value")]),
            (atropos, match_wm, [("posteriors", "reference")]),
            (atropos, overlap, [("posteriors", "in_ref")]),
            (match_wm, overlap, [("out", "in_tst")]),
            (overlap, sel_wm, [(("class_fdi", _argmax), "index")]),
            (copy_xform_wm, apply_wm_prior, [("wm_map", "op1")]),
            (apply_wm_prior, inu_n4_final, [("out", "weight_image")]),
        ])
        # fmt: on
    return wf
if __name__ == "__main__": input_file = '/autofs/space/bhim_001/users/aj660/PSACNN/data/IXI/T2/IXI002-Guys-0828-T2.nii.gz' output_dir = '/autofs/space/bhim_001/users/aj660/psacnn_brain_segmentation/test_output/IXI002-Guys-0828-T2' subprocess.call(['mkdir', '-p', output_dir]) preprocess_flow = Workflow(name='preprocess', base_dir=output_dir) conform = Node(MRIConvert(conform=True, out_type='niigz', out_file='conformed.nii.gz'), name='conform') n4 = Node(N4BiasFieldCorrection(dimension=3, bspline_fitting_distance=300, shrink_factor=3, n_iterations=[50, 50, 30, 20], output_image='n4.nii.gz'), name='n4') robex = Node(ROBEX(seed=1729, stripped_image='brain.nii.gz'), name='robex') psacnn = Node(PSACNN(output_dir=output_dir, contrast='t2w', gpu=0, patch_size=96, save_prob_output=False), name='psacnn') preprocess_flow.connect([(conform, n4, [('out_file', 'input_image')]), (n4, robex, [('output_image', 'input_image')]), (robex, psacnn, [('stripped_image', 'input_image')
def init_n4_only_wf(
    atropos_model=None,
    atropos_refine=True,
    atropos_use_random_seed=True,
    bids_suffix="T1w",
    mem_gb=3.0,
    name="n4_only_wf",
    omp_nthreads=None,
):
    """
    Build a workflow to sidetrack brain extraction on skull-stripped datasets.

    An alternative workflow to "init_brain_extraction_wf", for anatomical
    images which have already been brain extracted.

      1. Creates brain mask assuming all zero voxels are outside the brain
      2. Applies N4 bias field correction
      3. (Optional) apply ATROPOS and massage its outputs
      4. Use results from 3 to refine N4 bias field correction

    Workflow Graph
        .. workflow::
            :graph2use: orig
            :simple_form: yes

            from niworkflows.anat.ants import init_n4_only_wf
            wf = init_n4_only_wf()

    Parameters
    ----------
    omp_nthreads : int
        Maximum number of threads an individual process may use
    mem_gb : float
        Estimated peak memory consumption of the most hungry nodes
    bids_suffix : str
        Sequence type of the first input image. For a list of acceptable values
        see https://bids-specification.readthedocs.io/en/latest/04-modality-specific-files/01-magnetic-resonance-imaging-data.html#anatomy-imaging-data
    atropos_refine : bool
        Enables or disables the whole ATROPOS sub-workflow
    atropos_use_random_seed : bool
        Whether ATROPOS should generate a random seed based on the
        system's clock
    atropos_model : tuple or None
        Allows to specify a particular segmentation model, overwriting
        the defaults based on ``bids_suffix``
    name : str, optional
        Workflow name (default: ``'n4_only_wf'``).

    Inputs
    ------
    in_files
        List of input anatomical images to be bias corrected,
        typically T1-weighted.
        If a list of anatomical images is provided, subsequently
        specified images are used during the segmentation process.
        However, only the first image is used in the registration
        of priors.
        Our suggestion would be to specify the T1w as the first image.

    Outputs
    -------
    out_file
        :abbr:`INU (intensity non-uniformity)`-corrected ``in_files``
    out_mask
        Calculated brain mask
    bias_corrected
        Same as "out_file", provided for consistency with brain extraction
    bias_image
        The :abbr:`INU (intensity non-uniformity)` field estimated for each
        input in ``in_files``
    out_segm
        Output segmentation by ATROPOS
    out_tpms
        Output :abbr:`TPMs (tissue probability maps)` by ATROPOS

    """
    from ..interfaces.nibabel import Binarize

    wf = pe.Workflow(name)

    inputnode = pe.Node(niu.IdentityInterface(fields=["in_files", "in_mask"]),
                        name="inputnode")
    outputnode = pe.Node(
        niu.IdentityInterface(fields=[
            "out_file",
            "out_mask",
            "bias_corrected",
            "bias_image",
            "out_segm",
            "out_tpms",
        ]),
        name="outputnode",
    )

    # Create brain mask
    # NOTE(review): thresholds at >= 2 rather than > 0, so voxels of
    # intensity 1 are excluded from the mask — confirm intended.
    thr_brainmask = pe.Node(Binarize(thresh_low=2), name="binarize")

    # INU correction
    inu_n4_final = pe.MapNode(
        N4BiasFieldCorrection(
            dimension=3,
            save_bias=True,
            copy_header=True,
            n_iterations=[50] * 5,
            convergence_threshold=1e-7,
            shrink_factor=4,
            bspline_fitting_distance=200,
        ),
        n_procs=omp_nthreads,
        name="inu_n4_final",
        iterfield=["input_image"],
    )

    # Check ANTs version
    # --rescale-intensities requires ANTs >= 2.1.0; older interfaces raise.
    try:
        inu_n4_final.inputs.rescale_intensities = True
    except ValueError:
        warn(
            "N4BiasFieldCorrection's --rescale-intensities option was added in ANTS 2.1.0 "
            f"({inu_n4_final.interface.version} found.) Please consider upgrading.",
            UserWarning,
        )

    # fmt: off
    wf.connect([
        (inputnode, inu_n4_final, [("in_files", "input_image")]),
        (inputnode, thr_brainmask, [(("in_files", _pop), "in_file")]),
        (thr_brainmask, outputnode, [("out_mask", "out_mask")]),
        (inu_n4_final, outputnode, [("output_image", "out_file"),
                                    ("output_image", "bias_corrected"),
                                    ("bias_image", "bias_image")]),
    ])
    # fmt: on

    # If atropos refine, do in4 twice
    if atropos_refine:
        atropos_model = atropos_model or list(
            ATROPOS_MODELS[bids_suffix].values())
        atropos_wf = init_atropos_wf(
            use_random_seed=atropos_use_random_seed,
            omp_nthreads=omp_nthreads,
            mem_gb=mem_gb,
            in_segmentation_model=atropos_model,
        )
        # The ATROPOS sub-workflow replaces the plain N4 outputs, so the
        # direct connections above are removed first.
        # fmt: off
        wf.disconnect([
            (inu_n4_final, outputnode, [("output_image", "out_file"),
                                        ("output_image", "bias_corrected"),
                                        ("bias_image", "bias_image")]),
        ])
        wf.connect([
            (inputnode, atropos_wf, [("in_files", "inputnode.in_files")]),
            (inu_n4_final, atropos_wf, [("output_image",
                                         "inputnode.in_corrected")]),
            (thr_brainmask, atropos_wf, [("out_mask", "inputnode.in_mask")]),
            (atropos_wf, outputnode, [
                ("outputnode.out_file", "out_file"),
                ("outputnode.bias_corrected", "bias_corrected"),
                ("outputnode.bias_image", "bias_image"),
                ("outputnode.out_segm", "out_segm"),
                ("outputnode.out_tpms", "out_tpms"),
            ]),
        ])
        # fmt: on
    return wf
def init_coregistration_wf(
    *,
    bspline_fitting_distance=200,
    mem_gb=3.0,
    name="coregistration_wf",
    omp_nthreads=None,
    sloppy=False,
    debug=False,
):
    """
    Set-up a T2w-to-T1w within-baby co-registration framework.

    See the ANTs' registration config file (under ``nibabies/data``) for
    further details.
    The main surprise in it is that, for some participants, accurate
    registration requires extra degrees of freedom (one affine level and one
    SyN level) to ensure that the T1w and T2w images align well.
    I attribute this requirement to the following potential reasons:

      * The T1w image and the T2w image were acquired in different sessions,
        apart in time enough for growth to happen.
        Although this is, in theory possible, it doesn't seem the images we
        have tested on are acquired on different sessions.
      * The skull is still so malleable that a change of position of the baby
        inside the coil made an actual change on the overall shape of their
        head.
      * Nonlinear distortions of the T1w and T2w images are, for some reason,
        more notorious for babies than they are for adults.
        We would need to look into each sequence's details to confirm this.

    Parameters
    ----------
    bspline_fitting_distance : :obj:`float`
        Distance in mm between B-Spline control points for N4 INU estimation.
    mem_gb : :obj:`float`
        Base memory fingerprint unit.
    name : :obj:`str`
        This particular workflow's unique name (Nipype requirement).
    omp_nthreads : :obj:`int`
        The number of threads for individual processes in this workflow.
    sloppy : :obj:`bool`
        Run in *sloppy* mode.
    debug : :obj:`bool`
        Produce intermediate registration files

    Inputs
    ------
    in_t1w : :obj:`str`
        The unprocessed input T1w image.
    in_t2w_preproc : :obj:`str`
        The preprocessed input T2w image, from the brain extraction workflow.
    in_mask : :obj:`str`
        The brainmask, as obtained in T2w space.
    in_probmap : :obj:`str`
        The probabilistic brainmask, as obtained in T2w space.

    Outputs
    -------
    t1w_preproc : :obj:`str`
        The preprocessed T1w image (INU and clipping).
    t2w_preproc : :obj:`str`
        The preprocessed T2w image (INU and clipping), aligned into the T1w's
        space.
    t1w_brain : :obj:`str`
        The preprocessed, brain-extracted T1w image.
    t1w_mask : :obj:`str`
        The binary brainmask projected from the T2w.
    t1w2t2w_xfm : :obj:`str`
        The T1w-to-T2w mapping.

    """
    from nipype.interfaces.ants import N4BiasFieldCorrection
    from niworkflows.interfaces.fixes import (
        FixHeaderRegistration as Registration,
        FixHeaderApplyTransforms as ApplyTransforms,
    )
    from niworkflows.interfaces.nibabel import ApplyMask, Binarize
    from ...interfaces.nibabel import BinaryDilation

    workflow = pe.Workflow(name)

    inputnode = pe.Node(
        niu.IdentityInterface(
            fields=["in_t1w", "in_t2w_preproc", "in_mask", "in_probmap"]),
        name="inputnode",
    )
    outputnode = pe.Node(
        niu.IdentityInterface(fields=[
            "t1w_preproc",
            "t1w_brain",
            "t1w_mask",
            "t1w2t2w_xfm",
            "t2w_preproc",
        ]),
        name="outputnode",
    )

    # Holds the (three) fixed-image masks handed to the multi-level
    # registration below — one entry per registration level.
    fixed_masks_arg = pe.Node(niu.Merge(3), name="fixed_masks_arg",
                              run_without_submitting=True)

    # Dilate t2w mask for easier t1->t2 registration
    reg_mask = pe.Node(BinaryDilation(radius=8, iterations=3),
                       name="reg_mask")
    refine_mask = pe.Node(BinaryDilation(radius=8, iterations=1),
                          name="refine_mask")

    # Set up T2w -> T1w within-subject registration
    coreg = pe.Node(
        Registration(
            from_file=pkgr_fn("nibabies.data", "within_subject_t1t2.json")),
        name="coreg",
        n_procs=omp_nthreads,
        mem_gb=mem_gb,
    )
    # single-precision registration when running sloppy
    coreg.inputs.float = sloppy
    if debug:
        coreg.inputs.args = "--write-interval-volumes 5"
        coreg.inputs.output_inverse_warped_image = sloppy
        coreg.inputs.output_warped_image = sloppy

    # Map the T2w-space probabilistic mask and the T2w itself into T1w space
    map_mask = pe.Node(ApplyTransforms(interpolation="Gaussian"),
                       name="map_mask", mem_gb=1)
    map_t2w = pe.Node(ApplyTransforms(interpolation="BSpline"),
                      name="map_t2w", mem_gb=1)
    thr_mask = pe.Node(Binarize(thresh_low=0.80), name="thr_mask")

    # Final INU correction of the T1w, weighted by the projected probmap
    final_n4 = pe.Node(
        N4BiasFieldCorrection(
            dimension=3,
            bspline_fitting_distance=bspline_fitting_distance,
            save_bias=True,
            copy_header=True,
            n_iterations=[50] * 5,
            convergence_threshold=1e-7,
            rescale_intensities=True,
            shrink_factor=4,
        ),
        n_procs=omp_nthreads,
        name="final_n4",
    )
    apply_mask = pe.Node(ApplyMask(), name="apply_mask")

    # fmt:off
    workflow.connect([
        (inputnode, map_mask, [("in_t1w", "reference_image")]),
        (inputnode, final_n4, [("in_t1w", "input_image")]),
        (inputnode, coreg, [("in_t1w", "moving_image"),
                            ("in_t2w_preproc", "fixed_image")]),
        (inputnode, map_mask, [("in_probmap", "input_image")]),
        (inputnode, reg_mask, [("in_mask", "in_file")]),
        (inputnode, refine_mask, [("in_mask", "in_file")]),
        (reg_mask, fixed_masks_arg, [("out_file", "in1")]),
        (reg_mask, fixed_masks_arg, [("out_file", "in2")]),
        (refine_mask, fixed_masks_arg, [("out_file", "in3")]),
        (inputnode, map_t2w, [("in_t1w", "reference_image")]),
        (inputnode, map_t2w, [("in_t2w_preproc", "input_image")]),
        (fixed_masks_arg, coreg, [("out", "fixed_image_masks")]),
        (coreg, map_mask, [
            ("reverse_transforms", "transforms"),
            ("reverse_invert_flags", "invert_transform_flags"),
        ]),
        (coreg, map_t2w, [
            ("reverse_transforms", "transforms"),
            ("reverse_invert_flags", "invert_transform_flags"),
        ]),
        (map_mask, thr_mask, [("output_image", "in_file")]),
        (map_mask, final_n4, [("output_image", "weight_image")]),
        (final_n4, apply_mask, [("output_image", "in_file")]),
        (thr_mask, apply_mask, [("out_mask", "in_mask")]),
        (final_n4, outputnode, [("output_image", "t1w_preproc")]),
        (map_t2w, outputnode, [("output_image", "t2w_preproc")]),
        (thr_mask, outputnode, [("out_mask", "t1w_mask")]),
        (apply_mask, outputnode, [("out_file", "t1w_brain")]),
        (coreg, outputnode, [("forward_transforms", "t1w2t2w_xfm")]),
    ])
    # fmt:on
    return workflow
from nipype.interfaces.ants import N4BiasFieldCorrection import sys import os import ast if len(sys.argv) < 2: print( "INPUT from ipython: run n4_bias_correction input_image dimension n_iterations(optional, form:[n_1,n_2,n_3,n_4]) output_image(optional)" ) sys.exit(1) # if output_image is given if len(sys.argv) > 3: n4 = N4BiasFieldCorrection(output_image=sys.argv[4]) else: n4 = N4BiasFieldCorrection() # dimension of input image, input image n4.inputs.dimension = int(sys.argv[2]) n4.inputs.input_image = sys.argv[1] # if n_dinesions arg given if len(sys.argv) > 2: n4.inputs.n_iterations = ast.literal_eval(sys.argv[3]) n4.run()
def init_brainextraction_wf(name="brainextraction_wf"):
    """
    Remove nonbrain tissue from images.

    Parameters
    ----------
    name : :obj:`str`, optional
        Workflow name (default: ``"brainextraction_wf"``)

    Inputs
    ------
    in_file : :obj:`str`
        the GRE magnitude or EPI reference to be brain-extracted

    Outputs
    -------
    out_file : :obj:`str`
        the input file after N4 and smart clipping
    out_brain : :obj:`str`
        the output file, just the brain extracted
    out_mask : :obj:`str`
        the calculated mask
    out_probseg : :obj:`str`
        a probability map that the random walker reached
        a given voxel (some sort of "soft" brainmask)

    """
    from nipype.interfaces.ants import N4BiasFieldCorrection

    from ..interfaces.brainmask import BrainExtraction
    from ..interfaces.utils import IntensityClip

    workflow = Workflow(name=name)

    inputnode = pe.Node(niu.IdentityInterface(fields=("in_file",)),
                        name="inputnode")
    outputnode = pe.Node(
        niu.IdentityInterface(
            fields=("out_file", "out_brain", "out_mask", "out_probseg")
        ),
        name="outputnode",
    )

    # Tame intensity outliers before estimating the bias field
    pre_clip = pe.Node(IntensityClip(), name="clipper_pre")

    # De-gradient the fields ("bias/illumination artifact")
    debias = pe.Node(
        N4BiasFieldCorrection(
            dimension=3,
            copy_header=True,
            n_iterations=[50] * 5,
            convergence_threshold=1e-7,
            shrink_factor=4,
        ),
        n_procs=8,
        name="n4",
    )

    # Second clip after N4: only the top of the range this time
    post_clip = pe.Node(IntensityClip(p_max=100.0), name="clipper_post")
    brainmask = pe.Node(BrainExtraction(), name="masker")

    # fmt:off
    workflow.connect([
        (inputnode, pre_clip, [("in_file", "in_file")]),
        (pre_clip, debias, [("out_file", "input_image")]),
        (debias, post_clip, [("output_image", "in_file")]),
        (post_clip, brainmask, [("out_file", "in_file")]),
        (post_clip, outputnode, [("out_file", "out_file")]),
        (brainmask, outputnode, [("out_file", "out_brain"),
                                 ("out_mask", "out_mask"),
                                 ("out_probseg", "out_probseg")]),
    ])
    # fmt:on
    return workflow
def init_epi_reference_wf(
    omp_nthreads,
    auto_bold_nss=False,
    name="epi_reference_wf",
):
    """
    Build a workflow that generates a reference map from a set of EPI images.

    .. danger ::

        All input files MUST have the same shimming configuration.
        At the very least, make sure all input EPI images are acquired within the
        same session, and have the same PE direction and total readout time.

    Inputs to this workflow might be a list of :abbr:`SBRefs (single-band references)`,
    a list of fieldmapping :abbr:`EPIs (echo-planar images)`, a list of
    :abbr:`BOLD (blood-oxygen level-dependent)` images, or a list of
    :abbr:`DWI (diffusion-weighted imaging)` datasets.
    Please note that these different modalities should not be mixed together in any
    case for this particular workflow.

    For BOLD datasets, the workflow may be set up to execute an algorithm that determines
    the nonsteady states in the beginning of the timeseries (also called *dummy scans*),
    and uses those for generating a reference of the particular run, since the nonsteady
    states are known to yield better T1 contrast (and hence perhaps better signal for
    image registration).

    Relatedly, the workflow also provides a global signal drift estimation per run.
    This global signal drift is typically interesting for DWIs: because *b=0*
    volumes are typically scattered throughout the scan, this drift can be fit an
    exponential decay to model the signal drop caused by the increasing temperature
    of the device (this is closely related to BOLD *nonsteady states* described above,
    as these are just the few initial instants when the exponential decay is much faster).

    Workflow Graph
        .. workflow::
            :graph2use: orig
            :simple_form: yes

            from niworkflows.workflows.epi.refmap import init_epi_reference_wf
            wf = init_epi_reference_wf(omp_nthreads=1)

    Parameters
    ----------
    omp_nthreads : :obj:`int`
        Maximum number of threads an individual process may use
    name : :obj:`str`
        Name of workflow (default: ``epi_reference_wf``)
    auto_bold_nss : :obj:`bool`
        If ``True``, determines nonsteady states in the beginning of the timeseries
        and selects them for the averaging of each run.
        IMPORTANT: this option applies only to BOLD EPIs.

    Inputs
    ------
    in_files : :obj:`list` of :obj:`str`
        List of paths of the input EPI images from which reference volumes will be
        selected, aligned and averaged.

    Outputs
    -------
    epi_ref_file : :obj:`str`
        Path of the generated EPI reference file.
    xfm_files : :obj:`list` of :obj:`str`
        List of rigid-body transforms in LTA format to resample from
        the reference volume of each run into the ``epi_ref_file`` reference.
    per_run_ref_files : :obj:`list` of :obj:`str`
        List of paths to the reference volume generated per input run.
    drift_factors : :obj:`list` of :obj:`list` of :obj:`float`
        A list of global signal drift factors for the set of volumes selected
        for averaging, per run.
    n_dummy_scans : :obj:`list` of :obj:`int`
        Number of nonsteady states at the beginning of each run (only BOLD with
        ``auto_bold_nss=True``)
    validate_report : :obj:`str`
        HTML reportlet(s) indicating whether the input files had a valid affine

    See Also
    --------
    Discussion and original flowchart at `nipreps/niworkflows#601
    <https://github.com/nipreps/niworkflows/issues/601>`__.

    """
    from nipype.interfaces.ants import N4BiasFieldCorrection

    from ...utils.connections import listify
    from ...interfaces.bold import NonsteadyStatesDetector
    from ...interfaces.freesurfer import StructuralReference
    from ...interfaces.header import ValidateImage
    from ...interfaces.images import RobustAverage
    from ...interfaces.nibabel import IntensityClip

    wf = Workflow(name=name)

    inputnode = pe.Node(
        niu.IdentityInterface(fields=["in_files", "t_masks"]), name="inputnode"
    )
    outputnode = pe.Node(
        niu.IdentityInterface(
            fields=[
                "epi_ref_file",
                "xfm_files",
                "per_run_ref_files",
                "drift_factors",
                "n_dummy",
                "validation_report",
            ]
        ),
        name="outputnode",
    )

    # Sanity-check the affine of every input run (emits an HTML reportlet)
    validate_nii = pe.MapNode(
        ValidateImage(), name="validate_nii", iterfield=["in_file"]
    )
    # One robust (outlier-resistant) average volume per run, plus drift factors
    per_run_avgs = pe.MapNode(
        RobustAverage(), name="per_run_avgs", mem_gb=1, iterfield=["in_file", "t_mask"]
    )
    # Clip intensity outliers of each per-run average
    clip_avgs = pe.MapNode(IntensityClip(), name="clip_avgs", iterfield=["in_file"])

    # de-gradient the fields ("bias/illumination artifact")
    n4_avgs = pe.MapNode(
        N4BiasFieldCorrection(
            dimension=3,
            copy_header=True,
            n_iterations=[50] * 5,
            convergence_threshold=1e-7,
            shrink_factor=4,
        ),
        n_procs=omp_nthreads,
        name="n4_avgs",
        iterfield=["input_image"],
    )
    # Remove residual background noise after N4
    clip_bg_noise = pe.MapNode(
        IntensityClip(p_min=2.0, p_max=100.0),
        name="clip_bg_noise",
        iterfield=["in_file"],
    )

    # Rigid + intensity-scaling co-registration of all per-run references
    # into one unbiased template (FreeSurfer mri_robust_template)
    epi_merge = pe.Node(
        StructuralReference(
            auto_detect_sensitivity=True,
            initial_timepoint=1,  # For deterministic behavior
            intensity_scaling=True,  # 7-DOF (rigid + intensity)
            subsample_threshold=200,
            fixed_timepoint=True,
            no_iteration=True,
            transform_outputs=True,
        ),
        name="epi_merge",
    )

    # NOTE(review): _post_merge is defined elsewhere in this module; it is not
    # visible here -- presumably it post-processes the merged reference.
    post_merge = pe.Node(niu.Function(function=_post_merge), name="post_merge")

    def _set_threads(in_list, maximum):
        # Don't request more mri_robust_template threads than there are runs
        return min(len(in_list), maximum)

    # fmt:off
    wf.connect([
        (inputnode, validate_nii, [(("in_files", listify), "in_file")]),
        (validate_nii, per_run_avgs, [("out_file", "in_file")]),
        (per_run_avgs, clip_avgs, [("out_file", "in_file")]),
        (clip_avgs, n4_avgs, [("out_file", "input_image")]),
        (n4_avgs, clip_bg_noise, [("output_image", "in_file")]),
        (clip_bg_noise, epi_merge, [
            ("out_file", "in_files"),
            (("out_file", _set_threads, omp_nthreads), "num_threads"),
        ]),
        (epi_merge, post_merge, [("out_file", "in_file"),
                                 ("transform_outputs", "in_xfms")]),
        (post_merge, outputnode, [("out", "epi_ref_file")]),
        (epi_merge, outputnode, [("transform_outputs", "xfm_files")]),
        (per_run_avgs, outputnode, [("out_drift", "drift_factors")]),
        (n4_avgs, outputnode, [("output_image", "per_run_ref_files")]),
        (validate_nii, outputnode, [("out_report", "validation_report")]),
    ])
    # fmt:on

    if auto_bold_nss:
        # BOLD only: detect nonsteady states and use them as the averaging mask
        select_volumes = pe.MapNode(
            NonsteadyStatesDetector(), name="select_volumes", iterfield=["in_file"]
        )
        # fmt:off
        wf.connect([
            (validate_nii, select_volumes, [("out_file", "in_file")]),
            (select_volumes, per_run_avgs, [("t_mask", "t_mask")]),
            (select_volumes, outputnode, [("n_dummy", "n_dummy")])
        ])
        # fmt:on
    else:
        # Caller supplies the temporal masks explicitly
        wf.connect(inputnode, "t_masks", per_run_avgs, "t_mask")

    return wf
def epi_mni_align(name='SpatialNormalization', ants_nthreads=6, testing=False,
                  resolution=2):
    """
    Uses FSL FLIRT with the BBR cost function to find the transform that
    maps the EPI space into the MNI152-nonlinear-symmetric atlas.

    NOTE(review): the sentence above appears stale -- the implementation below
    registers with ANTs (RobustMNINormalization), not FSL FLIRT/BBR. Confirm
    and update the description.

    The input epi_mean is the averaged and brain-masked EPI timeseries

    Returns the EPI mean resampled in MNI space (for checking out registration)
    and the associated "lobe" parcellation in EPI space.

    .. workflow::

        from mriqc.workflows.functional import epi_mni_align
        wf = epi_mni_align()

    """
    from nipype.interfaces.ants import ApplyTransforms, N4BiasFieldCorrection
    from niworkflows.data import get_mni_icbm152_nlin_asym_09c as get_template
    from niworkflows.interfaces.registration import RobustMNINormalizationRPT as RobustMNINormalization
    from pkg_resources import resource_filename as pkgrf

    mni_template = get_template()

    workflow = pe.Workflow(name=name)
    inputnode = pe.Node(niu.IdentityInterface(fields=['epi_mean', 'epi_mask']),
                        name='inputnode')
    outputnode = pe.Node(
        niu.IdentityInterface(fields=['epi_mni', 'epi_parc', 'report']),
        name='outputnode')

    # Mask the (N4-corrected) EPI mean before registration
    epimask = pe.Node(fsl.ApplyMask(), name='EPIApplyMask')
    # Bias-field correction sharpens the EPI mean for registration
    n4itk = pe.Node(N4BiasFieldCorrection(dimension=3), name='SharpenEPI')
    # ANTs-based spatial normalization to the MNI ICBM152 template
    # (generates an SVG reportlet for QC)
    norm = pe.Node(RobustMNINormalization(num_threads=ants_nthreads,
                                          template='mni_icbm152_nlin_asym_09c',
                                          testing=testing,
                                          moving='EPI',
                                          generate_report=True),
                   name='EPI2MNI')
    # Reference is a T2 brain at the requested resolution, shipped with mriqc
    norm.inputs.reference_image = pkgrf(
        'mriqc', 'data/mni/%dmm_T2_brain.nii.gz' % resolution)

    # Warp segmentation into EPI space
    # NOTE(review): `op` is assumed to be `os.path` imported at module level --
    # confirm against the file header.
    invt = pe.Node(ApplyTransforms(input_image=op.join(
        mni_template, '%dmm_parc.nii.gz' % resolution), dimension=3,
        default_value=0, interpolation='NearestNeighbor'),
        name='ResampleSegmentation')

    workflow.connect([
        (inputnode, invt, [('epi_mean', 'reference_image')]),
        (inputnode, n4itk, [('epi_mean', 'input_image')]),
        (inputnode, epimask, [('epi_mask', 'mask_file')]),
        (n4itk, epimask, [('output_image', 'in_file')]),
        (epimask, norm, [('out_file', 'moving_image')]),
        # Inverse transform brings the MNI parcellation back into EPI space
        (norm, invt, [('inverse_composite_transform', 'transforms')]),
        (invt, outputnode, [('output_image', 'epi_parc')]),
        (norm, outputnode, [('warped_image', 'epi_mni'),
                            ('out_report', 'report')]),
    ])
    return workflow
def get_workflow(name, opts):
    """
    Build the anatomical (T1w) preprocessing workflow: intensity (N4)
    normalization, spatial normalization to stereotaxic space, Atropos
    tissue segmentation, and brain masking.

    Parameters
    ----------
    name : str
        Name for the returned nipype workflow.
    opts : object
        Parsed pipeline options (user-supplied transforms/masks, N4 and
        normalization settings, Atropos priors, label types, ...).

    Returns
    -------
    pe.Workflow
        Workflow exposing ``inputnode`` (mri [, brain_mask_space_stx,
        tfm_mri_stx, tfm_stx_mri]) and ``outputnode`` (transforms, brain
        masks, native/stx T1 images, optional per-stage label images).
    """
    workflow = pe.Workflow(name=name)

    in_fields = ['mri']
    if opts.user_brainmask:
        in_fields += ['brain_mask_space_stx']
    if opts.user_mri_stx:
        in_fields += ['tfm_mri_stx', 'tfm_stx_mri']

    label_types = [opts.quant_label_type, opts.pvc_label_type, opts.results_label_type]
    stages = ['quant', 'pvc', 'results']
    label_imgs = [opts.quant_label_img, opts.pvc_label_img, opts.results_label_img]

    inputnode = pe.Node(niu.IdentityInterface(fields=in_fields), name="inputnode")

    out_fields = ['tfm_stx_mri', 'tfm_mri_stx', 'brain_mask_space_stx',
                  'brain_mask_space_mri', 'mri_space_stx', 'mri_space_nat']
    # Stages segmented internally also expose their label image
    for stage, label_type in zip(stages, label_types):
        if 'internal_cls' == label_type:
            out_fields += [stage + '_label_img']

    outputnode = pe.Node(niu.IdentityInterface(fields=out_fields), name='outputnode')

    ##########################################
    # T1 spatial (+ intensity) normalization #
    ##########################################
    # A bspline distance of 0 disables N4; an identity node keeps the
    # downstream 'output_image' connection uniform in both cases.
    if opts.n4_bspline_fitting_distance != 0:
        n4 = pe.Node(N4BiasFieldCorrection(), "mri_intensity_normalized")
        workflow.connect(inputnode, 'mri', n4, 'input_image')
        n4.inputs.dimension = 3
        n4.inputs.bspline_fitting_distance = opts.n4_bspline_fitting_distance
        n4.inputs.shrink_factor = opts.n4_shrink_factor
        n4.inputs.n_iterations = opts.n4_n_iterations
        n4.inputs.convergence_threshold = opts.n4_convergence_threshold
    else:
        n4 = pe.Node(niu.IdentityInterface(fields=["output_image"]),
                     name='mri_no_intensity_normalization')
        workflow.connect(inputnode, 'mri', n4, 'output_image')

    # NOTE(review): this test uses `== ''` while in_fields above uses plain
    # truthiness for opts.user_mri_stx -- confirm both behave as intended
    # when the option is None rather than ''.
    if opts.user_mri_stx == '':
        # No user-supplied transform: estimate the T1 -> template registration
        mri2template = pe.Node(interface=APPIANRegistration(),
                               name="mri_spatial_normalized")
        mri2template.inputs.moving_image_space = "T1w"
        mri2template.inputs.fixed_image_space = "stx"
        mri2template.inputs.fixed_image_mask = icbm_default_brain
        mri2template.inputs.fixed_image = opts.template
        workflow.connect(n4, 'output_image', mri2template, 'moving_image')
        # FIX: identity comparison with None (was `!= None`)
        if opts.user_ants_command is not None:
            mri2template.inputs.user_ants_command = opts.user_ants_command
        if opts.normalization_type:
            mri2template.inputs.normalization_type = opts.normalization_type

        mri_stx_file = 'warped_image'
        mri_stx_node = mri2template
        tfm_node = mri2template
        tfm_inv_node = mri2template
        if opts.normalization_type == 'nl':
            tfm_file = 'composite_transform'
            tfm_inv_file = 'inverse_composite_transform'
        elif opts.normalization_type == 'affine':
            tfm_file = 'out_matrix'
            tfm_inv_file = 'out_matrix_inverse'
        else:
            # FIX: the message now matches the values actually accepted above
            print("Error: --normalization-type should be either affine or nl")
            exit(1)
    else:
        # User provided MRI->stx transforms: just apply them
        transform_mri = pe.Node(interface=APPIANApplyTransforms(),
                                name="transform_mri")
        workflow.connect(inputnode, 'mri', transform_mri, 'input_image')
        workflow.connect(inputnode, 'tfm_mri_stx', transform_mri, 'transform_1')
        transform_mri.inputs.reference_image = opts.template
        mri_stx_node = transform_mri
        mri_stx_file = 'output_image'
        tfm_node = inputnode
        tfm_file = 'tfm_mri_stx'
        tfm_inv_node = inputnode
        tfm_inv_file = 'tfm_stx_mri'

    #
    # T1 in native space will be part of the APPIAN target directory
    # and hence it won't be necessary to link to the T1 in the source directory.
    #
    copy_mri_nat = pe.Node(interface=copyCommand(), name="mri_nat")
    workflow.connect(inputnode, 'mri', copy_mri_nat, 'input_file')

    ###################################
    # Segment T1 in Stereotaxic space #
    ###################################
    seg = None
    if opts.ants_atropos_priors == [] and opts.template == icbm_default_template:
        opts.ants_atropos_priors = [icbm_default_csf, icbm_default_gm, icbm_default_wm]
    if opts.ants_atropos_priors == []:
        print("Warning : user did not provide alternative priors for template. This will affect your T1 MRI segmentation. Check this segmentation visually to make sure it is what you want ")

    for stage, label_type, img in zip(stages, label_types, label_imgs):
        # The Atropos node is created lazily, once, on first need
        # FIX: identity comparison with None (was `== None`)
        if seg is None:
            seg = pe.Node(interface=Atropos(), name="segmentation_ants")
            seg.inputs.dimension = 3
            seg.inputs.number_of_tissue_classes = len(opts.ants_atropos_priors)
            seg.inputs.initialization = 'PriorProbabilityImages'
            seg.inputs.prior_weighting = opts.ants_atropos_prior_weighting
            seg.inputs.prior_probability_images = opts.ants_atropos_priors
            seg.inputs.likelihood_model = 'Gaussian'
            seg.inputs.posterior_formulation = 'Socrates'
            seg.inputs.use_mixture_model_proportions = True
            seg.inputs.args = "-v 1"
            workflow.connect(mri_stx_node, mri_stx_file, seg, 'intensity_images')
            seg.inputs.mask_image = icbm_default_brain
            #workflow.connect(brain_mask_node, brain_mask_file, seg, 'mask_image' )
        print(stage, img)
        if 'antsAtropos' == img:
            workflow.connect(seg, 'classified_image', outputnode, stage + '_label_img')

    ####################
    # T1 Brain masking #
    ####################
    if not opts.user_brainmask:
        # Derive the stereotaxic brain mask from the Atropos segmentation
        mriMNI_brain_mask = pe.Node(interface=SegmentationToBrainMask(),
                                    name="mri_stx_brain_mask")
        #workflow.connect(mri_stx_node, mri_stx_file, mriMNI_brain_mask, "anatomical_image" )
        workflow.connect(seg, 'classified_image', mriMNI_brain_mask, "seg_file")

        brain_mask_node = mriMNI_brain_mask
        brain_mask_file = 'output_image'
    else:
        brain_mask_node = inputnode
        brain_mask_file = 'brain_mask_space_stx'

    #
    # Transform brain mask from stereotaxic to T1 native space
    #
    transform_brain_mask = pe.Node(interface=APPIANApplyTransforms(),
                                   name="transform_brain_mask")
    transform_brain_mask.inputs.interpolation = 'NearestNeighbor'
    workflow.connect(brain_mask_node, brain_mask_file, transform_brain_mask, 'input_image')
    # FIX: use tfm_inv_node (previously assigned but unused; tfm_node was
    # connected instead -- equivalent today because both branches set them to
    # the same node, but this removes the latent inconsistency).
    workflow.connect(tfm_inv_node, tfm_inv_file, transform_brain_mask, 'transform_1')
    workflow.connect(copy_mri_nat, 'output_file', transform_brain_mask, 'reference_image')

    ###############################
    # Pass results to output node #
    ###############################
    workflow.connect(brain_mask_node, brain_mask_file, outputnode, 'brain_mask_space_stx')
    workflow.connect(tfm_node, tfm_file, outputnode, 'tfm_mri_stx')
    workflow.connect(tfm_inv_node, tfm_inv_file, outputnode, 'tfm_stx_mri')
    workflow.connect(transform_brain_mask, 'output_image', outputnode, 'brain_mask_space_mri')
    #workflow.connect(mri_stx_node, mri_stx_file, outputnode, 'mri_space_stx')
    workflow.connect(copy_mri_nat, 'output_file', outputnode, 'mri_space_nat')

    return workflow
def epi_mni_align(name="SpatialNormalization"):
    """
    Estimate the transform that maps the EPI space into MNI152NLin2009cAsym.

    The input epi_mean is the averaged and brain-masked EPI timeseries

    Returns the EPI mean resampled in MNI space (for checking out registration)
    and the associated "lobe" parcellation in EPI space.

    .. workflow::

        from mriqc.workflows.functional import epi_mni_align
        from mriqc.testing import mock_config
        with mock_config():
            wf = epi_mni_align()

    """
    from nipype.interfaces.ants import ApplyTransforms, N4BiasFieldCorrection
    from niworkflows.interfaces.reportlets.registration import (
        SpatialNormalizationRPT as RobustMNINormalization,
    )
    from templateflow.api import get as get_template

    # Get settings
    # NOTE(review): `config` is assumed to be the module-level mriqc config
    # object imported elsewhere in this file.
    testing = config.execution.debug
    n_procs = config.nipype.nprocs
    ants_nthreads = config.nipype.omp_nthreads

    workflow = pe.Workflow(name=name)
    inputnode = pe.Node(
        niu.IdentityInterface(fields=["epi_mean", "epi_mask"]),
        name="inputnode",
    )
    outputnode = pe.Node(
        niu.IdentityInterface(fields=["epi_mni", "epi_parc", "report"]),
        name="outputnode",
    )

    # Bias-field correction sharpens the EPI mean before registration
    n4itk = pe.Node(N4BiasFieldCorrection(dimension=3, copy_header=True),
                    name="SharpenEPI")

    # ANTs-based normalization to the MNI152NLin2009cAsym boldref template;
    # "testing" mode selects the faster, less precise parameter flavor
    norm = pe.Node(
        RobustMNINormalization(
            explicit_masking=False,
            flavor="testing" if testing else "precise",
            float=config.execution.ants_float,
            generate_report=True,
            moving="boldref",
            num_threads=ants_nthreads,
            reference="boldref",
            reference_image=str(
                get_template("MNI152NLin2009cAsym", resolution=2, suffix="boldref")),
            reference_mask=str(
                get_template(
                    "MNI152NLin2009cAsym",
                    resolution=2,
                    desc="brain",
                    suffix="mask",
                )),
            template="MNI152NLin2009cAsym",
        ),
        name="EPI2MNI",
        num_threads=n_procs,
        mem_gb=3,
    )

    # Warp segmentation into EPI space
    invt = pe.Node(
        ApplyTransforms(
            float=True,
            input_image=str(
                get_template(
                    "MNI152NLin2009cAsym",
                    resolution=1,
                    desc="carpet",
                    suffix="dseg",
                )),
            dimension=3,
            default_value=0,
            interpolation="MultiLabel",
        ),
        name="ResampleSegmentation",
    )

    # fmt: off
    workflow.connect([
        (inputnode, invt, [("epi_mean", "reference_image")]),
        (inputnode, n4itk, [("epi_mean", "input_image")]),
        (inputnode, norm, [("epi_mask", "moving_mask")]),
        (n4itk, norm, [("output_image", "moving_image")]),
        # Inverse transform brings the carpet dseg back into EPI space
        (norm, invt, [("inverse_composite_transform", "transforms")]),
        (invt, outputnode, [("output_image", "epi_parc")]),
        (norm, outputnode, [("warped_image", "epi_mni"),
                            ("out_report", "report")]),
    ])
    # fmt: on
    return workflow
#manually remove bad slices motscrubdat = nib.load(cur_dir).get_fdata() if len(scrub_vols) > 0: motscrubdat2 = np.delete(motscrubdat,scrub_vols,axis=3) array_img = nib.Nifti1Image(motscrubdat2, nib.load(cur_dir).affine) nib.save(array_img, '%s/rest.nii.gz'%(output_dir)) else: nib.save(nib.load(cur_dir), '%s/rest.nii.gz'%(output_dir)) ########## 2: Remove first 5 vols of func data os.system("fslroi %s/rest.nii.gz %s/rest_delvol.nii.gz 5 350"%(output_dir,output_dir)) ########## 3: Bias Field Correction inu_n4 = Node(N4BiasFieldCorrection( dimension=3, input_image = '%s/%s/session_1/anat_1/anat.nii.gz'%(data_dir,subnum), output_image = '%s/mprage_inu.nii.gz'%(output_dir)) , name='inu_n4') res1 = inu_n4.run() ########## 4: Skullstripping #anat bet_anat = Node(fsl.BET(frac=0.5, robust=True, output_type='NIFTI_GZ', in_file= '%s/mprage_inu.nii.gz'%(output_dir), out_file = '%s/mprage_inu_bet.nii.gz'%(output_dir)), name="bet_anat") res = bet_anat.run() #func bet_func = Node(fsl.BET(frac=0.5,