def distortion_correction_workflow(self):
    # The initial script created by Vinit Srivastava was:
    #   antsIntermodalityIntrasubject.sh -d 3 -i eddy_corr_brain_b0.nii.gz -r
    #   T1-nonGdE_brain_N4bfc_masked.nii.gz -x T1-nonGdE_brain_mask.nii.gz -w
    #   template -o B0toT1SmallWarp -t 2
    #
    # Note: antsIntermodalityIntrasubject.sh returns an error about a missing template file:
    #   template1Warp.nii.gz does not exist - please specify in order to proceed to steps that map to the template
    # This is expected and means that the second half of the script is neither executed nor necessary for this step.
    # https://github.com/ANTsX/ANTs/blob/master/Scripts/antsIntermodalityIntrasubject.sh
    #
    # Additionally, the anatomical T1 brain mask is only used in the second part of the script, so it is not
    # useful in our case.
    #
    # The nipype ANTs interfaces do not wrap the antsIntermodalityIntrasubject.sh script, so its antsRegistration
    # call is reproduced below with the Registration interface.
    #
    # antsIntermodalityIntrasubject.sh script breakdown:
    # Usage: `basename $0`
    #   -d imageDimension
    #   -r anatomicalT1image (brain or whole-head, depending on modality) to align to
    #   -R anatomicalReference image to warp to (often higher resolution than anatomicalT1image)
    #   -i scalarImageToMatch (such as average bold, average dwi, etc.)
    #   -x anatomicalT1brainmask (should mask out regions that do not appear in scalarImageToMatch)
    #   -t transformType (0 = rigid, 1 = affine, 2 = rigid + small_def, 3 = affine + small_def)
    #   -w prefix of T1 to template transform
    #   -T template space
    #   <OPTARGS>
    #   -o outputPrefix
    #   -l labels in template space
    #   -a auxiliary scalar image/s to warp to template
    #   -b auxiliary dt image to warp to template
    #
    # Initial command run by the script:
    #   /opt/ants-2.3.1/antsRegistration -d 3 -m MI[anatomicalImage(-r),scalarImage(-i),1,32,Regular,0.25]
    #     -c [1000x500x250x0,1e-7,5] -t Rigid[0.1] -f 8x4x2x1 -s 4x2x1x0
    #     -u 1 -m mattes[anatomicalImage(-r),scalarImage(-i),1,32] -c [50x50x0,1e-7,5] -t SyN[0.1,3,0] -f 4x2x1
    #     -s 2x1x0mm -u 1 -z 1 --winsorize-image-intensities [0.005,0.995] -o B0toT1Warp
    #
    # Option breakdown:
    #   -d: dimensionality
    #   -m: metric
    #       "MI[fixedImage,movingImage,metricWeight,numberOfBins,<samplingStrategy={None,Regular,Random}>,<samplingPercentage=[0,1]>]"
    #       "Mattes[fixedImage,movingImage,metricWeight,numberOfBins,<samplingStrategy={None,Regular,Random}>,<samplingPercentage=[0,1]>]"
    #   -c: convergence
    #       "MxNxO"
    #       "[MxNxO,<convergenceThreshold=1e-6>,<convergenceWindowSize=10>]"
    #   -t: transform
    #       0:rigid[GradientStep], 1:affine[], 2:composite affine[], 3:similarity[], 4:translation[], 5:BSpline[]
    #       "SyN[gradientStep,<updateFieldVarianceInVoxelSpace=3>,<totalFieldVarianceInVoxelSpace=0>]"
    #   -f: shrink-factors
    #       "MxNxO..."
    #   -s: smoothing-sigmas
    #       "MxNxO..."
    #   -u: use-histogram-matching
    #   -z: collapse-output-transforms
    #   -o: output transform prefix

    b0_T1w_Reg = Node(Registration(), name="b0_T1w_Reg")
    b0_T1w_Reg.btn_string = 'dwi b0 to T1w Registration'
    # -r, -i, -x will get set via the workflow implementation
    # -d
    b0_T1w_Reg.inputs.dimension = 3
    # -m
    b0_T1w_Reg.inputs.metric = ['MI', 'Mattes']
    b0_T1w_Reg.inputs.metric_weight = [1, 1]
    b0_T1w_Reg.inputs.radius_or_number_of_bins = [32, 32]
    b0_T1w_Reg.inputs.sampling_strategy = ['Regular', None]
    b0_T1w_Reg.inputs.sampling_percentage = [0.25, None]
    # -c
    b0_T1w_Reg.inputs.number_of_iterations = [[1000, 500, 250, 0], [50, 50, 0]]
    b0_T1w_Reg.inputs.convergence_threshold = [1e-7, 1e-7]
    b0_T1w_Reg.inputs.convergence_window_size = [5, 5]
    # -t
    b0_T1w_Reg.inputs.transforms = ['Rigid', 'SyN']
    b0_T1w_Reg.inputs.transform_parameters = [(0.1,), (0.1, 3, 0.0)]
    # -f
    b0_T1w_Reg.inputs.shrink_factors = [[8, 4, 2, 1], [4, 2, 1]]
    # -s
    b0_T1w_Reg.inputs.smoothing_sigmas = [[4, 2, 1, 0], [2, 1, 0]]
    b0_T1w_Reg.inputs.sigma_units = ['vox', 'mm']
    # -u
    b0_T1w_Reg.inputs.use_histogram_matching = [True, True]
    # -z
    b0_T1w_Reg.inputs.collapse_output_transforms = True
    # winsorize
    b0_T1w_Reg.inputs.winsorize_lower_quantile = 0.005
    b0_T1w_Reg.inputs.winsorize_upper_quantile = 0.995
    # -o
    b0_T1w_Reg.inputs.output_transform_prefix = 'dwiToT1Warp'
    # Since the ApplyTransforms interface in nipype only accepts the transform list in reverse order (i.e. the
    # output from antsRegistration needs to be flipped), we save the transform files as a single composite file.
    b0_T1w_Reg.inputs.write_composite_transform = True
    self.interfaces.append(b0_T1w_Reg)

    # Although the Registration interface can output a warped image, we keep the ApplyTransforms node to
    # replicate the original (i.e. non-nipype) pipeline and to add the input_image_type parameter.
    # Second script:
    #   antsApplyTransforms -d 3 -e 3 -i data.nii.gz -o data_distcorr.nii.gz -r
    #   eddy_corr_brain_b0.nii.gz -t B0toT1SmallWarp1Warp.nii.gz -t
    #   B0toT1SmallWarp0GenericAffine.mat -v
    dwi_T1w_Tran = Node(ApplyTransforms(), name="dwi_T1w_Tran")
    dwi_T1w_Tran.btn_string = 'dwi to T1w Transformation'
    # -d: dimension
    dwi_T1w_Tran.inputs.dimension = 3
    # -e: input image type
    dwi_T1w_Tran.inputs.input_image_type = 3
    # The -i, -o, -r, and -t options are set via the workflow
    self.interfaces.append(dwi_T1w_Tran)
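
    # A minimal inspection sketch (not part of the pipeline; the example paths are hypothetical and would need to
    # exist on disk). With the image inputs set, the assembled ANTs call can be previewed before running:
    #   b0_T1w_Reg.inputs.fixed_image = 'T1-nonGdE_brain_N4bfc_masked.nii.gz'
    #   b0_T1w_Reg.inputs.moving_image = 'eddy_corr_brain_b0.nii.gz'
    #   print(b0_T1w_Reg.interface.cmdline)
    # With write_composite_transform=True, antsRegistration is expected to write 'dwiToT1WarpComposite.h5'
    # (plus an inverse), which is what feeds the 'transforms' input of dwi_T1w_Tran.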
def __init__(self, parent, dir_dic, bids):
    super().__init__(parent, dir_dic, bids)

    # Create interfaces ============================================================================================
    # BET
    T1w_BET = Node(BET(), name="T1w_BET")
    T1w_BET.btn_string = 'T1w Brain Extraction'
    self.interfaces.append(T1w_BET)

    T1w_gad_BET = Node(BET(), name="T1w_gad_BET")
    T1w_gad_BET.btn_string = 'T1w Gadolinium Enhanced Brain Extraction'
    self.interfaces.append(T1w_gad_BET)

    T2w_dbs_BET = Node(BET(), name="T2w_dbs_BET")
    T2w_dbs_BET.btn_string = 'T2w DBS Acquisition Brain Extraction'
    self.interfaces.append(T2w_dbs_BET)

    dwi_BET = Node(BET(), name="dwi_BET")
    dwi_BET.btn_string = 'dwi Brain Extraction'
    self.interfaces.append(dwi_BET)

    # BFC
    T1w_BFC = Node(N4BiasFieldCorrection(), name="T1w_BFC")
    T1w_BFC.btn_string = 'T1w Bias Field Correction'
    self.interfaces.append(T1w_BFC)

    # Split
    dwi_ROI_b0 = Node(ExtractROI(), name="dwi_ROI_b0")
    dwi_ROI_b0.btn_string = 'dwi Extract b0'
    self.interfaces.append(dwi_ROI_b0)

    # Eddy current correction
    dwi_Eddy = Node(Eddy(), name="dwi_Eddy")
    dwi_Eddy.btn_string = 'dwi Eddy Current Correction'
    self.interfaces.append(dwi_Eddy)

    # Distortion correction
    # As this section is script/comment heavy, it was moved into its own method.
    self.distortion_correction_workflow()

    # Data output (i.e. sink) ======================================================================================
    self.sink = Node(DataSink(), name="sink")
    self.sink.btn_string = 'data sink'
    self.sink.inputs.base_directory = self.dir_dic['data_dir']

    self.jsink = Node(JSONFileSink(), name="jsink")
    self.jsink.btn_string = 'json sink'
    self.jsink.inputs.base_directory = self.dir_dic['data_dir']

    # Initialize workflow ==========================================================================================
    self.wf = Workflow(name='pre_processing')

    # T1w BET to ants N4BiasFieldCorrection
    self.wf.connect([(self.return_interface("T1w_BET"), self.return_interface("T1w_BFC"),
                      [("out_file", "input_image")])])
    self.wf.connect([(self.return_interface("T1w_BET"), self.return_interface("T1w_BFC"),
                      [("mask_file", "mask_image")])])

    # Eddy
    self.wf.connect([(self.return_interface("dwi_BET"), self.return_interface("dwi_Eddy"),
                      [("out_file", "in_file")])])
    self.wf.connect([(self.return_interface("dwi_BET"), self.return_interface("dwi_Eddy"),
                      [("mask_file", "in_mask")])])

    # ROI b0
    self.wf.connect([(self.return_interface("dwi_Eddy"), self.return_interface("dwi_ROI_b0"),
                      [("out_corrected", "in_file")])])

    # Distortion correction:
    # b0_T1w_Reg:
    #   -i: moving image
    #   -r: T1
    #   -x: T1 mask
    self.wf.connect([(self.return_interface("dwi_ROI_b0"), self.return_interface("b0_T1w_Reg"),
                      [("roi_file", "moving_image")])])
    self.wf.connect([(self.return_interface("T1w_BFC"), self.return_interface("b0_T1w_Reg"),
                      [("output_image", "fixed_image")])])
    # Tentatively removed: the fixed-image mask does not seem useful here
    # (see self.distortion_correction_workflow()) and causes a crash when connected.
    # self.wf.connect([(self.return_interface("T1w_BET"), self.return_interface("b0_T1w_Reg"),
    #                   [("mask_file", "fixed_image_mask")])])

    # dwi_T1w_Tran:
    #   -i: Eddy corrected image
    #   -r: Eddy corrected b0
    #   -t: transforms
    self.wf.connect([(self.return_interface("dwi_Eddy"), self.return_interface("dwi_T1w_Tran"),
                      [("out_corrected", "input_image")])])
    self.wf.connect([(self.return_interface("dwi_ROI_b0"), self.return_interface("dwi_T1w_Tran"),
                      [("roi_file", "reference_image")])])
    self.wf.connect([(self.return_interface("b0_T1w_Reg"), self.return_interface("dwi_T1w_Tran"),
                      [("composite_transform", "transforms")])])
    # BaseInterface generates a dict mapping button strings to the workflow nodes
    # self.map_workflow()
    graph_file = self.wf.write_graph("pre_processing", graph2use='flat')
    self.graph_file = graph_file.replace("pre_processing.png", "pre_processing_detailed.png")

    self.init_settings()
    self.init_ui()
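
    # A minimal usage sketch (not called here, kept only as a reference): once the subject-specific inputs are
    # wired in, the assembled workflow could be executed with nipype's standard runners, e.g.
    #   self.wf.base_dir = self.dir_dic['temp_dir']
    #   self.wf.run(plugin='MultiProc', plugin_args={'n_procs': 4})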
def __init__(self, parent, dir_dic, bids):
    super().__init__(parent, dir_dic, bids)

    # Create interfaces ============================================================================================
    # BET
    MNI_BET = Node(BET(), name="MNI_BET")
    MNI_BET.btn_string = 'MNI Template Brain Extraction'
    self.interfaces.append(MNI_BET)

    # Registration
    postopCT_T1_Reg = Node(Registration(), name="postopCT_T1_Reg")
    postopCT_T1_Reg.btn_string = 'post-op CT to T1w Registration'
    self.interfaces.append(postopCT_T1_Reg)

    preopCT_T1_Reg = Node(Registration(), name="preopCT_T1_Reg")
    preopCT_T1_Reg.btn_string = 'pre-op CT to T1w Registration'
    self.interfaces.append(preopCT_T1_Reg)

    T1_MNI_Reg = Node(Registration(), name="T1_MNI_Reg")
    T1_MNI_Reg.btn_string = 'T1w to MNI template Registration'
    self.interfaces.append(T1_MNI_Reg)

    # Transformations
    postopCT_T1_Tran = Node(ApplyTransforms(), name="postopCT_T1_Tran")
    postopCT_T1_Tran.btn_string = 'post-op CT to T1w Transformation'
    self.interfaces.append(postopCT_T1_Tran)

    preopCT_T1_Tran = Node(ApplyTransforms(), name="preopCT_T1_Tran")
    preopCT_T1_Tran.btn_string = 'pre-op CT to T1w Transformation'
    self.interfaces.append(preopCT_T1_Tran)

    T1_MNI_Tran = Node(ApplyTransforms(), name="T1_MNI_Tran")
    T1_MNI_Tran.btn_string = 'T1w to MNI template Transformation'
    self.interfaces.append(T1_MNI_Tran)

    # Data output (i.e. sink) ======================================================================================
    self.sink = Node(DataSink(), name="sink")
    self.sink.btn_string = 'data sink'
    self.sink.inputs.base_directory = self.dir_dic['data_dir']

    self.jsink = Node(JSONFileSink(), name="jsink")
    self.jsink.btn_string = 'json sink'
    self.jsink.inputs.base_directory = self.dir_dic['data_dir']

    # Initialize workflow ==========================================================================================
    self.wf = Workflow(name='co_registration')

    # Brain extracted MNI template to antsRegistration
    #   MI[mni_t1_brain.nii.gz,t1_nonGdE_brain_N4bfc_masked.nii.gz,1,32,Regular,0.25]
    #   MI[fixedImage,movingImage,metricWeight,numberOfBins,<samplingStrategy={None,Regular,Random}>,<samplingPercentage=[0,1]>]
    self.wf.connect([(self.return_interface("MNI_BET"), self.return_interface("T1_MNI_Reg"),
                      [("out_file", "fixed_image")])])
    self.wf.connect([(self.return_interface("MNI_BET"), self.return_interface("T1_MNI_Tran"),
                      [("out_file", "reference_image")])])

    # T1 -> MNI: Reg to Tran
    self.wf.connect([(self.return_interface("T1_MNI_Reg"), self.return_interface("T1_MNI_Tran"),
                      [("composite_transform", "transforms")])])

    # post-op CT -> T1: Reg to Tran
    self.wf.connect([(self.return_interface("postopCT_T1_Reg"), self.return_interface("postopCT_T1_Tran"),
                      [("composite_transform", "transforms")])])

    # pre-op CT -> T1: Reg to Tran
    self.wf.connect([(self.return_interface("preopCT_T1_Reg"), self.return_interface("preopCT_T1_Tran"),
                      [("composite_transform", "transforms")])])

    # BaseInterface generates a dict mapping button strings to the workflow nodes
    self.wf.base_dir = self.dir_dic['temp_dir']
    graph_file = self.wf.write_graph("co_registration", graph2use='flat')
    self.graph_file = graph_file.replace("co_registration.png", "co_registration_detailed.png")

    self.init_settings()
    self.init_ui()
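
    # A minimal sketch (not wired into the workflow) of how a post-op CT could be taken directly to MNI space by
    # chaining the two composite transforms. The node name and file names below are hypothetical; note that
    # antsApplyTransforms applies the transform list last-to-first, so the T1 -> MNI transform is listed before
    # the CT -> T1 transform:
    #   postopCT_MNI_Tran = Node(ApplyTransforms(), name="postopCT_MNI_Tran")
    #   postopCT_MNI_Tran.inputs.dimension = 3
    #   postopCT_MNI_Tran.inputs.transforms = ['T1toMNIComposite.h5', 'postopCTtoT1Composite.h5']
    #   postopCT_MNI_Tran.inputs.reference_image = 'mni_t1_brain.nii.gz'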