def tensor_pipeline(self, **kwargs):  # @UnusedVariable
    """
    Fits the apparent diffusion tensor (DT) to each voxel of the image
    """
    pipeline = self.create_pipeline(
        name='tensor',
        inputs=[DatasetSpec('bias_correct', nifti_gz_format),
                DatasetSpec('grad_dirs', fsl_bvecs_format),
                DatasetSpec('bvalues', fsl_bvals_format),
                DatasetSpec('brain_mask', nifti_gz_format)],
        outputs=[DatasetSpec('tensor', nifti_gz_format)],
        desc=("Estimates the apparent diffusion tensor in each "
              "voxel"),
        version=1,
        citations=[],
        **kwargs)
    # Create tensor fit node
    dwi2tensor = pipeline.create_node(FitTensor(), name='dwi2tensor')
    dwi2tensor.inputs.out_file = 'dti.nii.gz'
    # Gradient merge node
    fsl_grads = pipeline.create_node(MergeTuple(2), name="fsl_grads")
    # Connect nodes
    pipeline.connect(fsl_grads, 'out', dwi2tensor, 'grad_fsl')
    # Connect to inputs
    pipeline.connect_input('grad_dirs', fsl_grads, 'in1')
    pipeline.connect_input('bvalues', fsl_grads, 'in2')
    pipeline.connect_input('bias_correct', dwi2tensor, 'in_file')
    pipeline.connect_input('brain_mask', dwi2tensor, 'in_mask')
    # Connect to outputs
    pipeline.connect_output('tensor', dwi2tensor, 'out_file')
    pipeline.assert_connected()  # Check inputs/outputs are connected
    return pipeline
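
# Note on the gradient-merge pattern used throughout this module (a sketch,
# not part of the pipeline above): MergeTuple(2) pairs the FSL-style bvecs
# file ('in1') with the bvals file ('in2') so MRtrix3 interfaces can receive
# them as a single tuple on their 'grad_fsl' input. The helper and file names
# below are hypothetical, purely for illustration.
def _example_grad_fsl_pair(bvecs_path='dwi.bvec', bvals_path='dwi.bval'):
    """Hypothetical illustration of the (bvecs, bvals) pair fed to 'grad_fsl'."""
    return (bvecs_path, bvals_path)
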
def extract_b0_pipeline(self, **kwargs):  # @UnusedVariable
    """
    Extracts the b0 images from a DWI study and takes their mean
    """
    pipeline = self.create_pipeline(
        name='extract_b0',
        inputs=[DatasetSpec('bias_correct', nifti_gz_format),
                DatasetSpec('grad_dirs', fsl_bvecs_format),
                DatasetSpec('bvalues', fsl_bvals_format)],
        outputs=[DatasetSpec('b0', nifti_gz_format)],
        desc="Extract b0 image from a DWI study",
        version=1,
        citations=[mrtrix_cite],
        **kwargs)
    # Gradient merge node
    fsl_grads = pipeline.create_node(MergeTuple(2), name="fsl_grads")
    # Extraction node
    extract_b0s = pipeline.create_node(ExtractDWIorB0(), name='extract_b0s',
                                       requirements=[mrtrix3_req])
    extract_b0s.inputs.bzero = True
    extract_b0s.inputs.quiet = True
    # FIXME: Need a registration step before the mean
    # Mean calculation node
    mean = pipeline.create_node(MRMath(), name="mean",
                                requirements=[mrtrix3_req])
    mean.inputs.axis = 3
    mean.inputs.operation = 'mean'
    mean.inputs.quiet = True
    # Convert to NIfTI
    mrconvert = pipeline.create_node(MRConvert(), name="output_conversion",
                                     requirements=[mrtrix3_req])
    mrconvert.inputs.out_ext = '.nii.gz'
    mrconvert.inputs.quiet = True
    # Connect inputs
    pipeline.connect_input('bias_correct', extract_b0s, 'in_file')
    pipeline.connect_input('grad_dirs', fsl_grads, 'in1')
    pipeline.connect_input('bvalues', fsl_grads, 'in2')
    # Connect between nodes
    pipeline.connect(extract_b0s, 'out_file', mean, 'in_files')
    pipeline.connect(fsl_grads, 'out', extract_b0s, 'grad_fsl')
    pipeline.connect(mean, 'out_file', mrconvert, 'in_file')
    # Connect outputs
    pipeline.connect_output('b0', mrconvert, 'out_file')
    pipeline.assert_connected()  # Check inputs/outputs are connected
    return pipeline
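
# Sketch of what the mean node computes (not called by the pipeline above):
# MRMath with operation='mean' and axis=3 averages the extracted b0 volumes
# along the volume (fourth) dimension. The numpy equivalent below is
# illustrative only; the actual computation is performed by MRtrix's mrmath.
def _example_mean_b0(b0_volumes_4d):
    """Hypothetical numpy equivalent of averaging b0 volumes over axis 3."""
    import numpy as np
    return np.mean(np.asarray(b0_volumes_4d), axis=3)
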
def response_pipeline(self, **kwargs):  # @UnusedVariable
    """
    Estimates the fibre response function used for constrained spherical
    deconvolution

    Parameters
    ----------
    response_algorithm : str
        Algorithm used to estimate the response
    """
    outputs = [DatasetSpec('wm_response', text_format)]
    if self.multi_tissue:
        outputs.append(DatasetSpec('gm_response', text_format))
        outputs.append(DatasetSpec('csf_response', text_format))
    pipeline = self.create_pipeline(
        name='response',
        inputs=[DatasetSpec('bias_correct', nifti_gz_format),
                DatasetSpec('grad_dirs', fsl_bvecs_format),
                DatasetSpec('bvalues', fsl_bvals_format),
                DatasetSpec('brain_mask', nifti_gz_format)],
        outputs=outputs,
        desc="Estimates the fibre response function",
        version=1,
        citations=[mrtrix_cite],
        **kwargs)
    # Create response estimation node
    response = pipeline.create_node(ResponseSD(), name='response',
                                    requirements=[mrtrix3_req])
    response.inputs.algorithm = self.switch('response_algorithm')
    # Gradient merge node
    fsl_grads = pipeline.create_node(MergeTuple(2), name="fsl_grads")
    # Connect nodes
    pipeline.connect(fsl_grads, 'out', response, 'grad_fsl')
    # Connect to inputs
    pipeline.connect_input('grad_dirs', fsl_grads, 'in1')
    pipeline.connect_input('bvalues', fsl_grads, 'in2')
    pipeline.connect_input('bias_correct', response, 'in_file')
    pipeline.connect_input('brain_mask', response, 'in_mask')
    # Connect to outputs
    pipeline.connect_output('wm_response', response, 'wm_file')
    if self.multi_tissue:
        response.inputs.gm_file = 'gm.txt'
        response.inputs.csf_file = 'csf.txt'
        pipeline.connect_output('gm_response', response, 'gm_file')
        pipeline.connect_output('csf_response', response, 'csf_file')
    pipeline.assert_connected()  # Check inputs/outputs are connected
    return pipeline
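
# Note on 'response_algorithm' (an assumption about configuration, not part
# of the pipeline above): the value returned by self.switch() is passed
# straight to MRtrix's dwi2response, so single-tissue studies typically use
# an algorithm such as 'tournier', while multi-tissue studies need one that
# also produces GM and CSF responses (e.g. 'dhollander' or 'msmt_5tt'). The
# helper below is hypothetical and only mirrors that expectation.
def _example_response_outputs(algorithm):
    """Hypothetical mapping from algorithm to the response files produced."""
    multi_tissue_algorithms = ('dhollander', 'msmt_5tt')
    if algorithm in multi_tissue_algorithms:
        return ['wm.txt', 'gm.txt', 'csf.txt']
    return ['wm.txt']
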
def brain_extraction_pipeline(self, **kwargs):  # @UnusedVariable @IgnorePep8
    """
    Generates a whole-brain mask from the preprocessed DWI series

    Parameters
    ----------
    brain_extract_method : str
        Selects the masking tool: 'mrtrix' uses MRtrix's 'dwi2mask'
        command, otherwise the base-class brain extraction ('bet')
        pipeline is used
    """
    if self.branch('brain_extract_method', 'mrtrix'):
        pipeline = self.create_pipeline(
            'brain_extraction',
            inputs=[DatasetSpec('preproc', nifti_gz_format),
                    DatasetSpec('grad_dirs', fsl_bvecs_format),
                    DatasetSpec('bvalues', fsl_bvals_format)],
            outputs=[DatasetSpec('brain_mask', nifti_gz_format)],
            desc="Generate brain mask from b0 images",
            version=1,
            citations=[mrtrix_cite],
            **kwargs)
        # Create mask node
        dwi2mask = pipeline.create_node(BrainMask(), name='dwi2mask',
                                        requirements=[mrtrix3_req])
        dwi2mask.inputs.out_file = 'brain_mask.nii.gz'
        # Gradient merge node
        grad_fsl = pipeline.create_node(MergeTuple(2), name="grad_fsl")
        # Connect nodes
        pipeline.connect(grad_fsl, 'out', dwi2mask, 'grad_fsl')
        # Connect inputs
        pipeline.connect_input('grad_dirs', grad_fsl, 'in1')
        pipeline.connect_input('bvalues', grad_fsl, 'in2')
        pipeline.connect_input('preproc', dwi2mask, 'in_file')
        # Connect outputs
        pipeline.connect_output('brain_mask', dwi2mask, 'out_file')
        # Check inputs/outputs are connected
        pipeline.assert_connected()
    else:
        pipeline = super(DiffusionStudy,
                         self).brain_extraction_pipeline(**kwargs)
    return pipeline
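
# Sketch of the branching logic (a stand-in, not the framework's
# implementation): self.branch('brain_extract_method', 'mrtrix') is taken to
# be truthy when the study's 'brain_extract_method' option is set to
# 'mrtrix', in which case the MRtrix dwi2mask pipeline above is built;
# otherwise the base-class pipeline is returned unchanged.
def _example_branch(options, name, value):
    """Hypothetical illustration of the option check assumed above."""
    return options.get(name) == value
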
def bias_correct_pipeline(self, **kwargs):  # @UnusedVariable @IgnorePep8
    """
    Corrects B1 field inhomogeneities
    """
    bias_method = self.switch('bias_correct_method')
    pipeline = self.create_pipeline(
        name='bias_correct',
        inputs=[DatasetSpec('preproc', nifti_gz_format),
                DatasetSpec('brain_mask', nifti_gz_format),
                DatasetSpec('grad_dirs', fsl_bvecs_format),
                DatasetSpec('bvalues', fsl_bvals_format)],
        outputs=[DatasetSpec('bias_correct', nifti_gz_format)],
        desc="Corrects for B1 field inhomogeneity",
        version=1,
        citations=[fast_cite,
                   (n4_cite if bias_method == 'ants' else fsl_cite)],
        **kwargs)
    # Create bias correction node
    bias_correct = pipeline.create_node(
        DWIBiasCorrect(), name="bias_correct",
        requirements=(
            [mrtrix3_req] +
            [ants2_req if bias_method == 'ants' else fsl509_req]))
    bias_correct.inputs.method = bias_method
    # Gradient merge node
    fsl_grads = pipeline.create_node(MergeTuple(2), name="fsl_grads")
    # Connect nodes
    pipeline.connect(fsl_grads, 'out', bias_correct, 'grad_fsl')
    # Connect to inputs
    pipeline.connect_input('grad_dirs', fsl_grads, 'in1')
    pipeline.connect_input('bvalues', fsl_grads, 'in2')
    pipeline.connect_input('preproc', bias_correct, 'in_file')
    pipeline.connect_input('brain_mask', bias_correct, 'mask')
    # Connect to outputs
    pipeline.connect_output('bias_correct', bias_correct, 'out_file')
    pipeline.assert_connected()  # Check inputs/outputs are connected
    return pipeline
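
# Note (assumption based on the node configuration above): the
# 'bias_correct_method' switch selects the estimator used by MRtrix's
# dwibiascorrect script, ANTs' N4 or FSL's FAST, which is why both the
# requirements and the citations are chosen conditionally. The helper below
# is a hypothetical mirror of that selection, not framework code.
def _example_bias_correct_deps(bias_method):
    """Hypothetical mirror of the conditional requirement/citation choice."""
    extra_requirement = 'ants2' if bias_method == 'ants' else 'fsl509'
    citation = 'n4' if bias_method == 'ants' else 'fsl'
    return ['mrtrix3', extra_requirement], citation
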
def fod_pipeline(self, **kwargs):  # @UnusedVariable
    """
    Estimates the fibre orientation distribution (FOD) in each voxel using
    constrained spherical deconvolution
    """
    inputs = [DatasetSpec('bias_correct', nifti_gz_format),
              DatasetSpec('grad_dirs', fsl_bvecs_format),
              DatasetSpec('bvalues', fsl_bvals_format),
              DatasetSpec('wm_response', text_format),
              DatasetSpec('brain_mask', nifti_gz_format)]
    outputs = [DatasetSpec('wm_odf', mrtrix_format)]
    if self.multi_tissue:
        inputs.append(DatasetSpec('gm_response', text_format))
        inputs.append(DatasetSpec('csf_response', text_format))
        outputs.append(DatasetSpec('gm_odf', mrtrix_format))
        outputs.append(DatasetSpec('csf_odf', mrtrix_format))
        algorithm = 'msmt_csd'
    else:
        algorithm = 'csd'
    pipeline = self.create_pipeline(
        name='fod',
        inputs=inputs,
        outputs=outputs,
        desc=("Estimates the fibre orientation distribution in each"
              " voxel"),
        version=1,
        citations=[mrtrix_cite],
        **kwargs)
    # Create FOD fit node
    dwi2fod = pipeline.create_node(EstimateFOD(), name='dwi2fod',
                                   requirements=[mrtrix3_req])
    dwi2fod.inputs.algorithm = algorithm
    # Gradient merge node
    fsl_grads = pipeline.create_node(MergeTuple(2), name="fsl_grads")
    # Connect nodes
    pipeline.connect(fsl_grads, 'out', dwi2fod, 'grad_fsl')
    # Connect to inputs
    pipeline.connect_input('grad_dirs', fsl_grads, 'in1')
    pipeline.connect_input('bvalues', fsl_grads, 'in2')
    pipeline.connect_input('bias_correct', dwi2fod, 'in_file')
    pipeline.connect_input('wm_response', dwi2fod, 'wm_txt')
    pipeline.connect_input('brain_mask', dwi2fod, 'mask_file')
    # Connect to outputs
    pipeline.connect_output('wm_odf', dwi2fod, 'wm_odf')
    # If multi-tissue, also estimate GM and CSF ODFs
    if self.multi_tissue:
        pipeline.connect_input('gm_response', dwi2fod, 'gm_txt')
        pipeline.connect_input('csf_response', dwi2fod, 'csf_txt')
        dwi2fod.inputs.gm_odf = 'gm.mif'
        dwi2fod.inputs.csf_odf = 'csf.mif'
        pipeline.connect_output('gm_odf', dwi2fod, 'gm_odf')
        pipeline.connect_output('csf_odf', dwi2fod, 'csf_odf')
    pipeline.assert_connected()  # Check inputs/outputs are connected
    return pipeline
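
# Note on the algorithm choice (sketch only): multi-tissue studies use
# MRtrix's 'msmt_csd', which requires a response function per tissue and in
# return yields separate WM, GM and CSF ODFs, whereas single-tissue data fall
# back to plain 'csd' with a WM ODF only. The helper below is a hypothetical
# mirror of that decision.
def _example_fod_outputs(multi_tissue):
    """Hypothetical mapping from the multi_tissue flag to the FOD outputs."""
    if multi_tissue:
        return 'msmt_csd', ['wm_odf', 'gm_odf', 'csf_odf']
    return 'csd', ['wm_odf']
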
def intensity_normalisation_pipeline(self, **kwargs):
    pipeline = self.create_pipeline(
        name='intensity_normalization',
        inputs=[DatasetSpec('bias_correct', nifti_gz_format),
                DatasetSpec('brain_mask', nifti_gz_format),
                DatasetSpec('grad_dirs', fsl_bvecs_format),
                DatasetSpec('bvalues', fsl_bvals_format)],
        outputs=[DatasetSpec('norm_intensity', mrtrix_format),
                 DatasetSpec('norm_intens_fa_template', mrtrix_format,
                             frequency='per_project'),
                 DatasetSpec('norm_intens_wm_mask', mrtrix_format,
                             frequency='per_project')],
        desc="Performs a group-wise intensity normalisation of the DW images",
        version=1,
        citations=[mrtrix_cite],
        **kwargs)
    # Convert from NIfTI to MRtrix format
    grad_merge = pipeline.create_node(MergeTuple(2), name="grad_merge")
    mrconvert = pipeline.create_node(MRConvert(), name='mrconvert')
    mrconvert.inputs.out_ext = '.mif'
    # Set up join nodes
    fields = ['dwis', 'masks', 'subject_ids', 'visit_ids']
    join_subjects = pipeline.create_join_subjects_node(
        IdentityInterface(fields), joinfield=fields, name='join_subjects')
    join_visits = pipeline.create_join_visits_node(
        Chain(fields), joinfield=fields, name='join_visits')
    # Set up select node to pick out each session's normalised image
    select = pipeline.create_node(SelectSession(), name='expand')
    # Intensity normalisation
    intensity_norm = pipeline.create_node(DWIIntensityNorm(),
                                          name='dwiintensitynorm')
    # Connect inputs
    pipeline.connect_input('bias_correct', mrconvert, 'in_file')
    pipeline.connect_input('grad_dirs', grad_merge, 'in1')
    pipeline.connect_input('bvalues', grad_merge, 'in2')
    pipeline.connect_subject_id(join_subjects, 'subject_ids')
    pipeline.connect_visit_id(join_subjects, 'visit_ids')
    pipeline.connect_subject_id(select, 'subject_id')
    pipeline.connect_visit_id(select, 'visit_id')
    pipeline.connect_input('brain_mask', join_subjects, 'masks')
    # Internal connections
    pipeline.connect(grad_merge, 'out', mrconvert, 'grad_fsl')
    pipeline.connect(mrconvert, 'out_file', join_subjects, 'dwis')
    pipeline.connect(join_subjects, 'dwis', join_visits, 'dwis')
    pipeline.connect(join_subjects, 'masks', join_visits, 'masks')
    pipeline.connect(join_subjects, 'subject_ids', join_visits,
                     'subject_ids')
    pipeline.connect(join_subjects, 'visit_ids', join_visits, 'visit_ids')
    pipeline.connect(join_visits, 'dwis', intensity_norm, 'in_files')
    pipeline.connect(join_visits, 'masks', intensity_norm, 'masks')
    pipeline.connect(join_visits, 'subject_ids', select, 'subject_ids')
    pipeline.connect(join_visits, 'visit_ids', select, 'visit_ids')
    pipeline.connect(intensity_norm, 'out_files', select, 'items')
    # Connect outputs
    pipeline.connect_output('norm_intensity', select, 'item')
    pipeline.connect_output('norm_intens_fa_template', intensity_norm,
                            'fa_template')
    pipeline.connect_output('norm_intens_wm_mask', intensity_norm,
                            'wm_mask')
    return pipeline
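
# Data-flow note (sketch, not executed): dwiintensitynorm is a group-wise
# operation, so the per-session images are first joined across subjects and
# then across visits into flat lists, normalised together, and the
# SelectSession node is assumed to pick this session's normalised image back
# out of the returned list by matching its (subject_id, visit_id) pair. A
# minimal pure-Python illustration of that selection step follows; the helper
# name and argument layout are hypothetical.
def _example_select_session(items, subject_ids, visit_ids, subject_id,
                            visit_id):
    """Hypothetical stand-in for the SelectSession behaviour assumed above."""
    index = list(zip(subject_ids, visit_ids)).index((subject_id, visit_id))
    return items[index]
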