import os
import warnings

import SimpleITK as sitk
from nipype.interfaces.ants import N4BiasFieldCorrection


def correct_bias(in_file, out_file=None, image_type=sitk.sitkFloat64):
    """
    Corrects the bias using ANTs N4BiasFieldCorrection. If this fails, will
    then attempt to correct bias using SimpleITK.

    :param in_file: path to the input .nii file
    :param out_file: path under which to save the corrected file
    :param image_type: SimpleITK pixel type used by the SimpleITK fallback
    :return: full path to the bias-corrected .nii file
    """
    if out_file is None:
        # str.rstrip('.nii') would strip any trailing '.', 'n', 'i'
        # characters rather than the suffix, so build the default name
        # with os.path.splitext instead
        base, ext = os.path.splitext(in_file)
        out_file = base + "_bias_corrected" + ext
    # Correct the bias field of the MRI image with N4BiasFieldCorrection
    correct = N4BiasFieldCorrection()
    correct.inputs.input_image = in_file
    correct.inputs.output_image = out_file
    try:
        done = correct.run()
        return done.outputs.output_image
    except IOError:
        warnings.warn(RuntimeWarning(
            "ANTs N4BiasFieldCorrection could not be found."
            " Will try using SimpleITK for bias field correction"
            " which will take much longer. To fix this problem, add"
            " N4BiasFieldCorrection to your PATH system variable."
            " (example: export PATH=${PATH}:/path/to/ants/bin)"))
        input_image = sitk.ReadImage(in_file, image_type)
        output_image = sitk.N4BiasFieldCorrection(input_image, input_image > 0)
        sitk.WriteImage(output_image, out_file)
        return os.path.abspath(out_file)
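# A minimal usage sketch for correct_bias, assuming a hypothetical input file
# "t1.nii"; wrapped in a function so importing this module stays side-effect
# free. The SimpleITK fallback only triggers when the ANTs binary is missing
# from PATH.
def _demo_correct_bias():
    corrected = correct_bias("t1.nii")  # defaults to "t1_bias_corrected.nii"
    print("Bias-corrected image written to:", corrected)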
def bias_field_correction(in_subj_dir, out_subj_dir):
    print("N4ITK on: ", in_subj_dir)
    create_dir(out_subj_dir)
    for scan_name in os.listdir(in_subj_dir):
        if "mask" in scan_name:
            continue
        in_path = os.path.join(in_subj_dir, scan_name)
        out_path = os.path.join(out_subj_dir, scan_name)
        try:
            n4 = N4BiasFieldCorrection()
            n4.inputs.input_image = in_path
            n4.inputs.output_image = out_path
            n4.inputs.dimension = 3
            n4.inputs.n_iterations = [100, 100, 60, 40]
            n4.inputs.shrink_factor = 3
            n4.inputs.convergence_threshold = 1e-4
            n4.inputs.bspline_fitting_distance = 300
            n4.run()
        except RuntimeError:
            print("\tFailed on: ", in_path)
    return
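# A minimal driver sketch, assuming hypothetical "raw/" and "n4/" directory
# layouts with one sub-directory per subject; not part of the original code.
def _demo_bias_field_correction(raw_root="raw", out_root="n4"):
    for subj in sorted(os.listdir(raw_root)):
        bias_field_correction(os.path.join(raw_root, subj),
                              os.path.join(out_root, subj))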
def test_N4BiasFieldCorrection_outputs():
    output_map = dict(output_image=dict(), )
    outputs = N4BiasFieldCorrection.output_spec()

    for key, metadata in output_map.items():
        for metakey, value in metadata.items():
            yield assert_equal, getattr(outputs.traits()[key], metakey), value
def n4_correction(im_input):
    n4 = N4BiasFieldCorrection()
    n4.inputs.dimension = 3
    n4.inputs.input_image = im_input
    n4.inputs.bspline_fitting_distance = 300
    n4.inputs.shrink_factor = 3
    n4.inputs.n_iterations = [50, 50, 30, 20]
    n4.inputs.output_image = im_input.replace('.nii.gz', '_corrected.nii.gz')
    n4.run()
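# A sketch of batch usage, assuming a hypothetical list of .nii.gz paths;
# N4 is CPU-bound, so a process pool is one way to fan the calls out.
from multiprocessing import Pool

def _demo_n4_correction(paths=("sub-01_T1w.nii.gz", "sub-02_T1w.nii.gz")):
    with Pool(processes=2) as pool:
        pool.map(n4_correction, list(paths))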
def test_N4BiasFieldCorrection_inputs():
    input_map = dict(
        args=dict(argstr='%s', ),
        bias_image=dict(hash_files=False, ),
        bspline_fitting_distance=dict(argstr='--bspline-fitting %s', ),
        bspline_order=dict(requires=['bspline_fitting_distance'], ),
        convergence_threshold=dict(requires=['n_iterations'], ),
        dimension=dict(
            argstr='--image-dimension %d',
            usedefault=True,
        ),
        environ=dict(
            nohash=True,
            usedefault=True,
        ),
        ignore_exception=dict(
            nohash=True,
            usedefault=True,
        ),
        input_image=dict(
            argstr='--input-image %s',
            mandatory=True,
        ),
        mask_image=dict(argstr='--mask-image %s', ),
        n_iterations=dict(
            argstr='--convergence %s',
            requires=['convergence_threshold'],
        ),
        num_threads=dict(
            nohash=True,
            usedefault=True,
        ),
        output_image=dict(
            argstr='--output %s',
            genfile=True,
            hash_files=False,
        ),
        save_bias=dict(
            mandatory=True,
            usedefault=True,
            xor=['bias_image'],
        ),
        shrink_factor=dict(argstr='--shrink-factor %d', ),
        terminal_output=dict(
            mandatory=True,
            nohash=True,
        ),
        weight_image=dict(argstr='--weight-image %s', ),
    )
    inputs = N4BiasFieldCorrection.input_spec()

    for key, metadata in input_map.items():
        for metakey, value in metadata.items():
            yield assert_equal, getattr(inputs.traits()[key], metakey), value
def n4_bfc(input_path):
    print("Applying bias correction...")
    print("Input: {}".format(input_path))
    n4 = N4BiasFieldCorrection()
    n4.inputs.dimension = 3
    n4.inputs.input_image = input_path
    n4.inputs.bspline_fitting_distance = 300
    n4.inputs.shrink_factor = 3
    n4.inputs.n_iterations = [50, 50, 30, 20]
    n4.inputs.output_image = input_path.replace('.mha', '_n4.mha')
    n4.run()
def test_N4BiasFieldCorrection_inputs():
    # Older auto-generated spec test; the argstr values (including the
    # '--bsline-fitting' spelling) mirror the interface spec under test.
    input_map = dict(
        args=dict(argstr='%s', ),
        bias_image=dict(hash_files=False, ),
        bspline_fitting_distance=dict(argstr='--bsline-fitting [%g]', ),
        convergence_threshold=dict(
            argstr=',%g]',
            position=2,
            requires=['n_iterations'],
        ),
        dimension=dict(
            argstr='--image-dimension %d',
            usedefault=True,
        ),
        environ=dict(
            nohash=True,
            usedefault=True,
        ),
        ignore_exception=dict(
            nohash=True,
            usedefault=True,
        ),
        input_image=dict(
            argstr='--input-image %s',
            mandatory=True,
        ),
        mask_image=dict(argstr='--mask-image %s', ),
        n_iterations=dict(
            argstr='--convergence [ %s',
            position=1,
            requires=['convergence_threshold'],
            sep='x',
        ),
        num_threads=dict(
            nohash=True,
            usedefault=True,
        ),
        output_image=dict(
            argstr='--output %s',
            genfile=True,
            hash_files=False,
        ),
        save_bias=dict(
            mandatory=True,
            usedefault=True,
            xor=['bias_image'],
        ),
        shrink_factor=dict(argstr='--shrink-factor %d', ),
        terminal_output=dict(
            mandatory=True,
            nohash=True,
        ),
    )
    inputs = N4BiasFieldCorrection.input_spec()

    for key, metadata in input_map.items():
        for metakey, value in metadata.items():
            yield assert_equal, getattr(inputs.traits()[key], metakey), value
def bias_field_correction(src_path, dst_path):
    print("N4ITK on: ", src_path)
    try:
        n4 = N4BiasFieldCorrection()
        n4.inputs.input_image = src_path
        n4.inputs.output_image = dst_path
        n4.inputs.dimension = 3
        n4.inputs.n_iterations = [100, 100, 60, 40]
        n4.inputs.shrink_factor = 3
        n4.inputs.convergence_threshold = 1e-4
        n4.inputs.bspline_fitting_distance = 300
        n4.run()
    except RuntimeError:
        print("\tFailed on: ", src_path)
    return
def bias_field_correction(src_path, dst_path):
    logging.info('N4ITK on: {}'.format(src_path))
    try:
        n4 = N4BiasFieldCorrection()
        n4.inputs.input_image = src_path
        n4.inputs.output_image = dst_path
        n4.inputs.dimension = 3
        n4.inputs.n_iterations = [100, 100, 60, 40]
        n4.inputs.shrink_factor = 3
        n4.inputs.convergence_threshold = 1e-4
        n4.inputs.bspline_fitting_distance = 300
        n4.run()
    except RuntimeError:
        logging.warning('Failed on: {}'.format(src_path))
    return
def bias_correction(img_path, output_path):
    if not os.path.exists(output_path):
        n4 = N4BiasFieldCorrection()
        n4.inputs.dimension = 3
        n4.inputs.input_image = img_path
        n4.inputs.output_image = output_path
        n4.inputs.bspline_fitting_distance = 500
        n4.inputs.shrink_factor = 10
        n4.inputs.n_iterations = [100, 100, 60, 40]
        n4.inputs.convergence_threshold = 1e-4
        # subprocess.call(n4.cmdline.split(" "))
        with open(os.devnull, 'w') as devnull:
            subprocess.call(n4.cmdline.split(" "),
                            stdout=devnull, stderr=devnull)
    return
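# A small sketch showing why calling through n4.cmdline works: nipype renders
# the interface to an ANTs command string that subprocess can run. The paths
# below are hypothetical.
def _demo_show_cmdline():
    n4 = N4BiasFieldCorrection()
    n4.inputs.dimension = 3
    n4.inputs.input_image = "t1.nii.gz"      # hypothetical input
    n4.inputs.output_image = "t1_n4.nii.gz"  # hypothetical output
    print(n4.cmdline)  # e.g. "N4BiasFieldCorrection --image-dimension 3 ..."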
def n4itk_norm(self, path, n_dims=3, n_iters=None):
    """
    :param path: string, path to mha T1 or T1c file
    :param n_dims: int, dimensionality of the input image
    :param n_iters: list of int, iterations per resolution level
        (defaults to [20, 20, 10, 5])
    :return: writes n4itk normalized image to parent_dir under
        orig_filename_n.mha
    """
    output_fn = path[:-4] + '_n.mha'
    if n_iters is None:
        n_iters = [20, 20, 10, 5]
    # dimension of input image, input image
    n4 = N4BiasFieldCorrection(output_image=output_fn)
    n4.inputs.dimension = n_dims
    n4.inputs.input_image = path
    n4.inputs.n_iterations = n_iters
    n4.run()
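# A usage sketch; "BRATS_T1c.mha" is an invented file name. The method body
# never touches `self`, so None suffices for a standalone call in this sketch.
# path[:-4] strips the ".mha" suffix, so the corrected volume lands next to
# the input as "<name>_n.mha".
def _demo_n4itk_norm():
    n4itk_norm(None, "BRATS_T1c.mha")  # writes "BRATS_T1c_n.mha"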
def builder(subject_id,
            subId,
            project_dir,
            data_dir,
            output_dir,
            output_final_dir,
            output_interm_dir,
            layout,
            anat=None,
            funcs=None,
            fmaps=None,
            task_name='',
            session=None,
            apply_trim=False,
            apply_dist_corr=False,
            apply_smooth=False,
            apply_filter=False,
            mni_template='2mm',
            apply_n4=True,
            ants_threads=8,
            readable_crash_files=False,
            write_logs=True):
    """
    Core function that returns a workflow. See wfmaker for more details.

    Args:
        subject_id: name of subject folder for final outputted sub-folder name
        subId: abbreviated name of subject for intermediate outputted
            sub-folder name
        project_dir: full path to root of project
        data_dir: full path to raw data files
        output_dir: upper level output dir (others will be nested within this)
        output_final_dir: final preprocessed sub-dir name
        output_interm_dir: intermediate preprocess sub-dir name
        layout: BIDS layout instance
    """

    ##################
    ### PATH SETUP ###
    ##################
    if session is not None:
        session = int(session)
        if session < 10:
            session = '0' + str(session)
        else:
            session = str(session)

    # Set MNI template
    MNItemplate = os.path.join(get_resource_path(),
                               'MNI152_T1_' + mni_template + '_brain.nii.gz')
    MNImask = os.path.join(get_resource_path(),
                           'MNI152_T1_' + mni_template + '_brain_mask.nii.gz')
    MNItemplatehasskull = os.path.join(get_resource_path(),
                                       'MNI152_T1_' + mni_template + '.nii.gz')

    # Set ANTs files
    bet_ants_template = os.path.join(get_resource_path(),
                                     'OASIS_template.nii.gz')
    bet_ants_prob_mask = os.path.join(
        get_resource_path(), 'OASIS_BrainCerebellumProbabilityMask.nii.gz')
    bet_ants_registration_mask = os.path.join(
        get_resource_path(), 'OASIS_BrainCerebellumRegistrationMask.nii.gz')

    #################################
    ### NIPYPE IMPORTS AND CONFIG ###
    #################################
    # Update nipype global config because workflow.config[] = ... doesn't seem
    # to work. We can't store a nipype config/rc file in the container anyway,
    # so set the options globally before importing and setting up the
    # workflow, as suggested here:
    # http://nipype.readthedocs.io/en/latest/users/config_file.html#config-file

    # Create the subject's intermediate directory before configuring nipype
    # and the workflow because that's where we'll save log files in addition
    # to intermediate files
    if not os.path.exists(os.path.join(output_interm_dir, subId, 'logs')):
        os.makedirs(os.path.join(output_interm_dir, subId, 'logs'))
    log_dir = os.path.join(output_interm_dir, subId, 'logs')
    from nipype import config
    if readable_crash_files:
        cfg = dict(execution={'crashfile_format': 'txt'})
        config.update_config(cfg)
    config.update_config({
        'logging': {
            'log_directory': log_dir,
            'log_to_file': write_logs
        },
        'execution': {
            'crashdump_dir': log_dir
        }
    })
    from nipype import logging
    logging.update_logging(config)

    # Now import everything else
    from nipype.interfaces.io import DataSink
    from nipype.interfaces.utility import Merge, IdentityInterface
    from nipype.pipeline.engine import Node, Workflow
    from nipype.interfaces.nipy.preprocess import ComputeMask
    from nipype.algorithms.rapidart import ArtifactDetect
    from nipype.interfaces.ants.segmentation import BrainExtraction, N4BiasFieldCorrection
    from nipype.interfaces.ants import Registration, ApplyTransforms
    from nipype.interfaces.fsl import MCFLIRT, TOPUP, ApplyTOPUP
    from nipype.interfaces.fsl.maths import MeanImage
    from nipype.interfaces.fsl import Merge as MERGE
    from nipype.interfaces.fsl.utils import Smooth
    from nipype.interfaces.nipy.preprocess import Trim
    from .interfaces import (Plot_Coregistration_Montage,
                             Plot_Quality_Control,
                             Plot_Realignment_Parameters,
                             Create_Covariates, Down_Sample_Precision,
                             Create_Encoding_File, Filter_In_Mask)

    ##################
    ### INPUT NODE ###
    ##################

    # Turn functional file list into iterable Node
    func_scans = Node(IdentityInterface(fields=['scan']), name='func_scans')
    func_scans.iterables = ('scan', funcs)

    # Get TR for use in filtering below; we're assuming all BOLD runs have
    # the same TR
    tr_length = layout.get_metadata(funcs[0])['RepetitionTime']

    #####################################
    ## TRIM ##
    #####################################
    if apply_trim:
        trim = Node(Trim(), name='trim')
        trim.inputs.begin_index = apply_trim

    #####################################
    ## DISTORTION CORRECTION ##
    #####################################
    if apply_dist_corr:
        # Get fmap file locations
        fmaps = [
            f.filename for f in layout.get(
                subject=subId, modality='fmap', extensions='.nii.gz')
        ]
        if not fmaps:
            raise IOError(
                "Distortion Correction requested but field map scans not found..."
            )

        # Get fmap metadata
        totalReadoutTimes, measurements, fmap_pes = [], [], []

        for i, fmap in enumerate(fmaps):
            # Grab total readout time for each fmap
            totalReadoutTimes.append(
                layout.get_metadata(fmap)['TotalReadoutTime'])

            # Grab measurements (for some reason pyBIDS doesn't grab
            # dcm_meta... fields from the side-car json file and json.load
            # doesn't either; so instead just read the header using nibabel
            # to determine the number of scans)
            measurements.append(nib.load(fmap).header['dim'][4])

            # Get phase encoding direction
            fmap_pe = layout.get_metadata(fmap)["PhaseEncodingDirection"]
            fmap_pes.append(fmap_pe)

        encoding_file_writer = Node(interface=Create_Encoding_File(),
                                    name='create_encoding')
        encoding_file_writer.inputs.totalReadoutTimes = totalReadoutTimes
        encoding_file_writer.inputs.fmaps = fmaps
        encoding_file_writer.inputs.fmap_pes = fmap_pes
        encoding_file_writer.inputs.measurements = measurements
        encoding_file_writer.inputs.file_name = 'encoding_file.txt'

        merge_to_file_list = Node(interface=Merge(2),
                                  infields=['in1', 'in2'],
                                  name='merge_to_file_list')
        merge_to_file_list.inputs.in1 = fmaps[0]
        merge_to_file_list.inputs.in2 = fmaps[1]

        # Merge AP and PA distortion correction scans
        merger = Node(interface=MERGE(dimension='t'), name='merger')
        merger.inputs.output_type = 'NIFTI_GZ'
        merger.inputs.in_files = fmaps
        merger.inputs.merged_file = 'merged_epi.nii.gz'

        # Create distortion correction map
        topup = Node(interface=TOPUP(), name='topup')
        topup.inputs.output_type = 'NIFTI_GZ'

        # Apply distortion correction to other scans
        apply_topup = Node(interface=ApplyTOPUP(), name='apply_topup')
        apply_topup.inputs.output_type = 'NIFTI_GZ'
        apply_topup.inputs.method = 'jac'
        apply_topup.inputs.interp = 'spline'

    ###################################
    ### REALIGN ###
    ###################################
    realign_fsl = Node(MCFLIRT(), name="realign")
    realign_fsl.inputs.cost = 'mutualinfo'
    realign_fsl.inputs.mean_vol = True
    realign_fsl.inputs.output_type = 'NIFTI_GZ'
    realign_fsl.inputs.save_mats = True
    realign_fsl.inputs.save_rms = True
    realign_fsl.inputs.save_plots = True

    ###################################
    ### MEAN EPIs ###
    ###################################
    # For coregistration after realignment
    mean_epi = Node(MeanImage(), name='mean_epi')
    mean_epi.inputs.dimension = 'T'

    # For after normalization is done to plot checks
    mean_norm_epi = Node(MeanImage(), name='mean_norm_epi')
    mean_norm_epi.inputs.dimension = 'T'

    ###################################
    ### MASK, ART, COV CREATION ###
    ###################################
    compute_mask = Node(ComputeMask(), name='compute_mask')
    compute_mask.inputs.m = .05
    art = Node(ArtifactDetect(), name='art')
    art.inputs.use_differences = [True, False]
    art.inputs.use_norm = True
    art.inputs.norm_threshold = 1
    art.inputs.zintensity_threshold = 3
    art.inputs.mask_type = 'file'
    art.inputs.parameter_source = 'FSL'

    make_cov = Node(Create_Covariates(), name='make_cov')

    ################################
    ### N4 BIAS FIELD CORRECTION ###
    ################################
    if apply_n4:
        n4_correction = Node(N4BiasFieldCorrection(), name='n4_correction')
        n4_correction.inputs.copy_header = True
        n4_correction.inputs.save_bias = False
        n4_correction.inputs.num_threads = ants_threads
        n4_correction.inputs.input_image = anat

    ###################################
    ### BRAIN EXTRACTION ###
    ###################################
    brain_extraction_ants = Node(BrainExtraction(), name='brain_extraction')
    brain_extraction_ants.inputs.dimension = 3
    brain_extraction_ants.inputs.use_floatingpoint_precision = 1
    brain_extraction_ants.inputs.num_threads = ants_threads
    brain_extraction_ants.inputs.brain_probability_mask = bet_ants_prob_mask
    brain_extraction_ants.inputs.keep_temporary_files = 1
    brain_extraction_ants.inputs.brain_template = bet_ants_template
    brain_extraction_ants.inputs.extraction_registration_mask = bet_ants_registration_mask
    brain_extraction_ants.inputs.out_prefix = 'bet'

    ###################################
    ### COREGISTRATION ###
    ###################################
    coregistration = Node(Registration(), name='coregistration')
    coregistration.inputs.float = False
    coregistration.inputs.output_transform_prefix = "meanEpi2highres"
    coregistration.inputs.transforms = ['Rigid']
    coregistration.inputs.transform_parameters = [(0.1, )]
    coregistration.inputs.number_of_iterations = [[1000, 500, 250, 100]]
    coregistration.inputs.dimension = 3
    coregistration.inputs.num_threads = ants_threads
    coregistration.inputs.write_composite_transform = True
    coregistration.inputs.collapse_output_transforms = True
    coregistration.inputs.metric = ['MI']
    coregistration.inputs.metric_weight = [1]
    coregistration.inputs.radius_or_number_of_bins = [32]
    coregistration.inputs.sampling_strategy = ['Regular']
    coregistration.inputs.sampling_percentage = [0.25]
    coregistration.inputs.convergence_threshold = [1e-08]
    coregistration.inputs.convergence_window_size = [10]
    coregistration.inputs.smoothing_sigmas = [[3, 2, 1, 0]]
    coregistration.inputs.sigma_units = ['mm']
    coregistration.inputs.shrink_factors = [[4, 3, 2, 1]]
    coregistration.inputs.use_estimate_learning_rate_once = [True]
    coregistration.inputs.use_histogram_matching = [False]
    coregistration.inputs.initial_moving_transform_com = True
    coregistration.inputs.output_warped_image = True
    coregistration.inputs.winsorize_lower_quantile = 0.01
    coregistration.inputs.winsorize_upper_quantile = 0.99

    ###################################
    ### NORMALIZATION ###
    ###################################
    # Settings Explanations
    # Only a few key settings are worth adjusting; most others relate to how
    # the ANTs optimizer starts or iterates and won't make a ton of
    # difference. Brian Avants referred to these settings as the last "best
    # tested" when he was aligning fMRI data:
    # https://github.com/ANTsX/ANTsRCore/blob/master/R/antsRegistration.R#L275
    # Things that matter the most:
    # smoothing_sigmas:
    #     how much gaussian smoothing to apply when performing registration;
    #     you probably want the upper limit of this to match the resolution
    #     the data is collected at, e.g. 3mm
    #     Old settings: [[3,2,1,0]]*3
    # shrink_factors:
    #     the coarseness with which to do registration
    #     Old settings: [[8,4,2,1]] * 3
    #     >= 8 may result in some problems, causing big chunks of cortex with
    #     little fine-grained spatial structure to be moved to other parts of
    #     cortex
    # Other settings
    # transform_parameters:
    #     how much regularization to do for fitting that transformation
    #     for syn this pertains to both the gradient regularization term and
    #     the flow and elastic terms. Leave the syn settings alone as they
    #     seem to be the most well tested across published data sets
    # radius_or_number_of_bins:
    #     this is the bin size for MI metrics and 32 is probably adequate for
    #     most use cases. Increasing this might increase precision (e.g. to
    #     64) but takes exponentially longer
    # use_histogram_matching:
    #     use image intensity distribution to guide registration
    #     Leave it on for within-modality registration (e.g. T1 -> MNI), but
    #     off for between-modality registration (e.g. EPI -> T1)
    # convergence_threshold:
    #     threshold for the optimizer
    # convergence_window_size:
    #     how many samples should the optimizer average to compute the
    #     threshold?
    # sampling_strategy:
    #     what strategy ANTs should use to initialize the transform.
    #     'Regular' here refers to approximately random sampling around the
    #     center of the image mass
    normalization = Node(Registration(), name='normalization')
    normalization.inputs.float = False
    normalization.inputs.collapse_output_transforms = True
    normalization.inputs.convergence_threshold = [1e-06]
    normalization.inputs.convergence_window_size = [10]
    normalization.inputs.dimension = 3
    normalization.inputs.fixed_image = MNItemplate
    normalization.inputs.initial_moving_transform_com = True
    normalization.inputs.metric = ['MI', 'MI', 'CC']
    normalization.inputs.metric_weight = [1.0] * 3
    normalization.inputs.number_of_iterations = [[1000, 500, 250, 100],
                                                 [1000, 500, 250, 100],
                                                 [100, 70, 50, 20]]
    normalization.inputs.num_threads = ants_threads
    normalization.inputs.output_transform_prefix = 'anat2template'
    normalization.inputs.output_inverse_warped_image = True
    normalization.inputs.output_warped_image = True
    normalization.inputs.radius_or_number_of_bins = [32, 32, 4]
    normalization.inputs.sampling_percentage = [0.25, 0.25, 1]
    normalization.inputs.sampling_strategy = ['Regular', 'Regular', 'None']
    normalization.inputs.shrink_factors = [[8, 4, 2, 1]] * 3
    normalization.inputs.sigma_units = ['vox'] * 3
    normalization.inputs.smoothing_sigmas = [[3, 2, 1, 0]] * 3
    normalization.inputs.transforms = ['Rigid', 'Affine', 'SyN']
    normalization.inputs.transform_parameters = [(0.1, ), (0.1, ),
                                                 (0.1, 3.0, 0.0)]
    normalization.inputs.use_histogram_matching = True
    normalization.inputs.winsorize_lower_quantile = 0.005
    normalization.inputs.winsorize_upper_quantile = 0.995
    normalization.inputs.write_composite_transform = True

    # NEW SETTINGS (need to be adjusted; specifically shrink_factors and
    # smoothing_sigmas need to be the same length)
    # normalization = Node(Registration(), name='normalization')
    # normalization.inputs.float = False
    # normalization.inputs.collapse_output_transforms = True
    # normalization.inputs.convergence_threshold = [1e-06, 1e-06, 1e-07]
    # normalization.inputs.convergence_window_size = [10]
    # normalization.inputs.dimension = 3
    # normalization.inputs.fixed_image = MNItemplate
    # normalization.inputs.initial_moving_transform_com = True
    # normalization.inputs.metric = ['MI', 'MI', 'CC']
    # normalization.inputs.metric_weight = [1.0]*3
    # normalization.inputs.number_of_iterations = [[1000, 500, 250, 100],
    #                                              [1000, 500, 250, 100],
    #                                              [100, 70, 50, 20]]
    # normalization.inputs.num_threads = ants_threads
    # normalization.inputs.output_transform_prefix = 'anat2template'
    # normalization.inputs.output_inverse_warped_image = True
    # normalization.inputs.output_warped_image = True
    # normalization.inputs.radius_or_number_of_bins = [32, 32, 4]
    # normalization.inputs.sampling_percentage = [0.25, 0.25, 1]
    # normalization.inputs.sampling_strategy = ['Regular',
    #                                           'Regular',
    #                                           'None']
    # normalization.inputs.shrink_factors = [[4, 3, 2, 1]]*3
    # normalization.inputs.sigma_units = ['vox']*3
    # normalization.inputs.smoothing_sigmas = [[2, 1], [2, 1], [3, 2, 1, 0]]
    # normalization.inputs.transforms = ['Rigid', 'Affine', 'SyN']
    # normalization.inputs.transform_parameters = [(0.1,),
    #                                              (0.1,),
    #                                              (0.1, 3.0, 0.0)]
    # normalization.inputs.use_histogram_matching = True
    # normalization.inputs.winsorize_lower_quantile = 0.005
    # normalization.inputs.winsorize_upper_quantile = 0.995
    # normalization.inputs.write_composite_transform = True

    ###################################
    ### APPLY TRANSFORMS AND SMOOTH ###
    ###################################
    merge_transforms = Node(Merge(2),
                            iterfield=['in2'],
                            name='merge_transforms')

    # Used for epi -> mni, via (coreg + norm)
    apply_transforms = Node(ApplyTransforms(),
                            iterfield=['input_image'],
                            name='apply_transforms')
    apply_transforms.inputs.input_image_type = 3
    apply_transforms.inputs.float = False
    apply_transforms.inputs.num_threads = 12
    apply_transforms.inputs.environ = {}
    apply_transforms.inputs.interpolation = 'BSpline'
    apply_transforms.inputs.invert_transform_flags = [False, False]
    apply_transforms.inputs.reference_image = MNItemplate

    # Used for t1 segmented -> mni, via (norm)
    apply_transform_seg = Node(ApplyTransforms(), name='apply_transform_seg')
    apply_transform_seg.inputs.input_image_type = 3
    apply_transform_seg.inputs.float = False
    apply_transform_seg.inputs.num_threads = 12
    apply_transform_seg.inputs.environ = {}
    apply_transform_seg.inputs.interpolation = 'MultiLabel'
    apply_transform_seg.inputs.invert_transform_flags = [False]
    apply_transform_seg.inputs.reference_image = MNItemplate

    ###################################
    ### PLOTS ###
    ###################################
    plot_realign = Node(Plot_Realignment_Parameters(), name="plot_realign")
    plot_qa = Node(Plot_Quality_Control(), name="plot_qa")
    plot_normalization_check = Node(Plot_Coregistration_Montage(),
                                    name="plot_normalization_check")
    plot_normalization_check.inputs.canonical_img = MNItemplatehasskull

    ############################################
    ### FILTER, SMOOTH, DOWNSAMPLE PRECISION ###
    ############################################
    # Use cosanlab_preproc for down sampling
    down_samp = Node(Down_Sample_Precision(), name="down_samp")

    # Use FSL for smoothing
    if apply_smooth:
        smooth = Node(Smooth(), name='smooth')
        if isinstance(apply_smooth, list):
            smooth.iterables = ("fwhm", apply_smooth)
        elif isinstance(apply_smooth, int) or isinstance(apply_smooth, float):
            smooth.inputs.fwhm = apply_smooth
        else:
            raise ValueError("apply_smooth must be a list or int/float")

    # Use cosanlab_preproc for low-pass filtering
    if apply_filter:
        lp_filter = Node(Filter_In_Mask(), name='lp_filter')
        lp_filter.inputs.mask = MNImask
        lp_filter.inputs.sampling_rate = tr_length
        lp_filter.inputs.high_pass_cutoff = 0
        if isinstance(apply_filter, list):
            lp_filter.iterables = ("low_pass_cutoff", apply_filter)
        elif isinstance(apply_filter, int) or isinstance(apply_filter, float):
            lp_filter.inputs.low_pass_cutoff = apply_filter
        else:
            raise ValueError("apply_filter must be a list or int/float")
    ###################
    ### OUTPUT NODE ###
    ###################
    # Collect all final outputs in the output dir and get rid of file name
    # additions
    datasink = Node(DataSink(), name='datasink')
    if session:
        datasink.inputs.base_directory = os.path.join(output_final_dir,
                                                      subject_id)
        datasink.inputs.container = 'ses-' + session
    else:
        datasink.inputs.base_directory = output_final_dir
        datasink.inputs.container = subject_id

    # Remove substitutions
    data_dir_parts = data_dir.split('/')[1:]
    if session:
        prefix = ['_scan_'] + data_dir_parts + [subject_id] + [
            'ses-' + session
        ] + ['func']
    else:
        prefix = ['_scan_'] + data_dir_parts + [subject_id] + ['func']
    func_scan_names = [os.path.split(elem)[-1] for elem in funcs]
    to_replace = []
    for elem in func_scan_names:
        bold_name = elem.split(subject_id + '_')[-1]
        bold_name = bold_name.split('.nii.gz')[0]
        to_replace.append(('..'.join(prefix + [elem]), bold_name))
    datasink.inputs.substitutions = to_replace

    #####################
    ### INIT WORKFLOW ###
    #####################
    # If we have sessions, provide the full path to the subject's
    # intermediate directory and only rely on workflow init to create the
    # session container *within* that directory. Otherwise just point to the
    # intermediate directory and let the workflow init create the subject
    # container within the intermediate directory.
    if session:
        workflow = Workflow(name='ses_' + session)
        workflow.base_dir = os.path.join(output_interm_dir, subId)
    else:
        workflow = Workflow(name=subId)
        workflow.base_dir = output_interm_dir

    #############################
    ######### PART (1a) #########
    # func -> discorr -> trim -> realign
    # OR
    # func -> trim -> realign
    # OR
    # func -> discorr -> realign
    # OR
    # func -> realign
    #############################
    if apply_dist_corr:
        workflow.connect([(encoding_file_writer, topup,
                           [('encoding_file', 'encoding_file')]),
                          (encoding_file_writer, apply_topup,
                           [('encoding_file', 'encoding_file')]),
                          (merger, topup, [('merged_file', 'in_file')]),
                          (func_scans, apply_topup, [('scan', 'in_files')]),
                          (topup, apply_topup,
                           [('out_fieldcoef', 'in_topup_fieldcoef'),
                            ('out_movpar', 'in_topup_movpar')])])
        if apply_trim:
            # Dist Corr + Trim
            workflow.connect([(apply_topup, trim, [('out_corrected',
                                                    'in_file')]),
                              (trim, realign_fsl, [('out_file', 'in_file')])])
        else:
            # Dist Corr + No Trim
            workflow.connect([(apply_topup, realign_fsl, [('out_corrected',
                                                           'in_file')])])
    else:
        if apply_trim:
            # No Dist Corr + Trim
            workflow.connect([(func_scans, trim, [('scan', 'in_file')]),
                              (trim, realign_fsl, [('out_file', 'in_file')])])
        else:
            # No Dist Corr + No Trim
            workflow.connect([
                (func_scans, realign_fsl, [('scan', 'in_file')]),
            ])

    #############################
    ######### PART (1n) #########
    # anat -> N4 -> bet
    # OR
    # anat -> bet
    #############################
    if apply_n4:
        workflow.connect([(n4_correction, brain_extraction_ants,
                           [('output_image', 'anatomical_image')])])
    else:
        brain_extraction_ants.inputs.anatomical_image = anat

    ##########################################
    ############### PART (2) #################
    # realign -> coreg -> mni (via t1)
    # t1 -> mni
    # covariate creation
    # plot creation
    ##########################################
    workflow.connect([
        (realign_fsl, plot_realign, [('par_file', 'realignment_parameters')]),
        (realign_fsl, plot_qa, [('out_file', 'dat_img')]),
        (realign_fsl, art, [('out_file', 'realigned_files'),
                            ('par_file', 'realignment_parameters')]),
        (realign_fsl, mean_epi, [('out_file', 'in_file')]),
        (realign_fsl, make_cov, [('par_file', 'realignment_parameters')]),
        (mean_epi, compute_mask, [('out_file', 'mean_volume')]),
        (compute_mask, art, [('brain_mask', 'mask_file')]),
        (art, make_cov, [('outlier_files', 'spike_id')]),
        (art, plot_realign, [('outlier_files', 'outliers')]),
        (plot_qa, make_cov, [('fd_outliers', 'fd_outliers')]),
        (brain_extraction_ants, coregistration, [('BrainExtractionBrain',
                                                  'fixed_image')]),
        (mean_epi, coregistration, [('out_file', 'moving_image')]),
        (brain_extraction_ants, normalization, [('BrainExtractionBrain',
                                                 'moving_image')]),
        (coregistration, merge_transforms, [('composite_transform', 'in2')]),
        (normalization, merge_transforms, [('composite_transform', 'in1')]),
        (merge_transforms, apply_transforms, [('out', 'transforms')]),
        (realign_fsl, apply_transforms, [('out_file', 'input_image')]),
        (apply_transforms, mean_norm_epi, [('output_image', 'in_file')]),
        (normalization, apply_transform_seg, [('composite_transform',
                                               'transforms')]),
        (brain_extraction_ants, apply_transform_seg,
         [('BrainExtractionSegmentation', 'input_image')]),
        (mean_norm_epi, plot_normalization_check, [('out_file', 'wra_img')])
    ])

    ##################################################
    ################### PART (3) #####################
    # epi (in mni) -> filter -> smooth -> down sample
    # OR
    # epi (in mni) -> filter -> down sample
    # OR
    # epi (in mni) -> smooth -> down sample
    # OR
    # epi (in mni) -> down sample
    ##################################################
    if apply_filter:
        workflow.connect([(apply_transforms, lp_filter, [('output_image',
                                                          'in_file')])])
        if apply_smooth:
            # Filtering + Smoothing
            workflow.connect([(lp_filter, smooth, [('out_file', 'in_file')]),
                              (smooth, down_samp, [('smoothed_file',
                                                    'in_file')])])
        else:
            # Filtering + No Smoothing
            workflow.connect([(lp_filter, down_samp, [('out_file',
                                                       'in_file')])])
    else:
        if apply_smooth:
            # No Filtering + Smoothing
            workflow.connect([
                (apply_transforms, smooth, [('output_image', 'in_file')]),
                (smooth, down_samp, [('smoothed_file', 'in_file')])
            ])
        else:
            # No Filtering + No Smoothing
            workflow.connect([(apply_transforms, down_samp, [('output_image',
                                                              'in_file')])])

    ##########################################
    ############### PART (4) #################
    # down sample -> save
    # plots -> save
    # covs -> save
    # t1 (in mni) -> save
    # t1 segmented masks (in mni) -> save
    # realignment parms -> save
    ##########################################
    workflow.connect([
        (down_samp, datasink, [('out_file', 'functional.@down_samp')]),
        (plot_realign, datasink, [('plot', 'functional.@plot_realign')]),
        (plot_qa, datasink, [('plot', 'functional.@plot_qa')]),
        (plot_normalization_check, datasink,
         [('plot', 'functional.@plot_normalization')]),
        (make_cov, datasink, [('covariates', 'functional.@covariates')]),
        (normalization, datasink, [('warped_image', 'structural.@normanat')]),
        (apply_transform_seg, datasink, [('output_image',
                                          'structural.@normanatseg')]),
        (realign_fsl, datasink, [('par_file', 'functional.@motionparams')])
    ])

    if not os.path.exists(os.path.join(output_dir, 'pipeline.png')):
        workflow.write_graph(dotfilename=os.path.join(output_dir, 'pipeline'),
                             format='png')

    print(f"Creating workflow for subject: {subject_id}")
    if ants_threads != 8:
        print(
            f"ANTs will utilize the user-requested {ants_threads} threads for parallel processing."
        )
    return workflow
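# A usage sketch for builder, assuming a hypothetical BIDS tree under
# /data/project and the same (older) pyBIDS API the function above already
# relies on (f.filename, extensions=...). All paths, IDs, the grabbids
# import, and the run-plugin settings are illustrative assumptions.
def _demo_builder():
    from bids.grabbids import BIDSLayout  # assumption: pyBIDS 0.x
    layout = BIDSLayout("/data/project")
    funcs = [
        f.filename for f in layout.get(
            subject="01", modality="func", extensions=".nii.gz")
    ]
    anat = layout.get(subject="01", modality="anat",
                      extensions=".nii.gz")[0].filename
    wf = builder(subject_id="sub-01",
                 subId="01",
                 project_dir="/data/project",
                 data_dir="/data/project",
                 output_dir="/data/project/derivatives",
                 output_final_dir="/data/project/derivatives/final",
                 output_interm_dir="/data/project/derivatives/intermediate",
                 layout=layout,
                 anat=anat,
                 funcs=funcs)
    wf.run(plugin="MultiProc", plugin_args={"n_procs": 4})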
def Lesion_extractor(
        name='Lesion_Extractor',
        wf_name='Test',
        base_dir='/homes_unix/alaurent/',
        input_dir=None,
        subjects=None,
        main=None,
        acc=None,
        atlas='/homes_unix/alaurent/cbstools-public-master/atlases/brain-segmentation-prior3.0/brain-atlas-quant-3.0.8.txt'
):
    wf = Workflow(wf_name)
    wf.base_dir = base_dir

    #file = open(subjects, "r")
    #subjects = file.read().split("\n")
    #file.close()

    # Subject List
    subjectList = Node(IdentityInterface(fields=['subject_id'],
                                         mandatory_inputs=True),
                       name="subList")
    subjectList.iterables = ('subject_id', [
        sub for sub in subjects if sub != '' and sub != '\n'
    ])

    # T1w and FLAIR
    scanList = Node(DataGrabber(infields=['subject_id'],
                                outfields=['T1', 'FLAIR']),
                    name="scanList")
    scanList.inputs.base_directory = input_dir
    scanList.inputs.ignore_exception = False
    scanList.inputs.raise_on_empty = True
    scanList.inputs.sort_filelist = True
    #scanList.inputs.template = '%s/%s.nii'
    #scanList.inputs.template_args = {'T1': [['subject_id', 'T1*']],
    #                                 'FLAIR': [['subject_id', 'FLAIR*']]}
    scanList.inputs.template = '%s/anat/%s'
    scanList.inputs.template_args = {
        'T1': [['subject_id', '*_T1w.nii.gz']],
        'FLAIR': [['subject_id', '*_FLAIR.nii.gz']]
    }
    wf.connect(subjectList, "subject_id", scanList, "subject_id")

    # # T1w and FLAIR
    # dg = Node(DataGrabber(outfields=['T1', 'FLAIR']), name="T1wFLAIR")
    # dg.inputs.base_directory = "/homes_unix/alaurent/LesionPipeline"
    # dg.inputs.template = "%s/NIFTI/*.nii.gz"
    # dg.inputs.template_args['T1'] = [['7']]
    # dg.inputs.template_args['FLAIR'] = [['9']]
    # dg.inputs.sort_filelist = True

    # Reorient Volume
    T1Conv = Node(Reorient2Std(), name="ReorientVolume")
    T1Conv.inputs.ignore_exception = False
    T1Conv.inputs.terminal_output = 'none'
    T1Conv.inputs.out_file = "T1_reoriented.nii.gz"
    wf.connect(scanList, "T1", T1Conv, "in_file")

    # Reorient Volume (2)
    T2flairConv = Node(Reorient2Std(), name="ReorientVolume2")
    T2flairConv.inputs.ignore_exception = False
    T2flairConv.inputs.terminal_output = 'none'
    T2flairConv.inputs.out_file = "FLAIR_reoriented.nii.gz"
    wf.connect(scanList, "FLAIR", T2flairConv, "in_file")

    # N4 bias field correction (node names retain the historical "N3" label)
    T1NUC = Node(N4BiasFieldCorrection(), name="N3Correction")
    T1NUC.inputs.dimension = 3
    T1NUC.inputs.environ = {'NSLOTS': '1'}
    T1NUC.inputs.ignore_exception = False
    T1NUC.inputs.num_threads = 1
    T1NUC.inputs.save_bias = False
    T1NUC.inputs.terminal_output = 'none'
    wf.connect(T1Conv, "out_file", T1NUC, "input_image")

    # N4 Correction (2)
    T2flairNUC = Node(N4BiasFieldCorrection(), name="N3Correction2")
    T2flairNUC.inputs.dimension = 3
    T2flairNUC.inputs.environ = {'NSLOTS': '1'}
    T2flairNUC.inputs.ignore_exception = False
    T2flairNUC.inputs.num_threads = 1
    T2flairNUC.inputs.save_bias = False
    T2flairNUC.inputs.terminal_output = 'none'
    wf.connect(T2flairConv, "out_file", T2flairNUC, "input_image")

    '''
    #####################
    ### PRE-NORMALIZE ###
    #####################
    To make sure there are no outlier values (negative, or really high) that
    would offset the initialization steps
    '''

    # Intensity Range Normalization
    getMaxT1NUC = Node(ImageStats(op_string='-r'), name="getMaxT1NUC")
    wf.connect(T1NUC, 'output_image', getMaxT1NUC, 'in_file')

    T1NUCirn = Node(AbcImageMaths(), name="IntensityNormalization")
    T1NUCirn.inputs.op_string = "-div"
    T1NUCirn.inputs.out_file = "normT1.nii.gz"
    wf.connect(T1NUC, 'output_image', T1NUCirn, 'in_file')
    wf.connect(getMaxT1NUC, ('out_stat', getElementFromList, 1),
               T1NUCirn, "op_value")

    # Intensity Range Normalization (2)
    getMaxT2NUC = Node(ImageStats(op_string='-r'), name="getMaxT2")
    wf.connect(T2flairNUC, 'output_image', getMaxT2NUC, 'in_file')
    T2NUCirn = Node(AbcImageMaths(), name="IntensityNormalization2")
    T2NUCirn.inputs.op_string = "-div"
    T2NUCirn.inputs.out_file = "normT2.nii.gz"
    wf.connect(T2flairNUC, 'output_image', T2NUCirn, 'in_file')
    wf.connect(getMaxT2NUC, ('out_stat', getElementFromList, 1),
               T2NUCirn, "op_value")

    '''
    ########################
    #### COREGISTRATION ####
    ########################
    '''

    # Optimized Automated Registration
    T2flairCoreg = Node(FLIRT(), name="OptimizedAutomatedRegistration")
    T2flairCoreg.inputs.output_type = 'NIFTI_GZ'
    wf.connect(T2NUCirn, "out_file", T2flairCoreg, "in_file")
    wf.connect(T1NUCirn, "out_file", T2flairCoreg, "reference")

    '''
    #########################
    #### SKULL-STRIPPING ####
    #########################
    '''

    # SPECTRE
    T1ss = Node(BET(), name="SPECTRE")
    T1ss.inputs.frac = 0.45  # 0.4
    T1ss.inputs.mask = True
    T1ss.inputs.outline = True
    T1ss.inputs.robust = True
    wf.connect(T1NUCirn, "out_file", T1ss, "in_file")

    # Image Calculator
    T2ss = Node(ApplyMask(), name="ImageCalculator")
    wf.connect(T1ss, "mask_file", T2ss, "mask_file")
    wf.connect(T2flairCoreg, "out_file", T2ss, "in_file")

    '''
    ####################################
    #### 2nd LAYER OF N4 CORRECTION ####
    ####################################
    This time without the skull: there were some significant amounts of
    inhomogeneities left over.
    '''

    # N4 Correction (3)
    T1ssNUC = Node(N4BiasFieldCorrection(), name="N3Correction3")
    T1ssNUC.inputs.dimension = 3
    T1ssNUC.inputs.environ = {'NSLOTS': '1'}
    T1ssNUC.inputs.ignore_exception = False
    T1ssNUC.inputs.num_threads = 1
    T1ssNUC.inputs.save_bias = False
    T1ssNUC.inputs.terminal_output = 'none'
    wf.connect(T1ss, "out_file", T1ssNUC, "input_image")

    # N4 Correction (4)
    T2ssNUC = Node(N4BiasFieldCorrection(), name="N3Correction4")
    T2ssNUC.inputs.dimension = 3
    T2ssNUC.inputs.environ = {'NSLOTS': '1'}
    T2ssNUC.inputs.ignore_exception = False
    T2ssNUC.inputs.num_threads = 1
    T2ssNUC.inputs.save_bias = False
    T2ssNUC.inputs.terminal_output = 'none'
    wf.connect(T2ss, "out_file", T2ssNUC, "input_image")

    '''
    ####################################
    ####     NORMALIZE FOR MGDM     ####
    ####################################
    This normalization is a bit aggressive: only useful to have a cropped
    dynamic range into MGDM, but possibly harmful to further processing, so
    the unprocessed images are passed to the subsequent steps.
    '''
    # Intensity Range Normalization
    getMaxT1ssNUC = Node(ImageStats(op_string='-r'), name="getMaxT1ssNUC")
    wf.connect(T1ssNUC, 'output_image', getMaxT1ssNUC, 'in_file')

    T1ssNUCirn = Node(AbcImageMaths(), name="IntensityNormalization3")
    T1ssNUCirn.inputs.op_string = "-div"
    T1ssNUCirn.inputs.out_file = "normT1ss.nii.gz"
    wf.connect(T1ssNUC, 'output_image', T1ssNUCirn, 'in_file')
    wf.connect(getMaxT1ssNUC, ('out_stat', getElementFromList, 1),
               T1ssNUCirn, "op_value")

    # Intensity Range Normalization (2)
    getMaxT2ssNUC = Node(ImageStats(op_string='-r'), name="getMaxT2ssNUC")
    wf.connect(T2ssNUC, 'output_image', getMaxT2ssNUC, 'in_file')

    T2ssNUCirn = Node(AbcImageMaths(), name="IntensityNormalization4")
    T2ssNUCirn.inputs.op_string = "-div"
    T2ssNUCirn.inputs.out_file = "normT2ss.nii.gz"
    wf.connect(T2ssNUC, 'output_image', T2ssNUCirn, 'in_file')
    wf.connect(getMaxT2ssNUC, ('out_stat', getElementFromList, 1),
               T2ssNUCirn, "op_value")

    '''
    ####################################
    ####      ESTIMATE CSF PV       ####
    ####################################
    Here we try to get a better handle on CSF voxels to help the
    segmentation step
    '''

    # Recursive Ridge Diffusion
    CSF_pv = Node(RecursiveRidgeDiffusion(), name='estimate_CSF_pv')
    CSF_pv.plugin_args = {'sbatch_args': '--mem 6000'}
    CSF_pv.inputs.ridge_intensities = "dark"
    CSF_pv.inputs.ridge_filter = "2D"
    CSF_pv.inputs.orientation = "undefined"
    CSF_pv.inputs.ang_factor = 1.0
    CSF_pv.inputs.min_scale = 0
    CSF_pv.inputs.max_scale = 3
    CSF_pv.inputs.propagation_model = "diffusion"
    CSF_pv.inputs.diffusion_factor = 0.5
    CSF_pv.inputs.similarity_scale = 0.1
    CSF_pv.inputs.neighborhood_size = 4
    CSF_pv.inputs.max_iter = 100
    CSF_pv.inputs.max_diff = 0.001
    CSF_pv.inputs.save_data = True
    wf.connect(
        subjectList,
        ('subject_id', createOutputDir, wf.base_dir, wf.name, CSF_pv.name),
        CSF_pv, 'output_dir')
    wf.connect(T1ssNUCirn, 'out_file', CSF_pv, 'input_image')

    '''
    ####################################
    ####            MGDM            ####
    ####################################
    '''

    # Multi-contrast Brain Segmentation
    MGDM = Node(MGDMSegmentation(), name='MGDM')
    MGDM.plugin_args = {'sbatch_args': '--mem 7000'}
    MGDM.inputs.contrast_type1 = "Mprage3T"
    MGDM.inputs.contrast_type2 = "FLAIR3T"
    MGDM.inputs.contrast_type3 = "PVDURA"
    MGDM.inputs.save_data = True
    MGDM.inputs.atlas_file = atlas
    wf.connect(
        subjectList,
        ('subject_id', createOutputDir, wf.base_dir, wf.name, MGDM.name),
        MGDM, 'output_dir')
    wf.connect(T1ssNUCirn, 'out_file', MGDM, 'contrast_image1')
    wf.connect(T2ssNUCirn, 'out_file', MGDM, 'contrast_image2')
    wf.connect(CSF_pv, 'ridge_pv', MGDM, 'contrast_image3')

    # Enhance Region Contrast
    ERC = Node(EnhanceRegionContrast(), name='ERC')
    ERC.plugin_args = {'sbatch_args': '--mem 7000'}
    ERC.inputs.enhanced_region = "crwm"
    ERC.inputs.contrast_background = "crgm"
    ERC.inputs.partial_voluming_distance = 2.0
    ERC.inputs.save_data = True
    ERC.inputs.atlas_file = atlas
    wf.connect(
        subjectList,
        ('subject_id', createOutputDir, wf.base_dir, wf.name, ERC.name),
        ERC, 'output_dir')
    wf.connect(T1ssNUC, 'output_image', ERC, 'intensity_image')
    wf.connect(MGDM, 'segmentation', ERC, 'segmentation_image')
    wf.connect(MGDM, 'distance', ERC, 'levelset_boundary_image')

    # Enhance Region Contrast (2)
    ERC2 = Node(EnhanceRegionContrast(), name='ERC2')
    ERC2.plugin_args = {'sbatch_args': '--mem 7000'}
    ERC2.inputs.enhanced_region = "crwm"
    ERC2.inputs.contrast_background = "crgm"
    ERC2.inputs.partial_voluming_distance = 2.0
    ERC2.inputs.save_data = True
    ERC2.inputs.atlas_file = atlas
    wf.connect(
        subjectList,
        ('subject_id', createOutputDir, wf.base_dir, wf.name, ERC2.name),
        ERC2, 'output_dir')
    wf.connect(T2ssNUC, 'output_image', ERC2, 'intensity_image')
    wf.connect(MGDM, 'segmentation', ERC2, 'segmentation_image')
    wf.connect(MGDM, 'distance', ERC2, 'levelset_boundary_image')

    # Define Multi-Region Priors
    DMRP = Node(DefineMultiRegionPriors(), name='DefineMultRegPriors')
    DMRP.plugin_args = {'sbatch_args': '--mem 6000'}
    #DMRP.inputs.defined_region = "ventricle-horns"
    #DMRP.inputs.definition_method = "closest-distance"
    DMRP.inputs.distance_offset = 3.0
    DMRP.inputs.save_data = True
    DMRP.inputs.atlas_file = atlas
    wf.connect(
        subjectList,
        ('subject_id', createOutputDir, wf.base_dir, wf.name, DMRP.name),
        DMRP, 'output_dir')
    wf.connect(MGDM, 'segmentation', DMRP, 'segmentation_image')
    wf.connect(MGDM, 'distance', DMRP, 'levelset_boundary_image')

    '''
    ###############################################
    ####       REMOVE VENTRICLE POSTERIOR      ####
    ###############################################
    Due to topology constraints, the ventricles are often not fully
    segmented: here we add back all ventricle voxels from the posterior
    probability (without the topology constraints)
    '''

    # Posterior label
    PostLabel = Node(Split(), name='PosteriorLabel')
    PostLabel.inputs.dimension = "t"
    wf.connect(MGDM, 'labels', PostLabel, 'in_file')

    # Posterior proba
    PostProba = Node(Split(), name='PosteriorProba')
    PostProba.inputs.dimension = "t"
    wf.connect(MGDM, 'memberships', PostProba, 'in_file')

    # Threshold binary mask : ventricle label part 1
    VentLabel1 = Node(Threshold(), name="VentricleLabel1")
    VentLabel1.inputs.thresh = 10.5
    VentLabel1.inputs.direction = "below"
    wf.connect(PostLabel, ("out_files", getFirstElement),
               VentLabel1, "in_file")

    # Threshold binary mask : ventricle label part 2
    VentLabel2 = Node(Threshold(), name="VentricleLabel2")
    VentLabel2.inputs.thresh = 13.5
    VentLabel2.inputs.direction = "above"
    wf.connect(VentLabel1, "out_file", VentLabel2, "in_file")

    # Image calculator : ventricle proba
    VentProba = Node(ImageMaths(), name="VentricleProba")
    VentProba.inputs.op_string = "-mul"
    VentProba.inputs.out_file = "ventproba.nii.gz"
    wf.connect(PostProba, ("out_files", getFirstElement),
               VentProba, "in_file")
    wf.connect(VentLabel2, "out_file", VentProba, "in_file2")

    # Image calculator : remove inter ventricles
    RmInterVent = Node(ImageMaths(), name="RemoveInterVent")
    RmInterVent.inputs.op_string = "-sub"
    RmInterVent.inputs.out_file = "rmintervent.nii.gz"
    wf.connect(ERC, "region_pv", RmInterVent, "in_file")
    wf.connect(DMRP, "inter_ventricular_pv", RmInterVent, "in_file2")

    # Image calculator : add horns
    AddHorns = Node(ImageMaths(), name="AddHorns")
    AddHorns.inputs.op_string = "-add"
    AddHorns.inputs.out_file = "rmvent.nii.gz"
    wf.connect(RmInterVent, "out_file", AddHorns, "in_file")
    wf.connect(DMRP, "ventricular_horns_pv", AddHorns, "in_file2")

    # Image calculator : remove ventricles
    RmVent = Node(ImageMaths(), name="RemoveVentricles")
    RmVent.inputs.op_string = "-sub"
    RmVent.inputs.out_file = "rmvent.nii.gz"
    wf.connect(AddHorns, "out_file", RmVent, "in_file")
    wf.connect(VentProba, "out_file", RmVent, "in_file2")

    # Image calculator : remove internal capsule
    RmIC = Node(ImageMaths(), name="RemoveInternalCap")
    RmIC.inputs.op_string = "-sub"
    RmIC.inputs.out_file = "rmic.nii.gz"
    wf.connect(RmVent, "out_file", RmIC, "in_file")
    wf.connect(DMRP, "internal_capsule_pv", RmIC, "in_file2")

    # Intensity Range Normalization (3)
    getMaxRmIC = Node(ImageStats(op_string='-r'), name="getMaxRmIC")
    wf.connect(RmIC, 'out_file', getMaxRmIC, 'in_file')

    RmICirn = Node(AbcImageMaths(), name="IntensityNormalization5")
    RmICirn.inputs.op_string = "-div"
    RmICirn.inputs.out_file = "normRmIC.nii.gz"
    wf.connect(RmIC, 'out_file', RmICirn, 'in_file')
    wf.connect(getMaxRmIC, ('out_stat', getElementFromList, 1),
               RmICirn, "op_value")

    # Probability To Levelset : WM orientation
    WM_Orient = Node(ProbabilityToLevelset(), name='WM_Orientation')
    WM_Orient.plugin_args = {'sbatch_args': '--mem 6000'}
    WM_Orient.inputs.save_data = True
    wf.connect(
        subjectList,
        ('subject_id', createOutputDir, wf.base_dir, wf.name, WM_Orient.name),
        WM_Orient, 'output_dir')
    wf.connect(RmICirn, 'out_file', WM_Orient, 'probability_image')

    # Recursive Ridge Diffusion : PVS in WM only
    WM_pvs = Node(RecursiveRidgeDiffusion(), name='PVS_in_WM')
    WM_pvs.plugin_args = {'sbatch_args': '--mem 6000'}
    WM_pvs.inputs.ridge_intensities = "bright"
    WM_pvs.inputs.ridge_filter = "1D"
    WM_pvs.inputs.orientation = "orthogonal"
    WM_pvs.inputs.ang_factor = 1.0
    WM_pvs.inputs.min_scale = 0
    WM_pvs.inputs.max_scale = 3
    WM_pvs.inputs.propagation_model = "diffusion"
    WM_pvs.inputs.diffusion_factor = 1.0
    WM_pvs.inputs.similarity_scale = 1.0
    WM_pvs.inputs.neighborhood_size = 2
    WM_pvs.inputs.max_iter = 100
    WM_pvs.inputs.max_diff = 0.001
    WM_pvs.inputs.save_data = True
    wf.connect(
        subjectList,
        ('subject_id', createOutputDir, wf.base_dir, wf.name, WM_pvs.name),
        WM_pvs, 'output_dir')
    wf.connect(ERC, 'background_proba', WM_pvs, 'input_image')
    wf.connect(WM_Orient, 'levelset', WM_pvs, 'surface_levelset')
    wf.connect(RmICirn, 'out_file', WM_pvs, 'loc_prior')

    # Extract Lesions : extract WM PVS
    extract_WM_pvs = Node(LesionExtraction(), name='ExtractPVSfromWM')
    extract_WM_pvs.plugin_args = {'sbatch_args': '--mem 6000'}
    extract_WM_pvs.inputs.gm_boundary_partial_vol_dist = 1.0
    extract_WM_pvs.inputs.csf_boundary_partial_vol_dist = 3.0
    extract_WM_pvs.inputs.lesion_clust_dist = 1.0
    extract_WM_pvs.inputs.prob_min_thresh = 0.1
    extract_WM_pvs.inputs.prob_max_thresh = 0.33
    extract_WM_pvs.inputs.small_lesion_size = 4.0
    extract_WM_pvs.inputs.save_data = True
    extract_WM_pvs.inputs.atlas_file = atlas
    wf.connect(subjectList,
               ('subject_id', createOutputDir, wf.base_dir, wf.name,
                extract_WM_pvs.name), extract_WM_pvs, 'output_dir')
    wf.connect(WM_pvs, 'propagation', extract_WM_pvs, 'probability_image')
    wf.connect(MGDM, 'segmentation', extract_WM_pvs, 'segmentation_image')
    wf.connect(MGDM, 'distance', extract_WM_pvs, 'levelset_boundary_image')
    wf.connect(RmICirn, 'out_file', extract_WM_pvs, 'location_prior_image')

    '''
    2nd branch
    '''

    # Image calculator : internal capsule without ventricles
    ICwoVent = Node(ImageMaths(), name="ICWithoutVentricules")
    ICwoVent.inputs.op_string = "-sub"
    ICwoVent.inputs.out_file = "icwovent.nii.gz"
    wf.connect(DMRP, "internal_capsule_pv", ICwoVent, "in_file")
    wf.connect(DMRP, "inter_ventricular_pv", ICwoVent, "in_file2")

    # Image calculator : remove ventricles IC
    RmVentIC = Node(ImageMaths(), name="RmVentIC")
    RmVentIC.inputs.op_string = "-sub"
    RmVentIC.inputs.out_file = "RmVentIC.nii.gz"
    wf.connect(ICwoVent, "out_file", RmVentIC, "in_file")
    wf.connect(VentProba, "out_file", RmVentIC, "in_file2")

    # Intensity Range Normalization (4)
    getMaxRmVentIC = Node(ImageStats(op_string='-r'), name="getMaxRmVentIC")
    wf.connect(RmVentIC, 'out_file', getMaxRmVentIC, 'in_file')

    RmVentICirn = Node(AbcImageMaths(), name="IntensityNormalization6")
    RmVentICirn.inputs.op_string = "-div"
    RmVentICirn.inputs.out_file = "normRmVentIC.nii.gz"
    wf.connect(RmVentIC, 'out_file', RmVentICirn, 'in_file')
    wf.connect(getMaxRmVentIC, ('out_stat', getElementFromList, 1),
               RmVentICirn, "op_value")
    # Probability To Levelset : IC orientation
    IC_Orient = Node(ProbabilityToLevelset(), name='IC_Orientation')
    IC_Orient.plugin_args = {'sbatch_args': '--mem 6000'}
    IC_Orient.inputs.save_data = True
    wf.connect(
        subjectList,
        ('subject_id', createOutputDir, wf.base_dir, wf.name, IC_Orient.name),
        IC_Orient, 'output_dir')
    wf.connect(RmVentICirn, 'out_file', IC_Orient, 'probability_image')

    # Recursive Ridge Diffusion : PVS in IC only
    IC_pvs = Node(RecursiveRidgeDiffusion(), name='RecursiveRidgeDiffusion2')
    IC_pvs.plugin_args = {'sbatch_args': '--mem 6000'}
    IC_pvs.inputs.ridge_intensities = "bright"
    IC_pvs.inputs.ridge_filter = "1D"
    IC_pvs.inputs.orientation = "undefined"
    IC_pvs.inputs.ang_factor = 1.0
    IC_pvs.inputs.min_scale = 0
    IC_pvs.inputs.max_scale = 3
    IC_pvs.inputs.propagation_model = "diffusion"
    IC_pvs.inputs.diffusion_factor = 1.0
    IC_pvs.inputs.similarity_scale = 1.0
    IC_pvs.inputs.neighborhood_size = 2
    IC_pvs.inputs.max_iter = 100
    IC_pvs.inputs.max_diff = 0.001
    IC_pvs.inputs.save_data = True
    wf.connect(
        subjectList,
        ('subject_id', createOutputDir, wf.base_dir, wf.name, IC_pvs.name),
        IC_pvs, 'output_dir')
    wf.connect(ERC, 'background_proba', IC_pvs, 'input_image')
    wf.connect(IC_Orient, 'levelset', IC_pvs, 'surface_levelset')
    wf.connect(RmVentICirn, 'out_file', IC_pvs, 'loc_prior')

    # Extract Lesions : extract IC PVS
    extract_IC_pvs = Node(LesionExtraction(), name='ExtractPVSfromIC')
    extract_IC_pvs.plugin_args = {'sbatch_args': '--mem 6000'}
    extract_IC_pvs.inputs.gm_boundary_partial_vol_dist = 1.0
    extract_IC_pvs.inputs.csf_boundary_partial_vol_dist = 4.0
    extract_IC_pvs.inputs.lesion_clust_dist = 1.0
    extract_IC_pvs.inputs.prob_min_thresh = 0.25
    extract_IC_pvs.inputs.prob_max_thresh = 0.5
    extract_IC_pvs.inputs.small_lesion_size = 4.0
    extract_IC_pvs.inputs.save_data = True
    extract_IC_pvs.inputs.atlas_file = atlas
    wf.connect(subjectList,
               ('subject_id', createOutputDir, wf.base_dir, wf.name,
                extract_IC_pvs.name), extract_IC_pvs, 'output_dir')
    wf.connect(IC_pvs, 'propagation', extract_IC_pvs, 'probability_image')
    wf.connect(MGDM, 'segmentation', extract_IC_pvs, 'segmentation_image')
    wf.connect(MGDM, 'distance', extract_IC_pvs, 'levelset_boundary_image')
    wf.connect(RmVentICirn, 'out_file', extract_IC_pvs,
               'location_prior_image')

    '''
    3rd branch
    '''

    # Image calculator : remove inter-ventricular PV
    RmInter = Node(ImageMaths(), name="RemoveInterVentricules")
    RmInter.inputs.op_string = "-sub"
    RmInter.inputs.out_file = "rminter.nii.gz"
    wf.connect(ERC2, 'region_pv', RmInter, "in_file")
    wf.connect(DMRP, "inter_ventricular_pv", RmInter, "in_file2")

    # Image calculator : add ventricular horns
    AddVentHorns = Node(ImageMaths(), name="AddVentHorns")
    AddVentHorns.inputs.op_string = "-add"
    AddVentHorns.inputs.out_file = "rminter.nii.gz"
    wf.connect(RmInter, 'out_file', AddVentHorns, "in_file")
    wf.connect(DMRP, "ventricular_horns_pv", AddVentHorns, "in_file2")

    # Intensity Range Normalization (5)
    getMaxAddVentHorns = Node(ImageStats(op_string='-r'),
                              name="getMaxAddVentHorns")
    wf.connect(AddVentHorns, 'out_file', getMaxAddVentHorns, 'in_file')

    AddVentHornsirn = Node(AbcImageMaths(), name="IntensityNormalization7")
    AddVentHornsirn.inputs.op_string = "-div"
    AddVentHornsirn.inputs.out_file = "normAddVentHorns.nii.gz"
    wf.connect(AddVentHorns, 'out_file', AddVentHornsirn, 'in_file')
    wf.connect(getMaxAddVentHorns, ('out_stat', getElementFromList, 1),
               AddVentHornsirn, "op_value")

    # Extract Lesions : extract White Matter Hyperintensities
    extract_WMH = Node(LesionExtraction(), name='Extract_WMH')
    extract_WMH.plugin_args = {'sbatch_args': '--mem 6000'}
    extract_WMH.inputs.gm_boundary_partial_vol_dist = 1.0
    extract_WMH.inputs.csf_boundary_partial_vol_dist = 2.0
    extract_WMH.inputs.lesion_clust_dist = 1.0
    extract_WMH.inputs.prob_min_thresh = 0.84
    extract_WMH.inputs.prob_max_thresh = 0.84
    extract_WMH.inputs.small_lesion_size = 4.0
    extract_WMH.inputs.save_data = True
    extract_WMH.inputs.atlas_file = atlas
    wf.connect(subjectList,
               ('subject_id', createOutputDir, wf.base_dir, wf.name,
                extract_WMH.name), extract_WMH, 'output_dir')
    wf.connect(ERC2, 'background_proba', extract_WMH, 'probability_image')
    wf.connect(MGDM, 'segmentation', extract_WMH, 'segmentation_image')
    wf.connect(MGDM, 'distance', extract_WMH, 'levelset_boundary_image')
    wf.connect(AddVentHornsirn, 'out_file', extract_WMH,
               'location_prior_image')

    #===========================================================================
    # extract_WMH2 = extract_WMH.clone(name='Extract_WMH2')
    # extract_WMH2.inputs.gm_boundary_partial_vol_dist = 2.0
    # wf.connect(subjectList,('subject_id',createOutputDir,wf.base_dir,wf.name,extract_WMH2.name),extract_WMH2,'output_dir')
    # wf.connect(ERC2,'background_proba',extract_WMH2,'probability_image')
    # wf.connect(MGDM,'segmentation',extract_WMH2,'segmentation_image')
    # wf.connect(MGDM,'distance',extract_WMH2,'levelset_boundary_image')
    # wf.connect(AddVentHornsirn,'out_file',extract_WMH2,'location_prior_image')
    #
    # extract_WMH3 = extract_WMH.clone(name='Extract_WMH3')
    # extract_WMH3.inputs.gm_boundary_partial_vol_dist = 3.0
    # wf.connect(subjectList,('subject_id',createOutputDir,wf.base_dir,wf.name,extract_WMH3.name),extract_WMH3,'output_dir')
    # wf.connect(ERC2,'background_proba',extract_WMH3,'probability_image')
    # wf.connect(MGDM,'segmentation',extract_WMH3,'segmentation_image')
    # wf.connect(MGDM,'distance',extract_WMH3,'levelset_boundary_image')
    # wf.connect(AddVentHornsirn,'out_file',extract_WMH3,'location_prior_image')
    #===========================================================================

    '''
    ####################################
    ####     FINDING SMALL WMHs     ####
    ####################################
    Small round WMHs near the cortex are often missed by the main algorithm,
    so we're adding this one that takes care of them.
    '''
    # Recursive Ridge Diffusion : round WMH detection
    round_WMH = Node(RecursiveRidgeDiffusion(), name='round_WMH')
    round_WMH.plugin_args = {'sbatch_args': '--mem 6000'}
    round_WMH.inputs.ridge_intensities = "bright"
    round_WMH.inputs.ridge_filter = "0D"
    round_WMH.inputs.orientation = "undefined"
    round_WMH.inputs.ang_factor = 1.0
    round_WMH.inputs.min_scale = 1
    round_WMH.inputs.max_scale = 4
    round_WMH.inputs.propagation_model = "none"
    round_WMH.inputs.diffusion_factor = 1.0
    round_WMH.inputs.similarity_scale = 0.1
    round_WMH.inputs.neighborhood_size = 4
    round_WMH.inputs.max_iter = 100
    round_WMH.inputs.max_diff = 0.001
    round_WMH.inputs.save_data = True
    wf.connect(
        subjectList,
        ('subject_id', createOutputDir, wf.base_dir, wf.name, round_WMH.name),
        round_WMH, 'output_dir')
    wf.connect(ERC2, 'background_proba', round_WMH, 'input_image')
    wf.connect(AddVentHornsirn, 'out_file', round_WMH, 'loc_prior')

    # Extract Lesions : extract round WMH
    extract_round_WMH = Node(LesionExtraction(), name='Extract_round_WMH')
    extract_round_WMH.plugin_args = {'sbatch_args': '--mem 6000'}
    extract_round_WMH.inputs.gm_boundary_partial_vol_dist = 1.0
    extract_round_WMH.inputs.csf_boundary_partial_vol_dist = 2.0
    extract_round_WMH.inputs.lesion_clust_dist = 1.0
    extract_round_WMH.inputs.prob_min_thresh = 0.33
    extract_round_WMH.inputs.prob_max_thresh = 0.33
    extract_round_WMH.inputs.small_lesion_size = 6.0
    extract_round_WMH.inputs.save_data = True
    extract_round_WMH.inputs.atlas_file = atlas
    wf.connect(subjectList,
               ('subject_id', createOutputDir, wf.base_dir, wf.name,
                extract_round_WMH.name), extract_round_WMH, 'output_dir')
    wf.connect(round_WMH, 'ridge_pv', extract_round_WMH, 'probability_image')
    wf.connect(MGDM, 'segmentation', extract_round_WMH, 'segmentation_image')
    wf.connect(MGDM, 'distance', extract_round_WMH,
               'levelset_boundary_image')
    wf.connect(AddVentHornsirn, 'out_file', extract_round_WMH,
               'location_prior_image')

    #===========================================================================
    # extract_round_WMH2 = extract_round_WMH.clone(name='Extract_round_WMH2')
    # extract_round_WMH2.inputs.gm_boundary_partial_vol_dist = 2.0
    # wf.connect(subjectList,('subject_id',createOutputDir,wf.base_dir,wf.name,extract_round_WMH2.name),extract_round_WMH2,'output_dir')
    # wf.connect(round_WMH,'ridge_pv',extract_round_WMH2,'probability_image')
    # wf.connect(MGDM,'segmentation',extract_round_WMH2,'segmentation_image')
    # wf.connect(MGDM,'distance',extract_round_WMH2,'levelset_boundary_image')
    # wf.connect(AddVentHornsirn,'out_file',extract_round_WMH2,'location_prior_image')
    #
    # extract_round_WMH3 = extract_round_WMH.clone(name='Extract_round_WMH3')
    # extract_round_WMH3.inputs.gm_boundary_partial_vol_dist = 2.0
    # wf.connect(subjectList,('subject_id',createOutputDir,wf.base_dir,wf.name,extract_round_WMH3.name),extract_round_WMH3,'output_dir')
    # wf.connect(round_WMH,'ridge_pv',extract_round_WMH3,'probability_image')
    # wf.connect(MGDM,'segmentation',extract_round_WMH3,'segmentation_image')
    # wf.connect(MGDM,'distance',extract_round_WMH3,'levelset_boundary_image')
    # wf.connect(AddVentHornsirn,'out_file',extract_round_WMH3,'location_prior_image')
    #===========================================================================

    '''
    ####################################
    ####     COMBINE BOTH TYPES     ####
    ####################################
    Small round WMHs and regular WMHs together before thresholding
    + PVS from white matter and internal capsule
    '''

    # Image calculator : WM + IC DVRS
    DVRS = Node(ImageMaths(), name="DVRS")
    DVRS.inputs.op_string = "-max"
DVRS.inputs.out_file = "DVRS_map.nii.gz" wf.connect(extract_WM_pvs, 'lesion_score', DVRS, "in_file") wf.connect(extract_IC_pvs, "lesion_score", DVRS, "in_file2") # Image calculator : WMH + round WMH = Node(ImageMaths(), name="WMH") WMH.inputs.op_string = "-max" WMH.inputs.out_file = "WMH_map.nii.gz" wf.connect(extract_WMH, 'lesion_score', WMH, "in_file") wf.connect(extract_round_WMH, "lesion_score", WMH, "in_file2") #=========================================================================== # WMH2 = Node(ImageMaths(), name="WMH2") # WMH2.inputs.op_string = "-max" # WMH2.inputs.out_file = "WMH2_map.nii.gz" # wf.connect(extract_WMH2,'lesion_score',WMH2,"in_file") # wf.connect(extract_round_WMH2,"lesion_score", WMH2, "in_file2") # # WMH3 = Node(ImageMaths(), name="WMH3") # WMH3.inputs.op_string = "-max" # WMH3.inputs.out_file = "WMH3_map.nii.gz" # wf.connect(extract_WMH3,'lesion_score',WMH3,"in_file") # wf.connect(extract_round_WMH3,"lesion_score", WMH3, "in_file2") #=========================================================================== # Image calculator : multiply by boundnary partial volume WMH_mul = Node(ImageMaths(), name="WMH_mul") WMH_mul.inputs.op_string = "-mul" WMH_mul.inputs.out_file = "final_mask.nii.gz" wf.connect(WMH, "out_file", WMH_mul, "in_file") wf.connect(MGDM, "distance", WMH_mul, "in_file2") #=========================================================================== # WMH2_mul = Node(ImageMaths(), name="WMH2_mul") # WMH2_mul.inputs.op_string = "-mul" # WMH2_mul.inputs.out_file = "final_mask.nii.gz" # wf.connect(WMH2,"out_file", WMH2_mul,"in_file") # wf.connect(MGDM,"distance", WMH2_mul, "in_file2") # # WMH3_mul = Node(ImageMaths(), name="WMH3_mul") # WMH3_mul.inputs.op_string = "-mul" # WMH3_mul.inputs.out_file = "final_mask.nii.gz" # wf.connect(WMH3,"out_file", WMH3_mul,"in_file") # wf.connect(MGDM,"distance", WMH3_mul, "in_file2") #=========================================================================== ''' ########################################## #### SEGMENTATION THRESHOLD #### ########################################## A threshold of 0.5 is very conservative, because the final lesion score is the product of two probabilities. This needs to be optimized to a value between 0.25 and 0.5 to balance false negatives (dominant at 0.5) and false positives (dominant at low values). 
''' # Threshold binary mask : DVRS_mask = Node(Threshold(), name="DVRS_mask") DVRS_mask.inputs.thresh = 0.25 DVRS_mask.inputs.direction = "below" wf.connect(DVRS, "out_file", DVRS_mask, "in_file") # Threshold binary mask : 025 WMH1_025 = Node(Threshold(), name="WMH1_025") WMH1_025.inputs.thresh = 0.25 WMH1_025.inputs.direction = "below" wf.connect(WMH_mul, "out_file", WMH1_025, "in_file") #=========================================================================== # WMH2_025 = Node(Threshold(), name="WMH2_025") # WMH2_025.inputs.thresh = 0.25 # WMH2_025.inputs.direction = "below" # wf.connect(WMH2_mul,"out_file", WMH2_025, "in_file") # # WMH3_025 = Node(Threshold(), name="WMH3_025") # WMH3_025.inputs.thresh = 0.25 # WMH3_025.inputs.direction = "below" # wf.connect(WMH3_mul,"out_file", WMH3_025, "in_file") #=========================================================================== # Threshold binary mask : 050 WMH1_050 = Node(Threshold(), name="WMH1_050") WMH1_050.inputs.thresh = 0.50 WMH1_050.inputs.direction = "below" wf.connect(WMH_mul, "out_file", WMH1_050, "in_file") #=========================================================================== # WMH2_050 = Node(Threshold(), name="WMH2_050") # WMH2_050.inputs.thresh = 0.50 # WMH2_050.inputs.direction = "below" # wf.connect(WMH2_mul,"out_file", WMH2_050, "in_file") # # WMH3_050 = Node(Threshold(), name="WMH3_050") # WMH3_050.inputs.thresh = 0.50 # WMH3_050.inputs.direction = "below" # wf.connect(WMH3_mul,"out_file", WMH3_050, "in_file") #=========================================================================== # Threshold binary mask : 075 WMH1_075 = Node(Threshold(), name="WMH1_075") WMH1_075.inputs.thresh = 0.75 WMH1_075.inputs.direction = "below" wf.connect(WMH_mul, "out_file", WMH1_075, "in_file") #=========================================================================== # WMH2_075 = Node(Threshold(), name="WMH2_075") # WMH2_075.inputs.thresh = 0.75 # WMH2_075.inputs.direction = "below" # wf.connect(WMH2_mul,"out_file", WMH2_075, "in_file") # # WMH3_075 = Node(Threshold(), name="WMH3_075") # WMH3_075.inputs.thresh = 0.75 # WMH3_075.inputs.direction = "below" # wf.connect(WMH3_mul,"out_file", WMH3_075, "in_file") #=========================================================================== ## Outputs DVRS_Output = Node(IdentityInterface(fields=[ 'mask', 'region', 'lesion_size', 'lesion_proba', 'boundary', 'label', 'score' ]), name='DVRS_Output') wf.connect(DVRS_mask, 'out_file', DVRS_Output, 'mask') WMH_output = Node(IdentityInterface(fields=[ 'mask1025', 'mask1050', 'mask1075', 'mask2025', 'mask2050', 'mask2075', 'mask3025', 'mask3050', 'mask3075' ]), name='WMH_output') wf.connect(WMH1_025, 'out_file', WMH_output, 'mask1025') #wf.connect(WMH2_025,'out_file',WMH_output,'mask2025') #wf.connect(WMH3_025,'out_file',WMH_output,'mask3025') wf.connect(WMH1_050, 'out_file', WMH_output, 'mask1050') #wf.connect(WMH2_050,'out_file',WMH_output,'mask2050') #wf.connect(WMH3_050,'out_file',WMH_output,'mask3050') wf.connect(WMH1_075, 'out_file', WMH_output, 'mask1075') #wf.connect(WMH2_075,'out_file',WMH_output,'mask2070') #wf.connect(WMH3_075,'out_file',WMH_output,'mask3075') return wf
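To make the thresholding note above concrete, here is a minimal standalone sketch with made-up probabilities; it illustrates the product-of-probabilities argument and is not part of the workflow:

import math

# Hypothetical values: the final lesion score is the product of two
# probabilities, so a 0.5 cut-off implicitly demands that each factor
# exceed sqrt(0.5) ~= 0.71 on average.
p_lesion = 0.7    # hypothetical lesion probability
p_boundary = 0.6  # hypothetical boundary partial-volume weight

score = p_lesion * p_boundary  # 0.42
print(score >= 0.5)            # False: rejected by the conservative cut-off
print(score >= 0.25)           # True: kept by the relaxed cut-off
print(math.sqrt(0.5))          # ~0.707: per-factor bar implied by a 0.5 product cut-off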
def wfmaker(project_dir,
            raw_dir,
            subject_id,
            task_name='',
            apply_trim=False,
            apply_dist_corr=False,
            apply_smooth=False,
            apply_filter=False,
            mni_template='2mm',
            apply_n4=True,
            ants_threads=8,
            readable_crash_files=False):
    """
    This function returns a "standard" workflow based on requested settings.
    Assumes data is organized in a BIDS-format directory structure.

    *Work flow steps*:

    1) EPI Distortion Correction (FSL; optional)
    2) Trimming (nipy)
    3) Realignment/Motion Correction (FSL)
    4) Artifact Detection (rapidART/python)
    5) Brain Extraction + N4 Bias Correction (ANTs)
    6) Coregistration (rigid) (ANTs)
    7) Normalization to MNI (non-linear) (ANTs)
    8) Low-pass filtering (nilearn; optional)
    9) Smoothing (FSL; optional)
    10) Downsampling to INT16 precision to save space (nibabel)

    Args:
        project_dir (str): full path to the root of the project folder, e.g. /my/data/myproject. All preprocessed data will be placed under this folder, and the raw_dir folder will be searched for under this folder
        raw_dir (str): folder name for raw data, e.g. 'raw', which would be automatically converted to /my/data/myproject/raw
        subject_id (str/int): subject ID to process. Can be either a subject ID string, e.g. 'sub-0001', or an integer to index the entire list of subjects in raw_dir, e.g. 0, which would process the first subject
        apply_trim (int/bool; optional): number of volumes to trim from the beginning of each functional run; default is False
        task_name (str; optional): which functional task runs to process; default is all runs
        apply_dist_corr (bool; optional): look for fmap files and perform distortion correction; default False
        apply_smooth (int/list; optional): smoothing to perform in FWHM mm; if a list is provided, will create outputs for each smoothing kernel separately; default False
        apply_filter (float/list; optional): low-pass/high-freq filtering cut-offs in Hz; if a list is provided, will create outputs for each filter cut-off separately. With high temporal resolution scans, .25Hz is a decent value to capture respiratory artifacts; default None/False
        mni_template (str; optional): which mm resolution template to use, e.g. '3mm'; default '2mm'
        apply_n4 (bool; optional): perform N4 Bias Field correction on the anatomical image; default True
        ants_threads (int; optional): number of threads ANTs should use for its processes; default 8
        readable_crash_files (bool; optional): should nipype crash files be saved as txt? This makes them easily readable, but sometimes interferes with nipype's ability to use cached results of successfully run nodes (i.e. picking up where it left off after bugs are fixed); default False

    Examples:

        >>> from cosanlab_preproc.wfmaker import wfmaker
        >>> # Create workflow that performs no distortion correction, trims the first 5 TRs, applies no filtering or smoothing, and normalizes to 2mm MNI space (the defaults). Run it with 16 cores.
        >>>
        >>> workflow = wfmaker(project_dir='/data/project', raw_dir='raw', apply_trim=5)
        >>>
        >>> workflow.run('MultiProc', plugin_args={'n_procs': 16})
        >>>
        >>> # Create workflow that performs distortion correction, trims the first 25 TRs, applies both no filtering and .25Hz low-pass filtering, 6mm and 8mm smoothing, and normalizes to 3mm MNI space. Run it serially (will be super slow!).
        >>>
        >>> workflow = wfmaker(project_dir='/data/project', raw_dir='raw', apply_trim=25, apply_dist_corr=True, apply_filter=[0, .25], apply_smooth=[6.0, 8.0], mni_template='3mm')
        >>>
        >>> workflow.run()
    """

    ##################
    ### PATH SETUP ###
    ##################
    if mni_template not in ['1mm', '2mm', '3mm']:
        raise ValueError("MNI template must be: 1mm, 2mm, or 3mm")
    data_dir = os.path.join(project_dir, raw_dir)
    output_dir = os.path.join(project_dir, 'preprocessed')
    output_final_dir = os.path.join(output_dir, 'final')
    output_interm_dir = os.path.join(output_dir, 'intermediate')
    log_dir = os.path.join(project_dir, 'logs', 'nipype')

    if not os.path.exists(output_final_dir):
        os.makedirs(output_final_dir)
    if not os.path.exists(output_interm_dir):
        os.makedirs(output_interm_dir)
    if not os.path.exists(log_dir):
        os.makedirs(log_dir)

    # Set MNI template
    MNItemplate = os.path.join(get_resource_path(),
                               'MNI152_T1_' + mni_template + '_brain.nii.gz')
    MNImask = os.path.join(get_resource_path(),
                           'MNI152_T1_' + mni_template + '_brain_mask.nii.gz')
    MNItemplatehasskull = os.path.join(get_resource_path(),
                                       'MNI152_T1_' + mni_template + '.nii.gz')

    # Set ANTs files
    bet_ants_template = os.path.join(get_resource_path(),
                                     'OASIS_template.nii.gz')
    bet_ants_prob_mask = os.path.join(
        get_resource_path(), 'OASIS_BrainCerebellumProbabilityMask.nii.gz')
    bet_ants_registration_mask = os.path.join(
        get_resource_path(), 'OASIS_BrainCerebellumRegistrationMask.nii.gz')

    #################################
    ### NIPYPE IMPORTS AND CONFIG ###
    #################################
    # Update the nipype global config because workflow.config[...] = ... doesn't seem to work
    # Can't store a nipype config/rc file in the container anyway, so set these globally before importing and setting up the workflow, as suggested here: http://nipype.readthedocs.io/en/latest/users/config_file.html#config-file
    from nipype import config
    if readable_crash_files:
        cfg = dict(execution={'crashfile_format': 'txt'})
        config.update_config(cfg)
    config.update_config(
        {'logging': {
            'log_directory': log_dir,
            'log_to_file': True
        }})
    from nipype import logging
    logging.update_logging(config)

    # Now import everything else
    from nipype.interfaces.io import DataSink
    from nipype.interfaces.utility import Merge, IdentityInterface
    from nipype.pipeline.engine import Node, Workflow
    from nipype.interfaces.nipy.preprocess import ComputeMask
    from nipype.algorithms.rapidart import ArtifactDetect
    from nipype.interfaces.ants.segmentation import BrainExtraction, N4BiasFieldCorrection
    from nipype.interfaces.ants import Registration, ApplyTransforms
    from nipype.interfaces.fsl import MCFLIRT, TOPUP, ApplyTOPUP
    from nipype.interfaces.fsl.maths import MeanImage
    from nipype.interfaces.fsl import Merge as MERGE
    from nipype.interfaces.fsl.utils import Smooth
    from nipype.interfaces.nipy.preprocess import Trim
    from .interfaces import Plot_Coregistration_Montage, Plot_Quality_Control, Plot_Realignment_Parameters, Create_Covariates, Down_Sample_Precision, Create_Encoding_File, Filter_In_Mask

    ##################
    ### INPUT NODE ###
    ##################
    layout = BIDSLayout(data_dir)
    # Dartmouth subjects are named with the sub- prefix; handle whether we receive an integer identifier for indexing or the full subject id with prefix
    if isinstance(subject_id, six.string_types):
        subId = subject_id[4:]
    elif isinstance(subject_id, int):
        subId = layout.get_subjects()[subject_id]
        subject_id = 'sub-' + subId
    else:
        raise TypeError("subject_id should be a string or integer")

    # Get anat file location
    anat = layout.get(subject=subId, type='T1w', extensions='.nii.gz')[0].filename

    # Get functional file locations
    if task_name:
        funcs = [
            f.filename for f in layout.get(
                subject=subId, type='bold', task=task_name, extensions='.nii.gz')
        ]
    else:
        funcs = [
            f.filename for f in layout.get(
                subject=subId, type='bold', extensions='.nii.gz')
        ]

    # Turn functional file list into iterable Node
    func_scans = Node(IdentityInterface(fields=['scan']), name='func_scans')
    func_scans.iterables = ('scan', funcs)

    # Get TR for use in filtering below; we're assuming all BOLD runs have the same TR
    tr_length = layout.get_metadata(funcs[0])['RepetitionTime']

    #####################################
    ## TRIM ##
    #####################################
    if apply_trim:
        trim = Node(Trim(), name='trim')
        trim.inputs.begin_index = apply_trim

    #####################################
    ## DISTORTION CORRECTION ##
    #####################################
    if apply_dist_corr:
        # Get fmap file locations
        fmaps = [
            f.filename for f in layout.get(
                subject=subId, modality='fmap', extensions='.nii.gz')
        ]
        if not fmaps:
            raise IOError(
                "Distortion Correction requested but field map scans not found...")

        # Get fmap metadata
        totalReadoutTimes, measurements, fmap_pes = [], [], []
        for i, fmap in enumerate(fmaps):
            # Grab total readout time for each fmap
            totalReadoutTimes.append(layout.get_metadata(fmap)['TotalReadoutTime'])
            # Grab measurements (for some reason pyBIDS doesn't grab dcm_meta... fields from the side-car json file, and json.load doesn't either; so instead just read the header using nibabel to determine the number of scans)
            measurements.append(nib.load(fmap).header['dim'][4])
            # Get phase encoding direction
            fmap_pe = layout.get_metadata(fmap)["PhaseEncodingDirection"]
            fmap_pes.append(fmap_pe)

        encoding_file_writer = Node(interface=Create_Encoding_File(),
                                    name='create_encoding')
        encoding_file_writer.inputs.totalReadoutTimes = totalReadoutTimes
        encoding_file_writer.inputs.fmaps = fmaps
        encoding_file_writer.inputs.fmap_pes = fmap_pes
        encoding_file_writer.inputs.measurements = measurements
        encoding_file_writer.inputs.file_name = 'encoding_file.txt'

        merge_to_file_list = Node(interface=Merge(2),
                                  infields=['in1', 'in2'],
                                  name='merge_to_file_list')
        merge_to_file_list.inputs.in1 = fmaps[0]
        merge_to_file_list.inputs.in2 = fmaps[1]

        # Merge AP and PA distortion correction scans
        merger = Node(interface=MERGE(dimension='t'), name='merger')
        merger.inputs.output_type = 'NIFTI_GZ'
        merger.inputs.in_files = fmaps
        merger.inputs.merged_file = 'merged_epi.nii.gz'

        # Create distortion correction map
        topup = Node(interface=TOPUP(), name='topup')
        topup.inputs.output_type = 'NIFTI_GZ'

        # Apply distortion correction to other scans
        apply_topup = Node(interface=ApplyTOPUP(), name='apply_topup')
        apply_topup.inputs.output_type = 'NIFTI_GZ'
        apply_topup.inputs.method = 'jac'
        apply_topup.inputs.interp = 'spline'

    ###################################
    ### REALIGN ###
    ###################################
    realign_fsl = Node(MCFLIRT(), name="realign")
    realign_fsl.inputs.cost = 'mutualinfo'
    realign_fsl.inputs.mean_vol = True
    realign_fsl.inputs.output_type = 'NIFTI_GZ'
    realign_fsl.inputs.save_mats = True
    realign_fsl.inputs.save_rms = True
    realign_fsl.inputs.save_plots = True

    ###################################
    ### MEAN EPIs ###
    ###################################
    # For coregistration after realignment
    mean_epi = Node(MeanImage(), name='mean_epi')
    mean_epi.inputs.dimension = 'T'

    # For after normalization is done, to plot checks
    mean_norm_epi = Node(MeanImage(), name='mean_norm_epi')
    mean_norm_epi.inputs.dimension = 'T'

    ###################################
    ### MASK, ART, COV CREATION ###
    ###################################
    compute_mask = Node(ComputeMask(), name='compute_mask')
    compute_mask.inputs.m = .05

    art = Node(ArtifactDetect(), name='art')
    art.inputs.use_differences = [True, False]
    art.inputs.use_norm = True
    art.inputs.norm_threshold = 1
    art.inputs.zintensity_threshold = 3
    art.inputs.mask_type = 'file'
    art.inputs.parameter_source = 'FSL'

    make_cov = Node(Create_Covariates(), name='make_cov')

    ################################
    ### N4 BIAS FIELD CORRECTION ###
    ################################
    if apply_n4:
        n4_correction = Node(N4BiasFieldCorrection(), name='n4_correction')
        n4_correction.inputs.copy_header = True
        n4_correction.inputs.save_bias = False
        n4_correction.inputs.num_threads = ants_threads
        n4_correction.inputs.input_image = anat

    ###################################
    ### BRAIN EXTRACTION ###
    ###################################
    brain_extraction_ants = Node(BrainExtraction(), name='brain_extraction')
    brain_extraction_ants.inputs.dimension = 3
    brain_extraction_ants.inputs.use_floatingpoint_precision = 1
    brain_extraction_ants.inputs.num_threads = ants_threads
    brain_extraction_ants.inputs.brain_probability_mask = bet_ants_prob_mask
    brain_extraction_ants.inputs.keep_temporary_files = 1
    brain_extraction_ants.inputs.brain_template = bet_ants_template
    brain_extraction_ants.inputs.extraction_registration_mask = bet_ants_registration_mask
    brain_extraction_ants.inputs.out_prefix = 'bet'

    ###################################
    ### COREGISTRATION ###
    ###################################
    coregistration = Node(Registration(), name='coregistration')
    coregistration.inputs.float = False
    coregistration.inputs.output_transform_prefix = "meanEpi2highres"
    coregistration.inputs.transforms = ['Rigid']
    coregistration.inputs.transform_parameters = [(0.1, )]  # one tuple per transform
    coregistration.inputs.number_of_iterations = [[1000, 500, 250, 100]]
    coregistration.inputs.dimension = 3
    coregistration.inputs.num_threads = ants_threads
    coregistration.inputs.write_composite_transform = True
    coregistration.inputs.collapse_output_transforms = True
    coregistration.inputs.metric = ['MI']
    coregistration.inputs.metric_weight = [1]
    coregistration.inputs.radius_or_number_of_bins = [32]
    coregistration.inputs.sampling_strategy = ['Regular']
    coregistration.inputs.sampling_percentage = [0.25]
    coregistration.inputs.convergence_threshold = [1e-08]
    coregistration.inputs.convergence_window_size = [10]
    coregistration.inputs.smoothing_sigmas = [[3, 2, 1, 0]]
    coregistration.inputs.sigma_units = ['mm']
    coregistration.inputs.shrink_factors = [[4, 3, 2, 1]]
    coregistration.inputs.use_estimate_learning_rate_once = [True]
    coregistration.inputs.use_histogram_matching = [False]
    coregistration.inputs.initial_moving_transform_com = True
    coregistration.inputs.output_warped_image = True
    coregistration.inputs.winsorize_lower_quantile = 0.01
    coregistration.inputs.winsorize_upper_quantile = 0.99

    ###################################
    ### NORMALIZATION ###
    ###################################
    # Settings Explanations
    # Only a few key settings are worth adjusting; most others relate to how the ANTs optimizer starts or iterates and won't make a ton of difference
    # Brian Avants referred to these settings as the last "best tested" when he was aligning fMRI data: https://github.com/ANTsX/ANTsRCore/blob/master/R/antsRegistration.R#L275
    # Things that matter the most:
    # smoothing_sigmas:
    #     how much gaussian smoothing to apply when performing registration; probably want the upper limit of this to match the resolution the data is collected at, e.g. 3mm
    #     Old settings [[3,2,1,0]]*3
    # shrink_factors
    #     The coarseness with which to do registration
    #     Old settings [[8,4,2,1]] * 3
    #     >= 8 may result in some problems, causing big chunks of cortex with little fine-grained spatial structure to be moved to other parts of cortex
    # Other settings
    # transform_parameters:
    #     how much regularization to do for fitting that transformation
    #     for syn this pertains to the gradient regularization term, and the flow and elastic terms. Leave the syn settings alone as they seem to be the most well tested across published data sets
    # radius_or_number_of_bins
    #     This is the bin size for MI metrics and 32 is probably adequate for most use cases. Increasing this might increase precision (e.g. to 64) but takes exponentially longer
    # use_histogram_matching
    #     Use image intensity distribution to guide registration
    #     Leave it on for within-modality registration (e.g. T1 -> MNI), but off for between-modality registration (e.g. EPI -> T1)
    # convergence_threshold
    #     threshold for the optimizer
    # convergence_window_size
    #     how many samples the optimizer should average to compute the threshold
    # sampling_strategy
    #     what strategy ANTs should use to initialize the transform; Regular here refers to approximately random sampling around the center of the image mass
    normalization = Node(Registration(), name='normalization')
    normalization.inputs.float = False
    normalization.inputs.collapse_output_transforms = True
    normalization.inputs.convergence_threshold = [1e-06, 1e-06, 1e-07]
    normalization.inputs.convergence_window_size = [10]
    normalization.inputs.dimension = 3
    normalization.inputs.fixed_image = MNItemplate
    normalization.inputs.initial_moving_transform_com = True
    normalization.inputs.metric = ['MI', 'MI', 'CC']
    normalization.inputs.metric_weight = [1.0] * 3
    normalization.inputs.number_of_iterations = [[1000, 500, 250, 100],
                                                 [1000, 500, 250, 100],
                                                 [100, 70, 50, 20]]
    normalization.inputs.num_threads = ants_threads
    normalization.inputs.output_transform_prefix = 'anat2template'
    normalization.inputs.output_inverse_warped_image = True
    normalization.inputs.output_warped_image = True
    normalization.inputs.radius_or_number_of_bins = [32, 32, 4]
    normalization.inputs.sampling_percentage = [0.25, 0.25, 1]
    normalization.inputs.sampling_strategy = ['Regular', 'Regular', 'None']
    normalization.inputs.shrink_factors = [[4, 3, 2, 1]] * 3
    normalization.inputs.sigma_units = ['vox'] * 3
    # antsRegistration requires one sigma per registration level (4 per stage here)
    normalization.inputs.smoothing_sigmas = [[3, 2, 1, 0]] * 3
    normalization.inputs.transforms = ['Rigid', 'Affine', 'SyN']
    normalization.inputs.transform_parameters = [(0.1, ), (0.1, ),
                                                 (0.1, 3.0, 0.0)]
    normalization.inputs.use_histogram_matching = True
    normalization.inputs.winsorize_lower_quantile = 0.005
    normalization.inputs.winsorize_upper_quantile = 0.995
    normalization.inputs.write_composite_transform = True

    ###################################
    ### APPLY TRANSFORMS AND SMOOTH ###
    ###################################
    merge_transforms = Node(Merge(2), iterfield=['in2'], name='merge_transforms')

    # Used for epi -> mni, via (coreg + norm)
    apply_transforms = Node(ApplyTransforms(), iterfield=['input_image'],
                            name='apply_transforms')
    apply_transforms.inputs.input_image_type = 3
    apply_transforms.inputs.float = False
    apply_transforms.inputs.num_threads = ants_threads
    apply_transforms.inputs.environ = {}
    apply_transforms.inputs.interpolation = 'BSpline'
    apply_transforms.inputs.invert_transform_flags = [False, False]
    apply_transforms.inputs.reference_image = MNItemplate

    # Used for t1 segmented -> mni, via (norm)
    apply_transform_seg = Node(ApplyTransforms(), name='apply_transform_seg')
    apply_transform_seg.inputs.input_image_type = 3
    apply_transform_seg.inputs.float = False
    apply_transform_seg.inputs.num_threads = ants_threads
    apply_transform_seg.inputs.environ = {}
    apply_transform_seg.inputs.interpolation = 'MultiLabel'
    apply_transform_seg.inputs.invert_transform_flags = [False]
    apply_transform_seg.inputs.reference_image = MNItemplate

    ###################################
    ### PLOTS ###
    ###################################
    plot_realign = Node(Plot_Realignment_Parameters(), name="plot_realign")
    plot_qa = Node(Plot_Quality_Control(), name="plot_qa")
    plot_normalization_check = Node(Plot_Coregistration_Montage(),
                                    name="plot_normalization_check")
    plot_normalization_check.inputs.canonical_img = MNItemplatehasskull

    ############################################
    ### FILTER, SMOOTH, DOWNSAMPLE PRECISION ###
    ############################################
    # Use cosanlab_preproc for down sampling
    down_samp = Node(Down_Sample_Precision(), name="down_samp")

    # Use FSL for smoothing
    if apply_smooth:
        smooth = Node(Smooth(), name='smooth')
        if isinstance(apply_smooth, list):
            smooth.iterables = ("fwhm", apply_smooth)
        elif isinstance(apply_smooth, int) or isinstance(apply_smooth, float):
            smooth.inputs.fwhm = apply_smooth
        else:
            raise ValueError("apply_smooth must be a list or int/float")

    # Use cosanlab_preproc for low-pass filtering
    if apply_filter:
        lp_filter = Node(Filter_In_Mask(), name='lp_filter')
        lp_filter.inputs.mask = MNImask
        lp_filter.inputs.sampling_rate = tr_length
        lp_filter.inputs.high_pass_cutoff = 0
        if isinstance(apply_filter, list):
            lp_filter.iterables = ("low_pass_cutoff", apply_filter)
        elif isinstance(apply_filter, int) or isinstance(apply_filter, float):
            lp_filter.inputs.low_pass_cutoff = apply_filter
        else:
            raise ValueError("apply_filter must be a list or int/float")

    ###################
    ### OUTPUT NODE ###
    ###################
    # Collect all final outputs in the output dir and get rid of file name additions
    datasink = Node(DataSink(), name='datasink')
    datasink.inputs.base_directory = output_final_dir
    datasink.inputs.container = subject_id

    # Remove substitutions
    data_dir_parts = data_dir.split('/')[1:]
    prefix = ['_scan_'] + data_dir_parts + [subject_id] + ['func']
    func_scan_names = [os.path.split(elem)[-1] for elem in funcs]
    to_replace = []
    for elem in func_scan_names:
        bold_name = elem.split(subject_id + '_')[-1]
        bold_name = bold_name.split('.nii.gz')[0]
        to_replace.append(('..'.join(prefix + [elem]), bold_name))
    datasink.inputs.substitutions = to_replace

    #####################
    ### INIT WORKFLOW ###
    #####################
    workflow = Workflow(name=subId)
    workflow.base_dir = output_interm_dir

    ############################
    ######### PART (1a) #########
    # func -> discorr -> trim -> realign
    # OR
    # func -> trim -> realign
    # OR
    # func -> discorr -> realign
    # OR
    # func -> realign
    ############################
    if apply_dist_corr:
        workflow.connect([(encoding_file_writer, topup,
                           [('encoding_file', 'encoding_file')]),
                          (encoding_file_writer, apply_topup,
                           [('encoding_file', 'encoding_file')]),
                          (merger, topup, [('merged_file', 'in_file')]),
                          (func_scans, apply_topup, [('scan', 'in_files')]),
                          (topup, apply_topup,
                           [('out_fieldcoef', 'in_topup_fieldcoef'),
                            ('out_movpar', 'in_topup_movpar')])])
        if apply_trim:
            # Dist Corr + Trim
            workflow.connect([(apply_topup, trim, [('out_corrected', 'in_file')]),
                              (trim, realign_fsl, [('out_file', 'in_file')])])
        else:
            # Dist Corr + No Trim
            workflow.connect([(apply_topup, realign_fsl,
                               [('out_corrected', 'in_file')])])
    else:
        if apply_trim:
            # No Dist Corr + Trim
            workflow.connect([(func_scans, trim, [('scan', 'in_file')]),
                              (trim, realign_fsl, [('out_file', 'in_file')])])
        else:
            # No Dist Corr + No Trim
            workflow.connect([
                (func_scans, realign_fsl, [('scan', 'in_file')]),
            ])

    ############################
    ######### PART (1n) #########
    # anat -> N4 -> bet
    # OR
    # anat -> bet
    ############################
    if apply_n4:
        workflow.connect([(n4_correction, brain_extraction_ants,
                           [('output_image', 'anatomical_image')])])
    else:
        brain_extraction_ants.inputs.anatomical_image = anat

    ##########################################
    ############### PART (2) #################
    # realign -> coreg -> mni (via t1)
    # t1 -> mni
    # covariate creation
    # plot creation
    ###########################################
    workflow.connect([
        (realign_fsl, plot_realign, [('par_file', 'realignment_parameters')]),
        (realign_fsl, plot_qa, [('out_file', 'dat_img')]),
        (realign_fsl, art, [('out_file', 'realigned_files'),
                            ('par_file', 'realignment_parameters')]),
        (realign_fsl, mean_epi, [('out_file', 'in_file')]),
        (realign_fsl, make_cov, [('par_file', 'realignment_parameters')]),
        (mean_epi, compute_mask, [('out_file', 'mean_volume')]),
        (compute_mask, art, [('brain_mask', 'mask_file')]),
        (art, make_cov, [('outlier_files', 'spike_id')]),
        (art, plot_realign, [('outlier_files', 'outliers')]),
        (plot_qa, make_cov, [('fd_outliers', 'fd_outliers')]),
        (brain_extraction_ants, coregistration, [('BrainExtractionBrain',
                                                  'fixed_image')]),
        (mean_epi, coregistration, [('out_file', 'moving_image')]),
        (brain_extraction_ants, normalization, [('BrainExtractionBrain',
                                                 'moving_image')]),
        (coregistration, merge_transforms, [('composite_transform', 'in2')]),
        (normalization, merge_transforms, [('composite_transform', 'in1')]),
        (merge_transforms, apply_transforms, [('out', 'transforms')]),
        (realign_fsl, apply_transforms, [('out_file', 'input_image')]),
        (apply_transforms, mean_norm_epi, [('output_image', 'in_file')]),
        (normalization, apply_transform_seg, [('composite_transform',
                                               'transforms')]),
        (brain_extraction_ants, apply_transform_seg,
         [('BrainExtractionSegmentation', 'input_image')]),
        (mean_norm_epi, plot_normalization_check, [('out_file', 'wra_img')])
    ])

    ##################################################
    ################### PART (3) #####################
    # epi (in mni) -> filter -> smooth -> down sample
    # OR
    # epi (in mni) -> filter -> down sample
    # OR
    # epi (in mni) -> smooth -> down sample
    # OR
    # epi (in mni) -> down sample
    ###################################################
    if apply_filter:
        workflow.connect([(apply_transforms, lp_filter, [('output_image',
                                                          'in_file')])])
        if apply_smooth:
            # Filtering + Smoothing
            workflow.connect([(lp_filter, smooth, [('out_file', 'in_file')]),
                              (smooth, down_samp, [('smoothed_file', 'in_file')])])
        else:
            # Filtering + No Smoothing
            workflow.connect([(lp_filter, down_samp, [('out_file', 'in_file')])])
    else:
        if apply_smooth:
            # No Filtering + Smoothing
            workflow.connect([
                (apply_transforms, smooth, [('output_image', 'in_file')]),
                (smooth, down_samp, [('smoothed_file', 'in_file')])
            ])
        else:
            # No Filtering + No Smoothing
            workflow.connect([(apply_transforms, down_samp, [('output_image',
                                                              'in_file')])])

    ##########################################
    ############### PART (4) #################
    # down sample -> save
    # plots -> save
    # covs -> save
    # t1 (in mni) -> save
    # t1 segmented masks (in mni) -> save
    ##########################################
    workflow.connect([
        (down_samp, datasink, [('out_file', 'functional.@down_samp')]),
        (plot_realign, datasink, [('plot', 'functional.@plot_realign')]),
        (plot_qa, datasink, [('plot', 'functional.@plot_qa')]),
        (plot_normalization_check, datasink,
         [('plot', 'functional.@plot_normalization')]),
        (make_cov, datasink, [('covariates', 'functional.@covariates')]),
        (normalization, datasink, [('warped_image', 'structural.@normanat')]),
        (apply_transform_seg, datasink, [('output_image',
                                          'structural.@normanatseg')])
    ])

    if not os.path.exists(os.path.join(output_dir, 'pipeline.png')):
        workflow.write_graph(dotfilename=os.path.join(output_dir, 'pipeline'),
                             format='png')

    print(f"Creating workflow for subject: {subject_id}")
    if ants_threads == 8:
        print(f"ANTs will utilize the default of {ants_threads} threads for parallel processing.")
    else:
        print(f"ANTs will utilize the user-requested {ants_threads} threads for parallel processing.")

    return workflow
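A detail of the connections above that is easy to miss: antsApplyTransforms applies its transform list in stack order, so the last transform listed is applied to the moving image first. Placing the normalization composite in `in1` and the coregistration composite in `in2` therefore moves the EPI first to T1 space and then to MNI. Below is a minimal standalone sketch of that ordering; `epi_to_mni` is a hypothetical helper, the arguments are paths you would supply, and nipype checks that all input files exist on disk:

from nipype.interfaces.ants import ApplyTransforms

def epi_to_mni(epi_path, mni_template_path, norm_composite, coreg_composite):
    # Hypothetical convenience wrapper, not part of the workflow above
    at = ApplyTransforms()
    at.inputs.input_image = epi_path
    at.inputs.reference_image = mni_template_path
    at.inputs.input_image_type = 3
    at.inputs.interpolation = 'BSpline'
    # Last transform listed is applied first:
    # EPI -> T1 (coregistration), then T1 -> MNI (normalization)
    at.inputs.transforms = [norm_composite, coreg_composite]
    return at.run()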
def se_fmap_workflow(name=WORKFLOW_NAME, settings=None):
    """
    Estimates the fieldmap using TOPUP on a series of :abbr:`SE (Spin-Echo)`
    images acquired with varying :abbr:`PE (phase encoding)` direction.

    The ``settings`` dict must provide a ``work_dir`` key, which is used to
    store the report images.

    Outputs::

      outputnode.mag_brain - The average magnitude image, skull-stripped
      outputnode.fmap_mask - The brain mask applied to the fieldmap
      outputnode.fieldmap - The estimated fieldmap in Hz
    """
    if settings is None:
        settings = {}

    workflow = pe.Workflow(name=name)
    inputnode = pe.Node(niu.IdentityInterface(fields=['input_images']),
                        name='inputnode')
    outputnode = pe.Node(
        niu.IdentityInterface(fields=['fieldmap', 'fmap_mask', 'mag_brain']),
        name='outputnode')

    # Read metadata
    meta = pe.MapNode(
        ReadSidecarJSON(fields=['TotalReadoutTime', 'PhaseEncodingDirection']),
        iterfield=['in_file'],
        name='metadata')

    encfile = pe.Node(interface=niu.Function(
        input_names=['input_images', 'in_dict'],
        output_names=['parameters_file'],
        function=create_encoding_file),
        name='TopUp_encfile',
        updatehash=True)

    # Head motion correction
    fslmerge = pe.Node(fsl.Merge(dimension='t'), name='SE_merge')
    hmc_se = pe.Node(fsl.MCFLIRT(cost='normcorr', mean_vol=True),
                     name='SE_head_motion_corr')
    fslsplit = pe.Node(fsl.Split(dimension='t'), name='SE_split')

    # Run topup to estimate field distortions; do not estimate movement
    # since it is done in hmc_se
    topup = pe.Node(fsl.TOPUP(estmov=0), name='TopUp')

    # Use the least-squares method to correct the dropout of the SE images
    unwarp_mag = pe.Node(fsl.ApplyTOPUP(method='lsr'), name='TopUpApply')

    # Remove bias
    inu_n4 = pe.Node(N4BiasFieldCorrection(dimension=3), name='SE_bias')

    # Skull strip the corrected SE image to get the reference brain and mask
    mag_bet = pe.Node(fsl.BET(mask=True, robust=True), name='SE_brain')

    workflow.connect([
        (inputnode, meta, [('input_images', 'in_file')]),
        (inputnode, encfile, [('input_images', 'input_images')]),
        (inputnode, fslmerge, [('input_images', 'in_files')]),
        (fslmerge, hmc_se, [('merged_file', 'in_file')]),
        (meta, encfile, [('out_dict', 'in_dict')]),
        (encfile, topup, [('parameters_file', 'encoding_file')]),
        (hmc_se, topup, [('out_file', 'in_file')]),
        (topup, unwarp_mag, [('out_fieldcoef', 'in_topup_fieldcoef'),
                             ('out_movpar', 'in_topup_movpar')]),
        (encfile, unwarp_mag, [('parameters_file', 'encoding_file')]),
        (hmc_se, fslsplit, [('out_file', 'in_file')]),
        (fslsplit, unwarp_mag, [('out_files', 'in_files'),
                                (('out_files', gen_list), 'in_index')]),
        (unwarp_mag, inu_n4, [('out_corrected', 'input_image')]),
        (inu_n4, mag_bet, [('output_image', 'in_file')]),
        (topup, outputnode, [('out_field', 'fieldmap')]),
        (mag_bet, outputnode, [('out_file', 'mag_brain'),
                               ('mask_file', 'fmap_mask')])
    ])

    # Reports section
    se_png = pe.Node(niu.Function(
        input_names=['in_file', 'overlay_file', 'out_file'],
        output_names=['out_file'],
        function=stripped_brain_overlay),
        name='PNG_SE_corr')
    se_png.inputs.out_file = 'corrected_SE_and_mask.png'

    datasink = pe.Node(
        nio.DataSink(base_directory=op.join(settings['work_dir'], 'images')),
        name='datasink',
        parameterization=False)

    workflow.connect([
        (unwarp_mag, se_png, [('out_corrected', 'overlay_file')]),
        (mag_bet, se_png, [('mask_file', 'in_file')]),
        (se_png, datasink, [('out_file', '@corrected_SE_and_mask')])
    ])

    return workflow
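A minimal usage sketch for the workflow above, assuming the module-level helpers it references (create_encoding_file, gen_list, stripped_brain_overlay, ReadSidecarJSON) are importable alongside it, and that FSL and ANTs are installed; the SE EPI file names are hypothetical:

# Hypothetical paths; 'work_dir' is required by the report DataSink above
wf = se_fmap_workflow(name='sefmap', settings={'work_dir': '/tmp/work'})
wf.base_dir = '/tmp/work'
wf.get_node('inputnode').inputs.input_images = [
    'sub-01_dir-AP_epi.nii.gz',
    'sub-01_dir-PA_epi.nii.gz',
]
wf.run()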
def _bias_field_correction(self, orig_path, temp_path):
    '''_BIAS_FIELD_CORRECTION

    Apply the N4BiasFieldCorrection method to a volume and save the output
    into the temporary folder. Settings can be found in btc_settings.py.
    The original paper can be found here:
    https://www.ncbi.nlm.nih.gov/pubmed/20378467

    Inputs:
    -------
    - orig_path: path of the original volume
    - temp_path: path of the temporary volume, which is the output of the
      bias field correction

    --- NOTE ---
    This function has been tested on .nii.gz files in both Windows 7 and
    Ubuntu 16.04. It is necessary to install or compile ANTs first.

    For Windows:
    Download ANTs 2.1 for Windows from this link:
    https://github.com/ANTsX/ANTs/releases.
    Extract the files into a folder, and add this folder's path to the
    system PATH.

    For Ubuntu:
    Download the source code from here: https://github.com/ANTsX/ANTs.
    Compile ANTs following the instructions at:
    https://github.com/ANTsX/ANTs/wiki/Compiling-ANTs-on-Linux-and-Mac-OS.
    '''
    print("N4ITK on: " + orig_path)
    n4 = N4BiasFieldCorrection()
    n4.inputs.input_image = orig_path
    n4.inputs.output_image = temp_path
    n4.inputs.dimension = N4_DIMENSION
    n4.inputs.n_iterations = N4_ITERATION
    n4.inputs.shrink_factor = N4_SHRINK_FACTOR
    n4.inputs.convergence_threshold = N4_THRESHOLD
    n4.inputs.bspline_fitting_distance = N4_BSPLINE

    # Run the command line silently on Ubuntu
    n4.run()

    # To run on Windows, call the assembled command line via subprocess instead.
    # Do not forget to import the dependency at the head of the script:
    # import subprocess

    # Run the command line with information printed on Windows:
    # subprocess.call(n4.cmdline.split(" "))

    # Run the command line silently on Windows:
    # devnull = open(os.devnull, 'w')
    # subprocess.call(n4.cmdline.split(" "), stdout=devnull, stderr=devnull)

    return
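Consolidating the Windows notes above: every nipype command-line interface exposes its assembled command via `.cmdline`, so the same N4 call can be run silently through `subprocess` where `n4.run()` is too noisy. A small sketch under those assumptions; `run_n4_silently` is a hypothetical helper, and the literal values stand in for the btc_settings constants used above:

import os
import subprocess

from nipype.interfaces.ants import N4BiasFieldCorrection

def run_n4_silently(orig_path, temp_path):
    # Build the same command the nipype interface would execute
    n4 = N4BiasFieldCorrection()
    n4.inputs.input_image = orig_path
    n4.inputs.output_image = temp_path
    n4.inputs.dimension = 3                      # e.g. N4_DIMENSION
    n4.inputs.n_iterations = [100, 100, 60, 40]  # e.g. N4_ITERATION
    # Discard stdout/stderr so the call stays silent on Windows as well
    with open(os.devnull, 'w') as devnull:
        subprocess.call(n4.cmdline.split(" "), stdout=devnull, stderr=devnull)
    return temp_path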