def get_Tmean(in_file_path):
    """Compute the temporal mean (``fslmaths -Tmean``) of a 4D image.

    Parameters
    ----------
    in_file_path : str
        Path to the input 4D NIfTI image.

    Returns
    -------
    numpy.ndarray
        The voxel data of the temporal-mean image, fully loaded into
        memory (the temporary file backing it is deleted on return).
    """
    with tempfile.TemporaryDirectory() as tmpdirname:
        # Write the intermediate image inside the temporary directory so it
        # is cleaned up automatically.  The original wrote 'tMean.nii.gz'
        # into the current working directory (leaking the file) even though
        # the temp-dir path was already prepared in a commented-out line.
        tMean_path = f'{tmpdirname}/tMean.nii.gz'
        mean_image = MeanImage(in_file=in_file_path,
                               dimension='T',
                               out_file=tMean_path)
        mean_image.run()
        # Materialize the data BEFORE the temporary directory is removed:
        # returning the lazy `.dataobj` proxy would point at a deleted file
        # and fail on first access.
        return nib.load(tMean_path).get_fdata()
def cale(input_dir, output_dir):
    """Build a cumulative ALE (cALE) image from a set of ALE maps.

    Merges every ``*.nii.gz`` map in *input_dir* along time, averages the
    merged series, rescales the mean by the number of maps (yielding a
    voxel-wise sum of contributing maps), and thresholds the result at
    ``floor(n / 2)``.

    Parameters
    ----------
    input_dir : str
        Directory containing the per-experiment ALE maps (``*.nii.gz``).
    output_dir : str
        Directory where ``cALE.nii.gz`` and the thresholded map are written.
    """
    fns = glob(op.join(input_dir, '*.nii.gz'))
    cale_path = op.join(output_dir, 'cALE.nii.gz')

    # fslmerge -t: concatenate all input maps along the 4th dimension.
    merger = Merge()
    merger.inputs.in_files = fns
    merger.inputs.dimension = 't'
    merger.inputs.merged_file = cale_path
    merger.run()  # BUGFIX: the original never ran any of these interfaces

    # fslmaths -Tmean: average across the merged time dimension.
    meanimg = MeanImage()
    meanimg.inputs.in_file = cale_path
    # BUGFIX: the trait is `dimension`; assigning the misspelled
    # `dimensions` raises a TraitError in nipype.
    meanimg.inputs.dimension = 'T'
    meanimg.inputs.out_file = cale_path
    meanimg.run()

    # Scale the mean back up by n, turning it into a voxel-wise sum.
    maths = MultiImageMaths()
    maths.inputs.in_file = cale_path
    maths.inputs.op_string = '-mul {0}'.format(len(fns))
    maths.inputs.out_file = cale_path
    maths.run()

    # Keep only voxels reached by at least half of the input maps.
    thr = np.floor(len(fns) / 2)
    thresh = Threshold()
    thresh.inputs.in_file = cale_path
    thresh.inputs.thresh = thr
    thresh.inputs.direction = 'below'
    thresh.inputs.out_file = op.join(
        output_dir, 'cALE_thresh-{0}.nii.gz'.format(thr))
    thresh.run()
def mni_tmplt(db_path, img_list):
    """Create a scaled study-specific spatial template from a list of images.

    Merges *img_list* along time, averages, smooths (8 mm FWHM) via a nipype
    workflow, then rescales the mean image to a 0-1 range and saves it as
    ``<db_path>/extra/st_sp_tmpl.nii``.

    Parameters
    ----------
    db_path : str
        Database root; outputs go into its ``extra`` subdirectory.
    img_list : list of str
        Paths of the images to merge.
    """
    merger = pe.Node(Merge(), name='merger')
    merger.inputs.in_files = img_list
    merger.inputs.dimension = 't'
    merger.inputs.output_type = 'NIFTI'

    # NOTE(review): `dimension` is left at its MeanImage default here —
    # presumably 'T'; confirm against the nipype interface defaults.
    mean = pe.Node(MeanImage(), name='mean')
    mean.inputs.output_type = 'NIFTI'
    mean.inputs.out_file = os.path.join(db_path, 'extra', 'mean.nii')

    sm = pe.Node(Smooth(), name='sm')
    sm.inputs.fwhm = 8

    ppln = pe.Workflow(name='ppln')
    ppln.connect([
        (merger, mean, [('merged_file', 'in_file')]),
        (mean, sm, [('out_file', 'in_files')]),
    ])
    ppln.run()

    img = nib.load(os.path.join(db_path, 'extra', 'mean.nii'))
    # FIX: get_data() is deprecated (removed in nibabel >= 5); use
    # get_fdata() and read the data once instead of twice.
    data = img.get_fdata()
    scld_vox = data / data.max()
    new_img = nib.Nifti1Image(scld_vox, img.affine, img.header)
    nib.save(new_img, os.path.join(db_path, 'extra', 'st_sp_tmpl.nii'))
def test_MeanImage_outputs():
    """Yield nose-style checks of MeanImage output-spec trait metadata."""
    expected = {'out_file': {}}
    spec_traits = MeanImage.output_spec().traits()
    for trait_name, meta in expected.items():
        for meta_key, expected_value in meta.items():
            yield assert_equal, getattr(spec_traits[trait_name],
                                        meta_key), expected_value
def get_f_means(self):
    """Compute a temporal mean for every functional scan in the selection.

    For each row of ``self.data_selection`` with ``datatype == 'func'``,
    runs ``fslmaths -Tmean`` on the scan, writes the result under
    ``self.tmean_dir/sub-*/ses-*/<datatype>/``, and records the output
    location in the ``tmean_path`` column of ``self.data_selection``.
    """
    func_rows = self.data_selection.loc[
        self.data_selection.datatype == 'func']
    progress = tqdm(func_rows.iterrows(),
                    total=len(func_rows),
                    postfix='Tmean')
    for _, row in progress:
        # BIDS-style destination: sub-<id>/ses-<id>/<datatype>/
        target_dir = (self.tmean_dir / f'sub-{row.subject}'
                      / f'ses-{row.session}' / row.datatype)
        target_dir.mkdir(parents=True, exist_ok=True)
        tmean_file = target_dir / row.path.split('/')[-1]
        MeanImage(in_file=row.path,
                  dimension='T',
                  out_file=tmean_file).run()
        # Record where this scan's temporal mean was written.
        self.data_selection.loc[
            self.data_selection.path == row.path, 'tmean_path'] = tmean_file
def test_MeanImage_inputs():
    """Yield nose-style checks of MeanImage input-spec trait metadata."""
    expected = {
        'args': {'argstr': '%s'},
        'dimension': {'argstr': '-%smean', 'position': 4, 'usedefault': True},
        'environ': {'nohash': True, 'usedefault': True},
        'ignore_exception': {'nohash': True, 'usedefault': True},
        'in_file': {'argstr': '%s', 'mandatory': True, 'position': 2},
        'internal_datatype': {'argstr': '-dt %s', 'position': 1},
        'nan2zeros': {'argstr': '-nan', 'position': 3},
        'out_file': {'argstr': '%s', 'genfile': True,
                     'hash_files': False, 'position': -2},
        'output_datatype': {'argstr': '-odt %s', 'position': -1},
        'output_type': {},
        'terminal_output': {'mandatory': True, 'nohash': True},
    }
    spec_traits = MeanImage.input_spec().traits()
    for trait_name, meta in expected.items():
        for meta_key, expected_value in meta.items():
            yield assert_equal, getattr(spec_traits[trait_name],
                                        meta_key), expected_value
def test_MeanImage_inputs():
    """Yield nose-style checks of MeanImage input-spec trait metadata."""
    # Flat (trait, metadata-key, expected-value) triples; traits whose
    # metadata map is empty (output_type) contribute no checks.
    cases = [
        ('args', 'argstr', '%s'),
        ('dimension', 'argstr', '-%smean'),
        ('dimension', 'position', 4),
        ('dimension', 'usedefault', True),
        ('environ', 'nohash', True),
        ('environ', 'usedefault', True),
        ('ignore_exception', 'nohash', True),
        ('ignore_exception', 'usedefault', True),
        ('in_file', 'argstr', '%s'),
        ('in_file', 'mandatory', True),
        ('in_file', 'position', 2),
        ('internal_datatype', 'argstr', '-dt %s'),
        ('internal_datatype', 'position', 1),
        ('nan2zeros', 'argstr', '-nan'),
        ('nan2zeros', 'position', 3),
        ('out_file', 'argstr', '%s'),
        ('out_file', 'genfile', True),
        ('out_file', 'hash_files', False),
        ('out_file', 'position', -2),
        ('output_datatype', 'argstr', '-odt %s'),
        ('output_datatype', 'position', -1),
        ('terminal_output', 'mandatory', True),
        ('terminal_output', 'nohash', True),
    ]
    inputs = MeanImage.input_spec()
    for trait_name, meta_key, expected in cases:
        yield assert_equal, getattr(inputs.traits()[trait_name],
                                    meta_key), expected
filename = os.path.join(data_dir, subject_id, 'nifti/resting', 'slice_timing.txt') print "getting slice time sequence from", filename with open(filename) as f: st = map(float, f) print st realigner.inputs.slice_times = st else: # ascend alternate every 2nd slice, starting at 2nd slice realigner.inputs.slice_times = 'asc_alt_2_1' realigner.inputs.slice_info = 2 realigner.run() # Step#3 get T-mean of rs image after realignment fslmaths = MeanImage() fslmaths.inputs.in_file = 'corr_rest_roi.nii.gz' fslmaths.inputs.out_file = 'mean_corr_rest_roi.nii.gz' fslmaths.inputs.dimension = 'T' fslmaths.run() # Step#4 get binary mask & skull stripped imag img_StMoco = os.path.abspath('corr_rest_roi.nii.gz') btr = fsl.BET() btr.inputs.in_file = img_StMoco btr.inputs.mask = True btr.run() # Step#5 tsnr calculation on realigned image tsnr = misc.TSNR()
# merge all gray matter probability map across subjects gm_merged = os.path.join(outdir, 'gm_prob_merged.nii.gz') merger = Merge() merger.inputs.in_files = gm_file_list merger.inputs.dimension = 't' merger.inputs.output_type = 'NIFTI_GZ' merger.inputs.merged_file = gm_merged merger.run() # get the average of the merged map gm_merged_ave = gm_merged[:-7] + '_mean.nii.gz' print('...average GM map: ', gm_merged_ave) tmean = MeanImage() tmean.inputs.in_file = gm_merged tmean.inputs.dimension = 'T' tmean.inputs.output_type = 'NIFTI_GZ' tmean.inputs.out_file = gm_merged_ave tmean.run() # binarize the group level gray matter map gm_mask = gm_merged_ave[:-7] + '_mask_060.nii.gz' binarize = MathsCommand() binarize.inputs.args = '-thr 0.60 -bin' binarize.inputs.in_file = gm_merged_ave binarize.inputs.out_file = gm_mask binarize.run()
def predict_mask(in_file: str, masking_config_path=None,
                 input_type: str = 'anat'):
    """
    The image is first resampled into the resolution of the template space,
    which has a voxel size of 0.2 x 0.2 x 0.2. This is done with the Resample
    command from the FSL library which is an analysis tool for FMRI, MRI and
    DTI brain imaging data. Then, the image is preprocessed using the
    preprocessing methods of the model class. The predictions of the model are
    reconstructed to a 3D mask via the command Nifit1Image from nibabel. This
    is done using the same affine space as the input image. The latter is then
    reshaped into the original shape inverting the preprocessing step, either
    with the opencv resize method or by cropping. Additionally, the binary
    mask is resampled into its original affine space, before being multiplied
    with the brain image to extract the ROI.

    Parameters
    ----------
    in_file : str
        path to the file that is to be masked
    masking_config_path : str
        path to the masking config. The masking config is a json file. All
        parameters have default values that will be set in the
        "get_masking_opts" method. The masking config may contain following
        parameters (if any of them is not given in the config, the default
        value will be taken from mlebe/masking/config/default_schema.json):
        model_folder_path: str
            The path to the pretrained model. If not set the default mlebe
            model will be selected.
        use_cuda: bool
            boolean indicating if cuda will be used for the masking
        visualisation_path: str
            if set, the masking predictions will be saved to this destination.
        crop_values:
            if set, the input bids images will be cropped with given values
            in the x-y dimensions.
        bias_field_correction: dict
            If set, the input image will be bias corrected before given as
            input to the model. The parameter of the bias correction can be
            given as a dictionary. The default values can be found in the
            default_schema.json config.
    input_type : str
        either 'func' for CDV or BOLD contrast or 'anat' for T2 contrast

    Returns
    -------
    resampled_mask_path : str
        path to the mask
    nii_path_masked : str
        path to the masked image
    """
    # Function-local imports: this function is used as a nipype Function
    # node, so everything it needs must be imported inside the body.
    import os
    from os import path
    from pathlib import Path
    import ants
    import nibabel as nib
    import numpy as np
    import pandas as pd
    from ants.registration import resample_image
    from nipype.interfaces.fsl.maths import MeanImage
    from mlebe import log
    from mlebe.masking.utils import get_mask, get_mlebe_models, get_biascorrect_opts_defaults
    from mlebe.masking.utils import remove_outliers, get_masking_opts, crop_bids_image, \
        reconstruct_image, pad_to_shape, get_model_config
    log.info(
        f'Starting masking of {in_file} with config {masking_config_path}.')
    masking_opts = get_masking_opts(masking_config_path, input_type)

    # Fast path: if a directory of precomputed masks is configured, look the
    # input up in its data_selection.csv and return the stored paths instead
    # of running the model.
    if masking_opts['masked_dir']:
        masked_dir = masking_opts['masked_dir']
        df_selection = pd.read_csv(f'{masked_dir}/data_selection.csv')
        df_selection = df_selection.loc[df_selection.path.str.endswith(
            in_file)]
        # .item() raises if the selection does not contain exactly one row.
        nii_path_masked = df_selection.masked_path.item()
        resampled_mask_path = df_selection.mask_path.item()
        assert nii_path_masked, f'nii_path_masked not found for {in_file}'
        assert resampled_mask_path, f'nii_path_masked not found for {resampled_mask_path}'
        assert Path(nii_path_masked).exists(
        ), f'nii_path_masked {nii_path_masked} does not exist.'
        assert Path(resampled_mask_path).exists(
        ), f'resampled_mask_path {resampled_mask_path} does not exist.'
        return nii_path_masked, [resampled_mask_path], resampled_mask_path
    if 'model_folder_path' not in masking_opts or not masking_opts[
            'model_folder_path']:
        # if no model_folder_path is given in the config, the default models are selected.
        masking_opts['model_folder_path'] = get_mlebe_models(input_type)
    model_config = get_model_config(masking_opts)
    # NOTE(review): `input` shadows the builtin; it holds the path of the
    # image currently fed to the pipeline.
    input = in_file
    # Functional images are 4D: collapse to a 3D temporal mean first.
    if input_type == 'func':
        tMean_path = 'tMean_.nii.gz'
        mean_image = MeanImage(in_file=input,
                               dimension='T',
                               out_file=tMean_path)
        mean_image.run()
        # command = 'fslmaths {a} -Tmean {b}'.format(a=input, b=tMean_path)
        # log.info(f'Executing command "{command}"')
        # os.system(command)
        assert Path(tMean_path).exists()
        input = tMean_path
    # Resample the input to the 0.2 mm isotropic template resolution.
    # In testing mode ANTsPy is used in-process; otherwise the ANTs
    # command-line tool is invoked via os.system.
    resampled_path = 'resampled_input.nii.gz'
    resampled_nii_path = path.abspath(path.expanduser(resampled_path))
    if masking_opts['testing']:
        resampled_nii = resample_image(ants.image_read(str(input)),
                                       (0.2, 0.2, 0.2), False)
        nib.save(resampled_nii, resampled_nii_path)
    else:
        resample_cmd = 'ResampleImage 3 {input} '.format(
            input=input) + resampled_nii_path + ' 0.2x0.2x0.2'
        os.system(resample_cmd)
        log.info(f'Resample image with "{resample_cmd}"')
    if 'crop_values' in masking_opts and masking_opts['crop_values']:
        crop_bids_image(resampled_nii_path, masking_opts['crop_values'])
    """
    Bias correction
    """
    if 'bias_field_correction' in masking_opts and masking_opts[
            'bias_field_correction']:
        bias_correction_config = get_biascorrect_opts_defaults(masking_opts)
        bias_corrected_path = path.abspath(
            path.expanduser('corrected_input.nii.gz'))
        if masking_opts['testing']:
            # The config stores convergence as a string like '[AxBxC, tol]';
            # parse it into the iteration list and tolerance ANTsPy expects.
            convergence_args = bias_correction_config['convergence'].strip(
                '][').split(', ')
            iters = [int(elem) for elem in convergence_args[0].split('x')]
            tol = float(convergence_args[1])
            bias_corrected = ants.n4_bias_field_correction(
                ants.image_read(resampled_nii_path),
                bias_correction_config['bspline_fitting'],
                convergence={
                    'iters': iters,
                    'tol': tol
                },
                shrink_factor=bias_correction_config['shrink_factor'])
            nib.save(bias_corrected, bias_corrected_path)
        else:
            command = 'N4BiasFieldCorrection --bspline-fitting {} -d 3 --input-image {} --convergence {} --output {} ' \
                      '--shrink-factor {}'.format(
                          bias_correction_config['bspline_fitting'],
                          resampled_nii_path,
                          bias_correction_config['convergence'],
                          bias_corrected_path,
                          bias_correction_config['shrink_factor'])
            os.system(command)
            log.info(f'Apply bias correction with "{command}"')
    else:
        bias_corrected_path = resampled_nii_path
    image = nib.load(bias_corrected_path)
    # NOTE(review): get_data() is deprecated in nibabel (removed in v5);
    # get_fdata() is the modern replacement — confirm pinned nibabel version.
    in_file_data = image.get_data()
    """
    Getting the mask
    """
    # The model works slice-first: ori_shape records the z-first shape used
    # to reconstruct the prediction back to volume space later.
    ori_shape = np.moveaxis(in_file_data, 2, 0).shape
    in_file_data, mask_pred, network_input = get_mask(
        model_config,
        in_file_data,
        ori_shape,
        use_cuda=masking_opts['use_cuda'])
    mask_pred = remove_outliers(mask_pred)
    if 'visualisation_path' in masking_opts and masking_opts[
            'visualisation_path']:
        log.info(f'visualisation_path is {masking_opts["visualisation_path"]}')
        # NOTE(review): save_visualisation is not imported in this function —
        # presumably it comes from mlebe.masking.utils; verify, otherwise
        # this branch raises NameError.
        save_visualisation(masking_opts, in_file, network_input, mask_pred)
    """
    Reconstruct to original image size
    """
    resized = reconstruct_image(ori_shape, mask_pred)
    resized_path = 'resized_mask.nii.gz'
    resized_path = path.abspath(path.expanduser(resized_path))
    resized_mask = nib.Nifti1Image(resized, image.affine, image.header)
    nib.save(resized_mask, resized_path)
    # get voxel sizes from input
    input_image = nib.load(input)
    input_img_affine = input_image.affine
    voxel_sizes = nib.affines.voxel_sizes(input_img_affine)
    # Resample the predicted mask back to the input image's native
    # resolution (nearest-neighbour: trailing '1' / interp_type=1).
    resampled_mask_path = 'resampled_mask.nii.gz'
    resampled_mask_path = path.abspath(path.expanduser(resampled_mask_path))
    if masking_opts['testing']:
        resized_mask = ants.image_read(resized_path)
        resampled_mask_data = resample_image(
            resized_mask, (voxel_sizes[0], voxel_sizes[1], voxel_sizes[2]),
            False, 1)
    else:
        resample_cmd = 'ResampleImage 3 {input} '.format(
            input=resized_path
        ) + ' ' + resampled_mask_path + ' {x}x{y}x{z} '.format(
            x=voxel_sizes[0], y=voxel_sizes[1], z=voxel_sizes[2]) + ' 0 1'
        log.info(f'Resample image with "{resample_cmd}"')
        os.system(resample_cmd)
        resampled_mask = nib.load(resampled_mask_path)
        resampled_mask_data = resampled_mask.get_data()
    input_image_data = input_image.get_data()
    # Resampling can be off by a voxel or two: pad the mask to match.
    if resampled_mask_data.shape != input_image_data.shape:
        resampled_mask_data = pad_to_shape(resampled_mask_data,
                                           input_image_data)
    if masking_opts['testing']:
        # NOTE(review): in testing mode resampled_mask_data is an ANTs image
        # (nib.save on it, then .numpy()) — confirm this round-trip is
        # intentional.
        nib.save(resampled_mask_data, resampled_mask_path)
        resampled_mask_data = resampled_mask_data.numpy()
    else:
        nib.save(
            nib.Nifti1Image(resampled_mask_data, input_image.affine,
                            input_image.header), resampled_mask_path)
    """
    Masking of the input image
    """
    log.info('Masking the input image with the generated mask.')
    masked_image = np.multiply(resampled_mask_data, input_image_data).astype(
        'float32'
    )  # nibabel gives a non-helpful error if trying to save data that has dtype float64
    nii_path_masked = 'masked_output.nii.gz'
    nii_path_masked = path.abspath(path.expanduser(nii_path_masked))
    masked_image = nib.Nifti1Image(masked_image, input_image.affine,
                                   input_image.header)
    nib.save(masked_image, nii_path_masked)
    log.info(f'Masking of input image {in_file} finished successfully.')
    # f/s_biascorrect takes a list as input for the mask while biascorrect takes directly the path
    return nii_path_masked, [resampled_mask_path], resampled_mask_path
def builder(subject_id, subId, project_dir, data_dir, output_dir, output_final_dir, output_interm_dir, layout, anat=None, funcs=None, fmaps=None, task_name='', session=None, apply_trim=False, apply_dist_corr=False, apply_smooth=False, apply_filter=False, mni_template='2mm', apply_n4=True, ants_threads=8, readable_crash_files=False, write_logs=True): """ Core function that returns a workflow. See wfmaker for more details. Args: subject_id: name of subject folder for final outputted sub-folder name subId: abbreviate name of subject for intermediate outputted sub-folder name project_dir: full path to root of project data_dir: full path to raw data files output_dir: upper level output dir (others will be nested within this) output_final_dir: final preprocessed sub-dir name output_interm_dir: intermediate preprcess sub-dir name layout: BIDS layout instance """ ################## ### PATH SETUP ### ################## if session is not None: session = int(session) if session < 10: session = '0' + str(session) else: session = str(session) # Set MNI template MNItemplate = os.path.join(get_resource_path(), 'MNI152_T1_' + mni_template + '_brain.nii.gz') MNImask = os.path.join(get_resource_path(), 'MNI152_T1_' + mni_template + '_brain_mask.nii.gz') MNItemplatehasskull = os.path.join(get_resource_path(), 'MNI152_T1_' + mni_template + '.nii.gz') # Set ANTs files bet_ants_template = os.path.join(get_resource_path(), 'OASIS_template.nii.gz') bet_ants_prob_mask = os.path.join( get_resource_path(), 'OASIS_BrainCerebellumProbabilityMask.nii.gz') bet_ants_registration_mask = os.path.join( get_resource_path(), 'OASIS_BrainCerebellumRegistrationMask.nii.gz') ################################# ### NIPYPE IMPORTS AND CONFIG ### ################################# # Update nipype global config because workflow.config[] = ..., doesn't seem to work # Can't store nipype config/rc file in container anyway so set them globaly before importing and setting up workflow as suggested here: 
http://nipype.readthedocs.io/en/latest/users/config_file.html#config-file # Create subject's intermediate directory before configuring nipype and the workflow because that's where we'll save log files in addition to intermediate files if not os.path.exists(os.path.join(output_interm_dir, subId, 'logs')): os.makedirs(os.path.join(output_interm_dir, subId, 'logs')) log_dir = os.path.join(output_interm_dir, subId, 'logs') from nipype import config if readable_crash_files: cfg = dict(execution={'crashfile_format': 'txt'}) config.update_config(cfg) config.update_config({ 'logging': { 'log_directory': log_dir, 'log_to_file': write_logs }, 'execution': { 'crashdump_dir': log_dir } }) from nipype import logging logging.update_logging(config) # Now import everything else from nipype.interfaces.io import DataSink from nipype.interfaces.utility import Merge, IdentityInterface from nipype.pipeline.engine import Node, Workflow from nipype.interfaces.nipy.preprocess import ComputeMask from nipype.algorithms.rapidart import ArtifactDetect from nipype.interfaces.ants.segmentation import BrainExtraction, N4BiasFieldCorrection from nipype.interfaces.ants import Registration, ApplyTransforms from nipype.interfaces.fsl import MCFLIRT, TOPUP, ApplyTOPUP from nipype.interfaces.fsl.maths import MeanImage from nipype.interfaces.fsl import Merge as MERGE from nipype.interfaces.fsl.utils import Smooth from nipype.interfaces.nipy.preprocess import Trim from .interfaces import Plot_Coregistration_Montage, Plot_Quality_Control, Plot_Realignment_Parameters, Create_Covariates, Down_Sample_Precision, Create_Encoding_File, Filter_In_Mask ################## ### INPUT NODE ### ################## # Turn functional file list into interable Node func_scans = Node(IdentityInterface(fields=['scan']), name='func_scans') func_scans.iterables = ('scan', funcs) # Get TR for use in filtering below; we're assuming all BOLD runs have the same TR tr_length = layout.get_metadata(funcs[0])['RepetitionTime'] 
##################################### ## TRIM ## ##################################### if apply_trim: trim = Node(Trim(), name='trim') trim.inputs.begin_index = apply_trim ##################################### ## DISTORTION CORRECTION ## ##################################### if apply_dist_corr: # Get fmap file locations fmaps = [ f.filename for f in layout.get( subject=subId, modality='fmap', extensions='.nii.gz') ] if not fmaps: raise IOError( "Distortion Correction requested but field map scans not found..." ) # Get fmap metadata totalReadoutTimes, measurements, fmap_pes = [], [], [] for i, fmap in enumerate(fmaps): # Grab total readout time for each fmap totalReadoutTimes.append( layout.get_metadata(fmap)['TotalReadoutTime']) # Grab measurements (for some reason pyBIDS doesn't grab dcm_meta... fields from side-car json file and json.load, doesn't either; so instead just read the header using nibabel to determine number of scans) measurements.append(nib.load(fmap).header['dim'][4]) # Get phase encoding direction fmap_pe = layout.get_metadata(fmap)["PhaseEncodingDirection"] fmap_pes.append(fmap_pe) encoding_file_writer = Node(interface=Create_Encoding_File(), name='create_encoding') encoding_file_writer.inputs.totalReadoutTimes = totalReadoutTimes encoding_file_writer.inputs.fmaps = fmaps encoding_file_writer.inputs.fmap_pes = fmap_pes encoding_file_writer.inputs.measurements = measurements encoding_file_writer.inputs.file_name = 'encoding_file.txt' merge_to_file_list = Node(interface=Merge(2), infields=['in1', 'in2'], name='merge_to_file_list') merge_to_file_list.inputs.in1 = fmaps[0] merge_to_file_list.inputs.in1 = fmaps[1] # Merge AP and PA distortion correction scans merger = Node(interface=MERGE(dimension='t'), name='merger') merger.inputs.output_type = 'NIFTI_GZ' merger.inputs.in_files = fmaps merger.inputs.merged_file = 'merged_epi.nii.gz' # Create distortion correction map topup = Node(interface=TOPUP(), name='topup') topup.inputs.output_type = 'NIFTI_GZ' 
# Apply distortion correction to other scans apply_topup = Node(interface=ApplyTOPUP(), name='apply_topup') apply_topup.inputs.output_type = 'NIFTI_GZ' apply_topup.inputs.method = 'jac' apply_topup.inputs.interp = 'spline' ################################### ### REALIGN ### ################################### realign_fsl = Node(MCFLIRT(), name="realign") realign_fsl.inputs.cost = 'mutualinfo' realign_fsl.inputs.mean_vol = True realign_fsl.inputs.output_type = 'NIFTI_GZ' realign_fsl.inputs.save_mats = True realign_fsl.inputs.save_rms = True realign_fsl.inputs.save_plots = True ################################### ### MEAN EPIs ### ################################### # For coregistration after realignment mean_epi = Node(MeanImage(), name='mean_epi') mean_epi.inputs.dimension = 'T' # For after normalization is done to plot checks mean_norm_epi = Node(MeanImage(), name='mean_norm_epi') mean_norm_epi.inputs.dimension = 'T' ################################### ### MASK, ART, COV CREATION ### ################################### compute_mask = Node(ComputeMask(), name='compute_mask') compute_mask.inputs.m = .05 art = Node(ArtifactDetect(), name='art') art.inputs.use_differences = [True, False] art.inputs.use_norm = True art.inputs.norm_threshold = 1 art.inputs.zintensity_threshold = 3 art.inputs.mask_type = 'file' art.inputs.parameter_source = 'FSL' make_cov = Node(Create_Covariates(), name='make_cov') ################################ ### N4 BIAS FIELD CORRECTION ### ################################ if apply_n4: n4_correction = Node(N4BiasFieldCorrection(), name='n4_correction') n4_correction.inputs.copy_header = True n4_correction.inputs.save_bias = False n4_correction.inputs.num_threads = ants_threads n4_correction.inputs.input_image = anat ################################### ### BRAIN EXTRACTION ### ################################### brain_extraction_ants = Node(BrainExtraction(), name='brain_extraction') brain_extraction_ants.inputs.dimension = 3 
brain_extraction_ants.inputs.use_floatingpoint_precision = 1 brain_extraction_ants.inputs.num_threads = ants_threads brain_extraction_ants.inputs.brain_probability_mask = bet_ants_prob_mask brain_extraction_ants.inputs.keep_temporary_files = 1 brain_extraction_ants.inputs.brain_template = bet_ants_template brain_extraction_ants.inputs.extraction_registration_mask = bet_ants_registration_mask brain_extraction_ants.inputs.out_prefix = 'bet' ################################### ### COREGISTRATION ### ################################### coregistration = Node(Registration(), name='coregistration') coregistration.inputs.float = False coregistration.inputs.output_transform_prefix = "meanEpi2highres" coregistration.inputs.transforms = ['Rigid'] coregistration.inputs.transform_parameters = [(0.1, ), (0.1, )] coregistration.inputs.number_of_iterations = [[1000, 500, 250, 100]] coregistration.inputs.dimension = 3 coregistration.inputs.num_threads = ants_threads coregistration.inputs.write_composite_transform = True coregistration.inputs.collapse_output_transforms = True coregistration.inputs.metric = ['MI'] coregistration.inputs.metric_weight = [1] coregistration.inputs.radius_or_number_of_bins = [32] coregistration.inputs.sampling_strategy = ['Regular'] coregistration.inputs.sampling_percentage = [0.25] coregistration.inputs.convergence_threshold = [1e-08] coregistration.inputs.convergence_window_size = [10] coregistration.inputs.smoothing_sigmas = [[3, 2, 1, 0]] coregistration.inputs.sigma_units = ['mm'] coregistration.inputs.shrink_factors = [[4, 3, 2, 1]] coregistration.inputs.use_estimate_learning_rate_once = [True] coregistration.inputs.use_histogram_matching = [False] coregistration.inputs.initial_moving_transform_com = True coregistration.inputs.output_warped_image = True coregistration.inputs.winsorize_lower_quantile = 0.01 coregistration.inputs.winsorize_upper_quantile = 0.99 ################################### ### NORMALIZATION ### 
################################### # Settings Explanations # Only a few key settings are worth adjusting and most others relate to how ANTs optimizer starts or iterates and won't make a ton of difference # Brian Avants referred to these settings as the last "best tested" when he was aligning fMRI data: https://github.com/ANTsX/ANTsRCore/blob/master/R/antsRegistration.R#L275 # Things that matter the most: # smoothing_sigmas: # how much gaussian smoothing to apply when performing registration, probably want the upper limit of this to match the resolution that the data is collected at e.g. 3mm # Old settings [[3,2,1,0]]*3 # shrink_factors # The coarseness with which to do registration # Old settings [[8,4,2,1]] * 3 # >= 8 may result is some problems causing big chunks of cortex with little fine grain spatial structure to be moved to other parts of cortex # Other settings # transform_parameters: # how much regularization to do for fitting that transformation # for syn this pertains to both the gradient regularization term, and the flow, and elastic terms. Leave the syn settings alone as they seem to be the most well tested across published data sets # radius_or_number_of_bins # This is the bin size for MI metrics and 32 is probably adequate for most use cases. Increasing this might increase precision (e.g. to 64) but takes exponentially longer # use_histogram_matching # Use image intensity distribution to guide registration # Leave it on for within modality registration (e.g. T1 -> MNI), but off for between modality registration (e.g. EPI -> T1) # convergence_threshold # threshold for optimizer # convergence_window_size # how many samples should optimizer average to compute threshold? # sampling_strategy # what strategy should ANTs use to initialize the transform. 
Regular here refers to approximately random sampling around the center of the image mass normalization = Node(Registration(), name='normalization') normalization.inputs.float = False normalization.inputs.collapse_output_transforms = True normalization.inputs.convergence_threshold = [1e-06] normalization.inputs.convergence_window_size = [10] normalization.inputs.dimension = 3 normalization.inputs.fixed_image = MNItemplate normalization.inputs.initial_moving_transform_com = True normalization.inputs.metric = ['MI', 'MI', 'CC'] normalization.inputs.metric_weight = [1.0] * 3 normalization.inputs.number_of_iterations = [[1000, 500, 250, 100], [1000, 500, 250, 100], [100, 70, 50, 20]] normalization.inputs.num_threads = ants_threads normalization.inputs.output_transform_prefix = 'anat2template' normalization.inputs.output_inverse_warped_image = True normalization.inputs.output_warped_image = True normalization.inputs.radius_or_number_of_bins = [32, 32, 4] normalization.inputs.sampling_percentage = [0.25, 0.25, 1] normalization.inputs.sampling_strategy = ['Regular', 'Regular', 'None'] normalization.inputs.shrink_factors = [[8, 4, 2, 1]] * 3 normalization.inputs.sigma_units = ['vox'] * 3 normalization.inputs.smoothing_sigmas = [[3, 2, 1, 0]] * 3 normalization.inputs.transforms = ['Rigid', 'Affine', 'SyN'] normalization.inputs.transform_parameters = [(0.1, ), (0.1, ), (0.1, 3.0, 0.0)] normalization.inputs.use_histogram_matching = True normalization.inputs.winsorize_lower_quantile = 0.005 normalization.inputs.winsorize_upper_quantile = 0.995 normalization.inputs.write_composite_transform = True # NEW SETTINGS (need to be adjusted; specifically shink_factors and smoothing_sigmas need to be the same length) # normalization = Node(Registration(), name='normalization') # normalization.inputs.float = False # normalization.inputs.collapse_output_transforms = True # normalization.inputs.convergence_threshold = [1e-06, 1e-06, 1e-07] # normalization.inputs.convergence_window_size = 
[10] # normalization.inputs.dimension = 3 # normalization.inputs.fixed_image = MNItemplate # normalization.inputs.initial_moving_transform_com = True # normalization.inputs.metric = ['MI', 'MI', 'CC'] # normalization.inputs.metric_weight = [1.0]*3 # normalization.inputs.number_of_iterations = [[1000, 500, 250, 100], # [1000, 500, 250, 100], # [100, 70, 50, 20]] # normalization.inputs.num_threads = ants_threads # normalization.inputs.output_transform_prefix = 'anat2template' # normalization.inputs.output_inverse_warped_image = True # normalization.inputs.output_warped_image = True # normalization.inputs.radius_or_number_of_bins = [32, 32, 4] # normalization.inputs.sampling_percentage = [0.25, 0.25, 1] # normalization.inputs.sampling_strategy = ['Regular', # 'Regular', # 'None'] # normalization.inputs.shrink_factors = [[4, 3, 2, 1]]*3 # normalization.inputs.sigma_units = ['vox']*3 # normalization.inputs.smoothing_sigmas = [[2, 1], [2, 1], [3, 2, 1, 0]] # normalization.inputs.transforms = ['Rigid', 'Affine', 'SyN'] # normalization.inputs.transform_parameters = [(0.1,), # (0.1,), # (0.1, 3.0, 0.0)] # normalization.inputs.use_histogram_matching = True # normalization.inputs.winsorize_lower_quantile = 0.005 # normalization.inputs.winsorize_upper_quantile = 0.995 # normalization.inputs.write_composite_transform = True ################################### ### APPLY TRANSFORMS AND SMOOTH ### ################################### merge_transforms = Node(Merge(2), iterfield=['in2'], name='merge_transforms') # Used for epi -> mni, via (coreg + norm) apply_transforms = Node(ApplyTransforms(), iterfield=['input_image'], name='apply_transforms') apply_transforms.inputs.input_image_type = 3 apply_transforms.inputs.float = False apply_transforms.inputs.num_threads = 12 apply_transforms.inputs.environ = {} apply_transforms.inputs.interpolation = 'BSpline' apply_transforms.inputs.invert_transform_flags = [False, False] apply_transforms.inputs.reference_image = MNItemplate # Used for t1 
segmented -> mni, via (norm) apply_transform_seg = Node(ApplyTransforms(), name='apply_transform_seg') apply_transform_seg.inputs.input_image_type = 3 apply_transform_seg.inputs.float = False apply_transform_seg.inputs.num_threads = 12 apply_transform_seg.inputs.environ = {} apply_transform_seg.inputs.interpolation = 'MultiLabel' apply_transform_seg.inputs.invert_transform_flags = [False] apply_transform_seg.inputs.reference_image = MNItemplate ################################### ### PLOTS ### ################################### plot_realign = Node(Plot_Realignment_Parameters(), name="plot_realign") plot_qa = Node(Plot_Quality_Control(), name="plot_qa") plot_normalization_check = Node(Plot_Coregistration_Montage(), name="plot_normalization_check") plot_normalization_check.inputs.canonical_img = MNItemplatehasskull ############################################ ### FILTER, SMOOTH, DOWNSAMPLE PRECISION ### ############################################ # Use cosanlab_preproc for down sampling down_samp = Node(Down_Sample_Precision(), name="down_samp") # Use FSL for smoothing if apply_smooth: smooth = Node(Smooth(), name='smooth') if isinstance(apply_smooth, list): smooth.iterables = ("fwhm", apply_smooth) elif isinstance(apply_smooth, int) or isinstance(apply_smooth, float): smooth.inputs.fwhm = apply_smooth else: raise ValueError("apply_smooth must be a list or int/float") # Use cosanlab_preproc for low-pass filtering if apply_filter: lp_filter = Node(Filter_In_Mask(), name='lp_filter') lp_filter.inputs.mask = MNImask lp_filter.inputs.sampling_rate = tr_length lp_filter.inputs.high_pass_cutoff = 0 if isinstance(apply_filter, list): lp_filter.iterables = ("low_pass_cutoff", apply_filter) elif isinstance(apply_filter, int) or isinstance(apply_filter, float): lp_filter.inputs.low_pass_cutoff = apply_filter else: raise ValueError("apply_filter must be a list or int/float") ################### ### OUTPUT NODE ### ################### # Collect all final outputs in the output 
dir and get rid of file name additions datasink = Node(DataSink(), name='datasink') if session: datasink.inputs.base_directory = os.path.join(output_final_dir, subject_id) datasink.inputs.container = 'ses-' + session else: datasink.inputs.base_directory = output_final_dir datasink.inputs.container = subject_id # Remove substitutions data_dir_parts = data_dir.split('/')[1:] if session: prefix = ['_scan_'] + data_dir_parts + [subject_id] + [ 'ses-' + session ] + ['func'] else: prefix = ['_scan_'] + data_dir_parts + [subject_id] + ['func'] func_scan_names = [os.path.split(elem)[-1] for elem in funcs] to_replace = [] for elem in func_scan_names: bold_name = elem.split(subject_id + '_')[-1] bold_name = bold_name.split('.nii.gz')[0] to_replace.append(('..'.join(prefix + [elem]), bold_name)) datasink.inputs.substitutions = to_replace ##################### ### INIT WORKFLOW ### ##################### # If we have sessions provide the full path to the subject's intermediate directory # and only rely on workflow init to create the session container *within* that directory # Otherwise just point to the intermediate directory and let the workflow init create the subject container within the intermediate directory if session: workflow = Workflow(name='ses_' + session) workflow.base_dir = os.path.join(output_interm_dir, subId) else: workflow = Workflow(name=subId) workflow.base_dir = output_interm_dir ############################ ######### PART (1a) ######### # func -> discorr -> trim -> realign # OR # func -> trim -> realign # OR # func -> discorr -> realign # OR # func -> realign ############################ if apply_dist_corr: workflow.connect([(encoding_file_writer, topup, [('encoding_file', 'encoding_file')]), (encoding_file_writer, apply_topup, [('encoding_file', 'encoding_file')]), (merger, topup, [('merged_file', 'in_file')]), (func_scans, apply_topup, [('scan', 'in_files')]), (topup, apply_topup, [('out_fieldcoef', 'in_topup_fieldcoef'), ('out_movpar', 
'in_topup_movpar')])]) if apply_trim: # Dist Corr + Trim workflow.connect([(apply_topup, trim, [('out_corrected', 'in_file') ]), (trim, realign_fsl, [('out_file', 'in_file')])]) else: # Dist Corr + No Trim workflow.connect([(apply_topup, realign_fsl, [('out_corrected', 'in_file')])]) else: if apply_trim: # No Dist Corr + Trim workflow.connect([(func_scans, trim, [('scan', 'in_file')]), (trim, realign_fsl, [('out_file', 'in_file')])]) else: # No Dist Corr + No Trim workflow.connect([ (func_scans, realign_fsl, [('scan', 'in_file')]), ]) ############################ ######### PART (1n) ######### # anat -> N4 -> bet # OR # anat -> bet ############################ if apply_n4: workflow.connect([(n4_correction, brain_extraction_ants, [('output_image', 'anatomical_image')])]) else: brain_extraction_ants.inputs.anatomical_image = anat ########################################## ############### PART (2) ################# # realign -> coreg -> mni (via t1) # t1 -> mni # covariate creation # plot creation ########################################### workflow.connect([ (realign_fsl, plot_realign, [('par_file', 'realignment_parameters')]), (realign_fsl, plot_qa, [('out_file', 'dat_img')]), (realign_fsl, art, [('out_file', 'realigned_files'), ('par_file', 'realignment_parameters')]), (realign_fsl, mean_epi, [('out_file', 'in_file')]), (realign_fsl, make_cov, [('par_file', 'realignment_parameters')]), (mean_epi, compute_mask, [('out_file', 'mean_volume')]), (compute_mask, art, [('brain_mask', 'mask_file')]), (art, make_cov, [('outlier_files', 'spike_id')]), (art, plot_realign, [('outlier_files', 'outliers')]), (plot_qa, make_cov, [('fd_outliers', 'fd_outliers')]), (brain_extraction_ants, coregistration, [('BrainExtractionBrain', 'fixed_image')]), (mean_epi, coregistration, [('out_file', 'moving_image')]), (brain_extraction_ants, normalization, [('BrainExtractionBrain', 'moving_image')]), (coregistration, merge_transforms, [('composite_transform', 'in2')]), (normalization, 
merge_transforms, [('composite_transform', 'in1')]), (merge_transforms, apply_transforms, [('out', 'transforms')]), (realign_fsl, apply_transforms, [('out_file', 'input_image')]), (apply_transforms, mean_norm_epi, [('output_image', 'in_file')]), (normalization, apply_transform_seg, [('composite_transform', 'transforms')]), (brain_extraction_ants, apply_transform_seg, [('BrainExtractionSegmentation', 'input_image')]), (mean_norm_epi, plot_normalization_check, [('out_file', 'wra_img')]) ]) ################################################## ################### PART (3) ##################### # epi (in mni) -> filter -> smooth -> down sample # OR # epi (in mni) -> filter -> down sample # OR # epi (in mni) -> smooth -> down sample # OR # epi (in mni) -> down sample ################################################### if apply_filter: workflow.connect([(apply_transforms, lp_filter, [('output_image', 'in_file')])]) if apply_smooth: # Filtering + Smoothing workflow.connect([(lp_filter, smooth, [('out_file', 'in_file')]), (smooth, down_samp, [('smoothed_file', 'in_file') ])]) else: # Filtering + No Smoothing workflow.connect([(lp_filter, down_samp, [('out_file', 'in_file')]) ]) else: if apply_smooth: # No Filtering + Smoothing workflow.connect([ (apply_transforms, smooth, [('output_image', 'in_file')]), (smooth, down_samp, [('smoothed_file', 'in_file')]) ]) else: # No Filtering + No Smoothing workflow.connect([(apply_transforms, down_samp, [('output_image', 'in_file')])]) ########################################## ############### PART (4) ################# # down sample -> save # plots -> save # covs -> save # t1 (in mni) -> save # t1 segmented masks (in mni) -> save # realignment parms -> save ########################################## workflow.connect([ (down_samp, datasink, [('out_file', 'functional.@down_samp')]), (plot_realign, datasink, [('plot', 'functional.@plot_realign')]), (plot_qa, datasink, [('plot', 'functional.@plot_qa')]), (plot_normalization_check, datasink, 
[('plot', 'functional.@plot_normalization')]), (make_cov, datasink, [('covariates', 'functional.@covariates')]), (normalization, datasink, [('warped_image', 'structural.@normanat')]), (apply_transform_seg, datasink, [('output_image', 'structural.@normanatseg')]), (realign_fsl, datasink, [('par_file', 'functional.@motionparams')]) ]) if not os.path.exists(os.path.join(output_dir, 'pipeline.png')): workflow.write_graph(dotfilename=os.path.join(output_dir, 'pipeline'), format='png') print(f"Creating workflow for subject: {subject_id}") if ants_threads != 8: print( f"ANTs will utilize the user-requested {ants_threads} threads for parallel processing." ) return workflow
else: out_image[idx] = stats.zscore(in_image[idx]) print("Note: NOT inverting z-scores.") img = nib.Nifti1Image(out_image, nib.load(input_image).affine) img.to_filename("rcfe.nii") return path.abspath("rcfe.nii") # Motion correction on fmri time series mcflirt_node = Node(MCFLIRT(mean_vol=True, output_type='NIFTI'), name="mcflirt") # mcflirt_node = Node(MCFLIRT(mean_vol=True, output_type='NIFTI'), iterables=['in_file'], name="mcflirt") # Compute mean(fslmaths) of the fmri time series mean_fmri_node = Node(MeanImage(output_type='NIFTI'), name="meanimage") # Skull Strip the fmri time series bet_fmri_node = Node(BET(output_type='NIFTI', mask=True), name="bet_fmri") # Bias Correct the fmri time series bias_correction_node = Node(N4BiasFieldCorrection(), name='bias_correction') # Returns the relative concentration of brain iron rcfe_node = Node(Function(input_names=['input_image', 'mask_image'], output_names=['output_image'], function=compute_rcFe), name="rcfe") # coregister (skullstripped) mean of the fmri time series to the skull stripped T1 structural flirt_node = Node(
def wfmaker(project_dir,
            raw_dir,
            subject_id,
            task_name='',
            apply_trim=False,
            apply_dist_corr=False,
            apply_smooth=False,
            apply_filter=False,
            mni_template='2mm',
            apply_n4=True,
            ants_threads=8,
            readable_crash_files=False):
    """
    This function returns a "standard" workflow based on requested settings. Assumes data is in the following directory structure in BIDS format:

    *Work flow steps*:

    1) EPI Distortion Correction (FSL; optional)
    2) Trimming (nipy)
    3) Realignment/Motion Correction (FSL)
    4) Artifact Detection (rapidART/python)
    5) Brain Extraction + N4 Bias Correction (ANTs)
    6) Coregistration (rigid) (ANTs)
    7) Normalization to MNI (non-linear) (ANTs)
    8) Low-pass filtering (nilearn; optional)
    8) Smoothing (FSL; optional)
    9) Downsampling to INT16 precision to save space (nibabel)

    Args:
        project_dir (str): full path to the root of project folder, e.g. /my/data/myproject. All preprocessed data will be placed under this folder and the raw_dir folder will be searched for under this folder
        raw_dir (str): folder name for raw data, e.g. 'raw' which would be automatically converted to /my/data/myproject/raw
        subject_id (str/int): subject ID to process. Can be either a subject ID string e.g. 'sid-0001' or an integer to index the entire list of subjects in raw_dir, e.g. 0, which would process the first subject
        apply_trim (int/bool; optional): number of volumes to trim from the beginning of each functional run; default is None
        task_name (str; optional): which functional task runs to process; default is all runs
        apply_dist_corr (bool; optional): look for fmap files and perform distortion correction; default False
        apply_smooth (int/float/list; optional): smoothing to perform in FWHM mm; if a list is provided will create outputs for each smoothing kernel separately; default False
        apply_filter (float/list; optional): low-pass/high-freq filtering cut-offs in Hz; if a list is provided will create outputs for each filter cut-off separately. With high temporal resolution scans .25Hz is a decent value to capture respiratory artifacts; default None/False
        mni_template (str; optional): which mm resolution template to use, e.g. '3mm'; default '2mm'
        apply_n4 (bool; optional): perform N4 Bias Field correction on the anatomical image; default true
        ants_threads (int; optional): number of threads ANTs should use for its processes; default 8
        readable_crash_files (bool; optional): should nipype crash files be saved as txt? This makes them easily readable, but sometimes interferes with nipype's ability to use cached results of successfully run nodes (i.e. picking up where it left off after bugs are fixed); default False

    Examples:

        >>> from cosanlab_preproc.wfmaker import wfmaker
        >>> # Create workflow that performs no distortion correction, trims first 5 TRs, no filtering, 6mm smoothing, and normalizes to 2mm MNI space. Run it with 16 cores.
        >>>
        >>> workflow = wfmaker(
                        project_dir = '/data/project',
                        raw_dir = 'raw',
                        apply_trim = 5)
        >>>
        >>> workflow.run('MultiProc',plugin_args = {'n_procs': 16})
        >>>
        >>> # Create workflow that performs distortion correction, trims first 25 TRs, no filtering and filtering .25hz, 6mm and 8mm smoothing, and normalizes to 3mm MNI space. Run it serially (will be super slow!).
        >>>
        >>> workflow = wfmaker(
                        project_dir = '/data/project',
                        raw_dir = 'raw',
                        apply_trim = 25,
                        apply_dist_corr = True,
                        apply_filter = [0, .25],
                        apply_smooth = [6.0, 8.0],
                        mni = '3mm')
        >>>
        >>> workflow.run()

    """

    ##################
    ### PATH SETUP ###
    ##################
    if mni_template not in ['1mm', '2mm', '3mm']:
        raise ValueError("MNI template must be: 1mm, 2mm, or 3mm")

    data_dir = os.path.join(project_dir, raw_dir)
    output_dir = os.path.join(project_dir, 'preprocessed')
    output_final_dir = os.path.join(output_dir, 'final')
    output_interm_dir = os.path.join(output_dir, 'intermediate')
    log_dir = os.path.join(project_dir, 'logs', 'nipype')

    # exist_ok avoids the check-then-create race of the previous
    # os.path.exists() guards
    os.makedirs(output_final_dir, exist_ok=True)
    os.makedirs(output_interm_dir, exist_ok=True)
    os.makedirs(log_dir, exist_ok=True)

    # Set MNI template
    MNItemplate = os.path.join(get_resource_path(),
                               'MNI152_T1_' + mni_template + '_brain.nii.gz')
    MNImask = os.path.join(get_resource_path(),
                           'MNI152_T1_' + mni_template + '_brain_mask.nii.gz')
    MNItemplatehasskull = os.path.join(get_resource_path(),
                                       'MNI152_T1_' + mni_template + '.nii.gz')

    # Set ANTs files
    bet_ants_template = os.path.join(get_resource_path(),
                                     'OASIS_template.nii.gz')
    bet_ants_prob_mask = os.path.join(
        get_resource_path(), 'OASIS_BrainCerebellumProbabilityMask.nii.gz')
    bet_ants_registration_mask = os.path.join(
        get_resource_path(), 'OASIS_BrainCerebellumRegistrationMask.nii.gz')

    #################################
    ### NIPYPE IMPORTS AND CONFIG ###
    #################################
    # Update nipype global config because workflow.config[] = ..., doesn't seem to work
    # Can't store nipype config/rc file in container anyway so set them globaly before importing and setting up workflow as suggested here: http://nipype.readthedocs.io/en/latest/users/config_file.html#config-file
    from nipype import config
    if readable_crash_files:
        cfg = dict(execution={'crashfile_format': 'txt'})
        config.update_config(cfg)
    config.update_config({'logging': {
        'log_directory': log_dir,
        'log_to_file': True
    }})
    from nipype import logging
    logging.update_logging(config)

    # Now import everything else
    from nipype.interfaces.io import DataSink
    from nipype.interfaces.utility import Merge, IdentityInterface
    from nipype.pipeline.engine import Node, Workflow
    from nipype.interfaces.nipy.preprocess import ComputeMask
    from nipype.algorithms.rapidart import ArtifactDetect
    from nipype.interfaces.ants.segmentation import BrainExtraction, N4BiasFieldCorrection
    from nipype.interfaces.ants import Registration, ApplyTransforms
    from nipype.interfaces.fsl import MCFLIRT, TOPUP, ApplyTOPUP
    from nipype.interfaces.fsl.maths import MeanImage
    from nipype.interfaces.fsl import Merge as MERGE
    from nipype.interfaces.fsl.utils import Smooth
    from nipype.interfaces.nipy.preprocess import Trim
    from .interfaces import Plot_Coregistration_Montage, Plot_Quality_Control, Plot_Realignment_Parameters, Create_Covariates, Down_Sample_Precision, Create_Encoding_File, Filter_In_Mask

    ##################
    ### INPUT NODE ###
    ##################
    layout = BIDSLayout(data_dir)
    # Dartmouth subjects are named with the sub- prefix, handle whether we receive an integer identifier for indexing or the full subject id with prefix
    if isinstance(subject_id, six.string_types):
        subId = subject_id[4:]
    elif isinstance(subject_id, int):
        subId = layout.get_subjects()[subject_id]
        subject_id = 'sub-' + subId
    else:
        raise TypeError("subject_id should be a string or integer")

    # Get anat file location
    anat = layout.get(subject=subId, type='T1w',
                      extensions='.nii.gz')[0].filename

    # Get functional file locations
    if task_name:
        funcs = [
            f.filename for f in layout.get(subject=subId,
                                           type='bold',
                                           task=task_name,
                                           extensions='.nii.gz')
        ]
    else:
        funcs = [
            f.filename for f in layout.get(
                subject=subId, type='bold', extensions='.nii.gz')
        ]

    # Turn functional file list into iterable Node
    func_scans = Node(IdentityInterface(fields=['scan']), name='func_scans')
    func_scans.iterables = ('scan', funcs)

    # Get TR for use in filtering below; we're assuming all BOLD runs have the same TR
    tr_length = layout.get_metadata(funcs[0])['RepetitionTime']

    #####################################
    ## TRIM ##
    #####################################
    if apply_trim:
        trim = Node(Trim(), name='trim')
        trim.inputs.begin_index = apply_trim

    #####################################
    ## DISTORTION CORRECTION ##
    #####################################
    if apply_dist_corr:
        # Get fmap file locations
        fmaps = [
            f.filename for f in layout.get(
                subject=subId, modality='fmap', extensions='.nii.gz')
        ]
        if not fmaps:
            raise IOError(
                "Distortion Correction requested but field map scans not found..."
            )

        # Get fmap metadata
        totalReadoutTimes, measurements, fmap_pes = [], [], []
        for i, fmap in enumerate(fmaps):
            # Grab total readout time for each fmap
            totalReadoutTimes.append(
                layout.get_metadata(fmap)['TotalReadoutTime'])
            # Grab measurements (for some reason pyBIDS doesn't grab dcm_meta... fields from side-car json file and json.load, doesn't either; so instead just read the header using nibabel to determine number of scans)
            measurements.append(nib.load(fmap).header['dim'][4])
            # Get phase encoding direction
            fmap_pe = layout.get_metadata(fmap)["PhaseEncodingDirection"]
            fmap_pes.append(fmap_pe)

        encoding_file_writer = Node(interface=Create_Encoding_File(),
                                    name='create_encoding')
        encoding_file_writer.inputs.totalReadoutTimes = totalReadoutTimes
        encoding_file_writer.inputs.fmaps = fmaps
        encoding_file_writer.inputs.fmap_pes = fmap_pes
        encoding_file_writer.inputs.measurements = measurements
        encoding_file_writer.inputs.file_name = 'encoding_file.txt'

        merge_to_file_list = Node(interface=Merge(2),
                                  infields=['in1', 'in2'],
                                  name='merge_to_file_list')
        merge_to_file_list.inputs.in1 = fmaps[0]
        # BUGFIX: was 'in1' assigned twice; second fmap belongs in 'in2'
        merge_to_file_list.inputs.in2 = fmaps[1]

        # Merge AP and PA distortion correction scans
        merger = Node(interface=MERGE(dimension='t'), name='merger')
        merger.inputs.output_type = 'NIFTI_GZ'
        merger.inputs.in_files = fmaps
        merger.inputs.merged_file = 'merged_epi.nii.gz'

        # Create distortion correction map
        topup = Node(interface=TOPUP(), name='topup')
        topup.inputs.output_type = 'NIFTI_GZ'

        # Apply distortion correction to other scans
        apply_topup = Node(interface=ApplyTOPUP(), name='apply_topup')
        apply_topup.inputs.output_type = 'NIFTI_GZ'
        apply_topup.inputs.method = 'jac'
        apply_topup.inputs.interp = 'spline'

    ###################################
    ### REALIGN ###
    ###################################
    realign_fsl = Node(MCFLIRT(), name="realign")
    realign_fsl.inputs.cost = 'mutualinfo'
    realign_fsl.inputs.mean_vol = True
    realign_fsl.inputs.output_type = 'NIFTI_GZ'
    realign_fsl.inputs.save_mats = True
    realign_fsl.inputs.save_rms = True
    realign_fsl.inputs.save_plots = True

    ###################################
    ### MEAN EPIs ###
    ###################################
    # For coregistration after realignment
    mean_epi = Node(MeanImage(), name='mean_epi')
    mean_epi.inputs.dimension = 'T'

    # For after normalization is done to plot checks
    mean_norm_epi = Node(MeanImage(), name='mean_norm_epi')
    mean_norm_epi.inputs.dimension = 'T'

    ###################################
    ### MASK, ART, COV CREATION ###
    ###################################
    compute_mask = Node(ComputeMask(), name='compute_mask')
    compute_mask.inputs.m = .05

    art = Node(ArtifactDetect(), name='art')
    art.inputs.use_differences = [True, False]
    art.inputs.use_norm = True
    art.inputs.norm_threshold = 1
    art.inputs.zintensity_threshold = 3
    art.inputs.mask_type = 'file'
    art.inputs.parameter_source = 'FSL'

    make_cov = Node(Create_Covariates(), name='make_cov')

    ################################
    ### N4 BIAS FIELD CORRECTION ###
    ################################
    if apply_n4:
        n4_correction = Node(N4BiasFieldCorrection(), name='n4_correction')
        n4_correction.inputs.copy_header = True
        n4_correction.inputs.save_bias = False
        n4_correction.inputs.num_threads = ants_threads
        n4_correction.inputs.input_image = anat

    ###################################
    ### BRAIN EXTRACTION ###
    ###################################
    brain_extraction_ants = Node(BrainExtraction(), name='brain_extraction')
    brain_extraction_ants.inputs.dimension = 3
    brain_extraction_ants.inputs.use_floatingpoint_precision = 1
    brain_extraction_ants.inputs.num_threads = ants_threads
    brain_extraction_ants.inputs.brain_probability_mask = bet_ants_prob_mask
    brain_extraction_ants.inputs.keep_temporary_files = 1
    brain_extraction_ants.inputs.brain_template = bet_ants_template
    brain_extraction_ants.inputs.extraction_registration_mask = bet_ants_registration_mask
    brain_extraction_ants.inputs.out_prefix = 'bet'

    ###################################
    ### COREGISTRATION ###
    ###################################
    coregistration = Node(Registration(), name='coregistration')
    coregistration.inputs.float = False
    coregistration.inputs.output_transform_prefix = "meanEpi2highres"
    coregistration.inputs.transforms = ['Rigid']
    coregistration.inputs.transform_parameters = [(0.1, ), (0.1, )]
    coregistration.inputs.number_of_iterations = [[1000, 500, 250, 100]]
    coregistration.inputs.dimension = 3
    coregistration.inputs.num_threads = ants_threads
    coregistration.inputs.write_composite_transform = True
    coregistration.inputs.collapse_output_transforms = True
    coregistration.inputs.metric = ['MI']
    coregistration.inputs.metric_weight = [1]
    coregistration.inputs.radius_or_number_of_bins = [32]
    coregistration.inputs.sampling_strategy = ['Regular']
    coregistration.inputs.sampling_percentage = [0.25]
    coregistration.inputs.convergence_threshold = [1e-08]
    coregistration.inputs.convergence_window_size = [10]
    coregistration.inputs.smoothing_sigmas = [[3, 2, 1, 0]]
    coregistration.inputs.sigma_units = ['mm']
    coregistration.inputs.shrink_factors = [[4, 3, 2, 1]]
    coregistration.inputs.use_estimate_learning_rate_once = [True]
    coregistration.inputs.use_histogram_matching = [False]
    coregistration.inputs.initial_moving_transform_com = True
    coregistration.inputs.output_warped_image = True
    coregistration.inputs.winsorize_lower_quantile = 0.01
    coregistration.inputs.winsorize_upper_quantile = 0.99

    ###################################
    ### NORMALIZATION ###
    ###################################
    # Settings Explanations
    # Only a few key settings are worth adjusting and most others relate to how ANTs optimizer starts or iterates and won't make a ton of difference
    # Brian Avants referred to these settings as the last "best tested" when he was aligning fMRI data: https://github.com/ANTsX/ANTsRCore/blob/master/R/antsRegistration.R#L275
    # Things that matter the most:
    # smoothing_sigmas: how much gaussian smoothing to apply when performing registration, probably want the upper limit of this to match the resolution that the data is collected at e.g. 3mm
    # shrink_factors: the coarseness with which to do registration; >= 8 may cause big chunks of cortex with little fine grain spatial structure to be moved to other parts of cortex
    # use_histogram_matching: leave on for within-modality registration (e.g. T1 -> MNI), off for between-modality (e.g. EPI -> T1)
    # radius_or_number_of_bins: bin size for MI metrics; 32 is adequate for most use cases
    # sampling_strategy: 'Regular' refers to approximately random sampling around the center of the image mass
    normalization = Node(Registration(), name='normalization')
    normalization.inputs.float = False
    normalization.inputs.collapse_output_transforms = True
    normalization.inputs.convergence_threshold = [1e-06, 1e-06, 1e-07]
    normalization.inputs.convergence_window_size = [10]
    normalization.inputs.dimension = 3
    normalization.inputs.fixed_image = MNItemplate
    normalization.inputs.initial_moving_transform_com = True
    normalization.inputs.metric = ['MI', 'MI', 'CC']
    normalization.inputs.metric_weight = [1.0] * 3
    normalization.inputs.number_of_iterations = [[1000, 500, 250, 100],
                                                 [1000, 500, 250, 100],
                                                 [100, 70, 50, 20]]
    normalization.inputs.num_threads = ants_threads
    normalization.inputs.output_transform_prefix = 'anat2template'
    normalization.inputs.output_inverse_warped_image = True
    normalization.inputs.output_warped_image = True
    normalization.inputs.radius_or_number_of_bins = [32, 32, 4]
    normalization.inputs.sampling_percentage = [0.25, 0.25, 1]
    normalization.inputs.sampling_strategy = ['Regular', 'Regular', 'None']
    normalization.inputs.shrink_factors = [[4, 3, 2, 1]] * 3
    normalization.inputs.sigma_units = ['vox'] * 3
    # NOTE(review): first two stages have 2 sigmas but 4 shrink factors /
    # iterations — confirm this is intentional before changing
    normalization.inputs.smoothing_sigmas = [[2, 1], [2, 1], [3, 2, 1, 0]]
    normalization.inputs.transforms = ['Rigid', 'Affine', 'SyN']
    normalization.inputs.transform_parameters = [(0.1, ), (0.1, ),
                                                 (0.1, 3.0, 0.0)]
    normalization.inputs.use_histogram_matching = True
    normalization.inputs.winsorize_lower_quantile = 0.005
    normalization.inputs.winsorize_upper_quantile = 0.995
    normalization.inputs.write_composite_transform = True

    ###################################
    ### APPLY TRANSFORMS AND SMOOTH ###
    ###################################
    merge_transforms = Node(Merge(2),
                            iterfield=['in2'],
                            name='merge_transforms')

    # Used for epi -> mni, via (coreg + norm)
    apply_transforms = Node(ApplyTransforms(),
                            iterfield=['input_image'],
                            name='apply_transforms')
    apply_transforms.inputs.input_image_type = 3
    apply_transforms.inputs.float = False
    apply_transforms.inputs.num_threads = 12
    apply_transforms.inputs.environ = {}
    apply_transforms.inputs.interpolation = 'BSpline'
    apply_transforms.inputs.invert_transform_flags = [False, False]
    apply_transforms.inputs.reference_image = MNItemplate

    # Used for t1 segmented -> mni, via (norm)
    apply_transform_seg = Node(ApplyTransforms(), name='apply_transform_seg')
    apply_transform_seg.inputs.input_image_type = 3
    apply_transform_seg.inputs.float = False
    apply_transform_seg.inputs.num_threads = 12
    apply_transform_seg.inputs.environ = {}
    apply_transform_seg.inputs.interpolation = 'MultiLabel'
    apply_transform_seg.inputs.invert_transform_flags = [False]
    apply_transform_seg.inputs.reference_image = MNItemplate

    ###################################
    ### PLOTS ###
    ###################################
    plot_realign = Node(Plot_Realignment_Parameters(), name="plot_realign")
    plot_qa = Node(Plot_Quality_Control(), name="plot_qa")
    plot_normalization_check = Node(Plot_Coregistration_Montage(),
                                    name="plot_normalization_check")
    plot_normalization_check.inputs.canonical_img = MNItemplatehasskull

    ############################################
    ### FILTER, SMOOTH, DOWNSAMPLE PRECISION ###
    ############################################
    # Use cosanlab_preproc for down sampling
    down_samp = Node(Down_Sample_Precision(), name="down_samp")

    # Use FSL for smoothing
    if apply_smooth:
        smooth = Node(Smooth(), name='smooth')
        if isinstance(apply_smooth, list):
            smooth.iterables = ("fwhm", apply_smooth)
        elif isinstance(apply_smooth, (int, float)):
            smooth.inputs.fwhm = apply_smooth
        else:
            raise ValueError("apply_smooth must be a list or int/float")

    # Use cosanlab_preproc for low-pass filtering
    if apply_filter:
        lp_filter = Node(Filter_In_Mask(), name='lp_filter')
        lp_filter.inputs.mask = MNImask
        lp_filter.inputs.sampling_rate = tr_length
        lp_filter.inputs.high_pass_cutoff = 0
        if isinstance(apply_filter, list):
            lp_filter.iterables = ("low_pass_cutoff", apply_filter)
        elif isinstance(apply_filter, (int, float)):
            lp_filter.inputs.low_pass_cutoff = apply_filter
        else:
            raise ValueError("apply_filter must be a list or int/float")

    ###################
    ### OUTPUT NODE ###
    ###################
    # Collect all final outputs in the output dir and get rid of file name additions
    datasink = Node(DataSink(), name='datasink')
    datasink.inputs.base_directory = output_final_dir
    datasink.inputs.container = subject_id

    # Remove substitutions
    data_dir_parts = data_dir.split('/')[1:]
    prefix = ['_scan_'] + data_dir_parts + [subject_id] + ['func']
    func_scan_names = [os.path.split(elem)[-1] for elem in funcs]
    to_replace = []
    for elem in func_scan_names:
        bold_name = elem.split(subject_id + '_')[-1]
        bold_name = bold_name.split('.nii.gz')[0]
        to_replace.append(('..'.join(prefix + [elem]), bold_name))
    datasink.inputs.substitutions = to_replace

    #####################
    ### INIT WORKFLOW ###
    #####################
    workflow = Workflow(name=subId)
    workflow.base_dir = output_interm_dir

    ############################
    ######### PART (1a) #########
    # func -> discorr -> trim -> realign
    # OR
    # func -> trim -> realign
    # OR
    # func -> discorr -> realign
    # OR
    # func -> realign
    ############################
    if apply_dist_corr:
        workflow.connect([(encoding_file_writer, topup,
                           [('encoding_file', 'encoding_file')]),
                          (encoding_file_writer, apply_topup,
                           [('encoding_file', 'encoding_file')]),
                          (merger, topup, [('merged_file', 'in_file')]),
                          (func_scans, apply_topup, [('scan', 'in_files')]),
                          (topup, apply_topup,
                           [('out_fieldcoef', 'in_topup_fieldcoef'),
                            ('out_movpar', 'in_topup_movpar')])])
        if apply_trim:
            # Dist Corr + Trim
            workflow.connect([(apply_topup, trim, [('out_corrected', 'in_file')
                                                   ]),
                              (trim, realign_fsl, [('out_file', 'in_file')])])
        else:
            # Dist Corr + No Trim
            workflow.connect([(apply_topup, realign_fsl, [('out_corrected',
                                                           'in_file')])])
    else:
        if apply_trim:
            # No Dist Corr + Trim
            workflow.connect([(func_scans, trim, [('scan', 'in_file')]),
                              (trim, realign_fsl, [('out_file', 'in_file')])])
        else:
            # No Dist Corr + No Trim
            workflow.connect([
                (func_scans, realign_fsl, [('scan', 'in_file')]),
            ])

    ############################
    ######### PART (1n) #########
    # anat -> N4 -> bet
    # OR
    # anat -> bet
    ############################
    if apply_n4:
        workflow.connect([(n4_correction, brain_extraction_ants,
                           [('output_image', 'anatomical_image')])])
    else:
        brain_extraction_ants.inputs.anatomical_image = anat

    ##########################################
    ############### PART (2) #################
    # realign -> coreg -> mni (via t1)
    # t1 -> mni
    # covariate creation
    # plot creation
    ###########################################
    workflow.connect([
        (realign_fsl, plot_realign, [('par_file', 'realignment_parameters')]),
        (realign_fsl, plot_qa, [('out_file', 'dat_img')]),
        (realign_fsl, art, [('out_file', 'realigned_files'),
                            ('par_file', 'realignment_parameters')]),
        (realign_fsl, mean_epi, [('out_file', 'in_file')]),
        (realign_fsl, make_cov, [('par_file', 'realignment_parameters')]),
        (mean_epi, compute_mask, [('out_file', 'mean_volume')]),
        (compute_mask, art, [('brain_mask', 'mask_file')]),
        (art, make_cov, [('outlier_files', 'spike_id')]),
        (art, plot_realign, [('outlier_files', 'outliers')]),
        (plot_qa, make_cov, [('fd_outliers', 'fd_outliers')]),
        (brain_extraction_ants, coregistration, [('BrainExtractionBrain',
                                                  'fixed_image')]),
        (mean_epi, coregistration, [('out_file', 'moving_image')]),
        (brain_extraction_ants, normalization, [('BrainExtractionBrain',
                                                 'moving_image')]),
        (coregistration, merge_transforms, [('composite_transform', 'in2')]),
        (normalization, merge_transforms, [('composite_transform', 'in1')]),
        (merge_transforms, apply_transforms, [('out', 'transforms')]),
        (realign_fsl, apply_transforms, [('out_file', 'input_image')]),
        (apply_transforms, mean_norm_epi, [('output_image', 'in_file')]),
        (normalization, apply_transform_seg, [('composite_transform',
                                               'transforms')]),
        (brain_extraction_ants, apply_transform_seg,
         [('BrainExtractionSegmentation', 'input_image')]),
        (mean_norm_epi, plot_normalization_check, [('out_file', 'wra_img')])
    ])

    ##################################################
    ################### PART (3) #####################
    # epi (in mni) -> filter -> smooth -> down sample
    # OR
    # epi (in mni) -> filter -> down sample
    # OR
    # epi (in mni) -> smooth -> down sample
    # OR
    # epi (in mni) -> down sample
    ###################################################
    if apply_filter:
        workflow.connect([(apply_transforms, lp_filter, [('output_image',
                                                          'in_file')])])
        if apply_smooth:
            # Filtering + Smoothing
            workflow.connect([(lp_filter, smooth, [('out_file', 'in_file')]),
                              (smooth, down_samp, [('smoothed_file', 'in_file')
                                                   ])])
        else:
            # Filtering + No Smoothing
            workflow.connect([(lp_filter, down_samp, [('out_file', 'in_file')])
                              ])
    else:
        if apply_smooth:
            # No Filtering + Smoothing
            workflow.connect([
                (apply_transforms, smooth, [('output_image', 'in_file')]),
                (smooth, down_samp, [('smoothed_file', 'in_file')])
            ])
        else:
            # No Filtering + No Smoothing
            workflow.connect([(apply_transforms, down_samp, [('output_image',
                                                              'in_file')])])

    ##########################################
    ############### PART (4) #################
    # down sample -> save
    # plots -> save
    # covs -> save
    # t1 (in mni) -> save
    # t1 segmented masks (in mni) -> save
    ##########################################
    workflow.connect([
        (down_samp, datasink, [('out_file', 'functional.@down_samp')]),
        (plot_realign, datasink, [('plot', 'functional.@plot_realign')]),
        (plot_qa, datasink, [('plot', 'functional.@plot_qa')]),
        (plot_normalization_check, datasink,
         [('plot', 'functional.@plot_normalization')]),
        (make_cov, datasink, [('covariates', 'functional.@covariates')]),
        (normalization, datasink, [('warped_image', 'structural.@normanat')]),
        (apply_transform_seg, datasink, [('output_image',
                                          'structural.@normanatseg')])
    ])

    if not os.path.exists(os.path.join(output_dir, 'pipeline.png')):
        workflow.write_graph(dotfilename=os.path.join(output_dir, 'pipeline'),
                             format='png')

    print(f"Creating workflow for subject: {subject_id}")
    if ants_threads == 8:
        print(
            f"ANTs will utilize the default of {ants_threads} threads for parallel processing."
        )
    else:
        print(
            f"ANTs will utilize the user-requested {ants_threads} threads for parallel processing."
        )
    return workflow
def genNormalizeDwiWF(
        name='NormalizeDwi',
        base_dir=op.abspath('.'),
        input_dir=None,
        input_temp='%s/%s/*%s',
        input_temp_args=None,
        subjects=None,
        spm_standalone=None,
        mcr=None):
    """Build a nipype workflow that normalizes DWI-derived maps via SPM.

    For each subject the workflow grabs the reference T1, the SPM forward
    deformation field, the denoised DWI series, bvals/bvecs and the maps to
    normalize; gunzips everything SPM needs; builds an average b=0 reference
    image; and hands everything to the SPM normalization sub-workflow
    (``genSpmNormalizeDwiWF``). Results are collected in a ``DataSink``.

    Parameters
    ----------
    name : str
        Name of the generated workflow.
    base_dir : str
        Working directory for the workflow (default: CWD at import time).
    input_dir : str
        Directory containing one sub-directory per subject.
    input_temp : str
        DataGrabber path template ('%s/%s/*%s' -> subject/folder/suffix).
    input_temp_args : dict or None
        DataGrabber template arguments; ``None`` selects the defaults below.
    subjects : list of str or None
        Explicit subject list; if ``None``, sub-directories of ``input_dir``
        are auto-discovered.
    spm_standalone, mcr : str or None
        Passed through to the SPM normalization sub-workflow.

    Returns
    -------
    nipype Workflow
    """
    # Default DataGrabber template args live here (not in the signature) so
    # the mutable dict is not shared across calls.
    if input_temp_args is None:
        input_temp_args = {
            'ref_T1': [['subject_id', 'bias_corrected_images', '_mT1.nii.gz']],
            'forward_deformation_field':
                [['subject_id', 'forward_deformation_field', '_y_T1.nii.gz']],
            'denoised_dwi':
                [['subject_id', 'denoised_dwi_series', '_dwi_denoised.nii.gz']],
            'bval': [['subject_id', 'raw_bvals', '_bval.gz']],
            'bvec': [['subject_id', 'processed_bvecs', '_bvecs.gz']],
            'apply_to': [[
                'subject_id', 'apply_to_files',
                ['_ICVF.nii.gz', '_ISOVF.nii.gz', '_OD.nii.gz']
            ]]
        }

    # Generate WF
    wf = Workflow(name=name)
    wf.base_dir = base_dir

    # Node: subject list — the workflow iterates once per subject id.
    subjectList = Node(IdentityInterface(fields=['subject_id'],
                                         mandatory_inputs=True),
                       name="subjectList")
    if subjects:
        subjectList.iterables = ('subject_id', subjects)
    else:
        # Auto-discover subjects: every sub-directory of input_dir.
        subjectList.iterables = ('subject_id', [
            pth for pth in os.listdir(input_dir)
            if os.path.isdir(op.join(input_dir, pth))
        ])
    # BUGFIX: was a Python-2 print statement (SyntaxError on Python 3).
    print(subjectList.iterables)

    scanList = Node(DataGrabber(infields=['subject_id'],
                                outfields=[
                                    'ref_T1', 'forward_deformation_field',
                                    'denoised_dwi', 'bval', 'bvec', 'apply_to'
                                ]),
                    name="scanList")
    scanList.inputs.base_directory = input_dir
    scanList.inputs.ignore_exception = False
    scanList.inputs.raise_on_empty = True
    scanList.inputs.sort_filelist = False
    scanList.inputs.template = input_temp
    scanList.inputs.template_args = input_temp_args
    wf.connect(subjectList, "subject_id", scanList, "subject_id")

    # Unzip everything for SPM (SPM interfaces cannot read .nii.gz).
    gunzipT1 = Node(Gunzip(), name='gunzipT1')
    wf.connect(scanList, "ref_T1", gunzipT1, "in_file")

    gunzipDF = Node(Gunzip(), name='gunzipDF')
    wf.connect(scanList, "forward_deformation_field", gunzipDF, "in_file")

    gunzipbval = Node(Gunzip(), name='gunzipbval')
    wf.connect(scanList, "bval", gunzipbval, "in_file")

    gunzipbvec = Node(Gunzip(), name='gunzipbvec')
    wf.connect(scanList, "bvec", gunzipbvec, "in_file")

    # MapNode: apply_to is a list of files, gunzip each one.
    gunzipApplyTo = MapNode(Gunzip(), iterfield=["in_file"],
                            name='gunzipApplyTo')
    wf.connect(scanList, "apply_to", gunzipApplyTo, "in_file")

    # Extract b=0 frames from denoised DWI and average them to make a ref_dwi
    dwib0 = Node(DWIExtract(), name="dwib0")
    dwib0.inputs.bzero = True
    dwib0.inputs.out_file = "dwib0.nii.gz"
    wf.connect(scanList, "denoised_dwi", dwib0, "in_file")
    wf.connect(gunzipbval, "out_file", dwib0, "in_bval")
    wf.connect(gunzipbvec, "out_file", dwib0, "in_bvec")

    # Make an average image (over time, NaNs zeroed, uncompressed for SPM).
    avgb0 = Node(MeanImage(), name="avgb0")
    avgb0.inputs.nan2zeros = True
    avgb0.inputs.output_type = "NIFTI"
    avgb0.inputs.out_file = "avg_dwib0.nii"
    avgb0.inputs.dimension = "T"
    wf.connect(dwib0, "out_file", avgb0, "in_file")

    # spm Normalize WF
    spmNormProc = genSpmNormalizeDwiWF(name="spmNormProc",
                                       spm_standalone=spm_standalone,
                                       mcr=mcr)
    wf.connect(gunzipT1, "out_file", spmNormProc, "inputNode.ref_T1")
    wf.connect(gunzipDF, "out_file", spmNormProc,
               "inputNode.forward_deformation_field")
    wf.connect(avgb0, "out_file", spmNormProc, "inputNode.ref_dwi")
    wf.connect(gunzipApplyTo, "out_file", spmNormProc, "inputNode.apply_to")

    # Datasink
    datasink = Node(DataSink(base_directory=base_dir,
                             container='%sSink' % name),
                    name='Datasink')
    wf.connect(spmNormProc, "outputNode.normalized_files", datasink,
               "normalized_files")

    return wf
def gnl_correction(input, file_bash, file_coeff, python3_env, python2_env,
                   path_output, cleanup=True):
    """Correct an image for gradient nonlinearities.

    Pipeline (all intermediates written to ``<path_output>/grad``):
      1. Run the external gradient-unwarping toolbox via a bash wrapper to
         obtain an absolute warp field.
      2. Convert it to a relative FSL warp field and compute its Jacobian.
      3. Apply the warp to the input image with spline interpolation and clamp
         the result back into the input's intensity range.
      4. Compute gradient deviations (converted from percent to absolute) and
         a warped coordinate mapping.

    A corrected file ``<name>_gnlcorr<ext>`` is written to ``path_output``.
    The function needs FSL to be included in the search path.

    NOTE(review): the ``input`` parameter shadows the Python builtin of the
    same name; kept as-is for backward compatibility with existing callers.

    Inputs:
        *input: filename of input image.
        *file_bash: filename of bash script which calls the gradient
         unwarping toolbox.
        *file_coeff: filename of siemens coefficient file.
        *python3_env: name of python3 virtual environment.
        *python2_env: name of python2 virtual environment.
        *path_output: path where output is written.
        *cleanup: delete intermediate files (the whole ``grad`` subfolder).

    created by Daniel Haenelt
    Date created: 10-01-2020
    Last modified: 10-01-2020
    """
    import os
    import shutil as sh
    import numpy as np
    import nibabel as nb
    from nipype.interfaces.fsl import ConvertWarp, Merge
    from nipype.interfaces.fsl.maths import MeanImage
    from nipype.interfaces.fsl.preprocess import ApplyWarp
    from lib.io.get_filename import get_filename
    from lib.cmap.generate_coordinate_mapping import generate_coordinate_mapping

    # get fileparts
    path, name, ext = get_filename(input)

    # make subfolders
    path_grad = os.path.join(path_output, "grad")
    if not os.path.exists(path_grad):
        os.makedirs(path_grad)

    # parse arguments
    file_output = os.path.join(path_output, name + "_gnlcorr" + ext)
    file_warp = os.path.join(path_grad, "warp.nii.gz")
    file_jacobian = os.path.join(path_grad, "warp_jacobian.nii.gz")

    # run gradient unwarp
    # HACK: shell command built by string concatenation — paths containing
    # spaces or shell metacharacters will break; consider subprocess.run with
    # an argument list instead. The wrapper is expected to leave
    # trilinear.nii.gz and fullWarp_abs.nii.gz in path_grad.
    os.system("bash " + file_bash + \
              " " + python3_env + \
              " " + python2_env + \
              " " + path_grad + \
              " " + input + \
              " trilinear.nii.gz" + \
              " " + file_coeff)

    # now create an appropriate warpfield output (relative convention)
    convertwarp = ConvertWarp()
    convertwarp.inputs.reference = os.path.join(path_grad, "trilinear.nii.gz")
    convertwarp.inputs.warp1 = os.path.join(path_grad, "fullWarp_abs.nii.gz")
    convertwarp.inputs.abswarp = True  # input warp uses absolute convention
    convertwarp.inputs.out_relwarp = True  # output uses relative convention
    convertwarp.inputs.out_file = file_warp
    # --jacobian is not exposed as a dedicated nipype input, so pass it raw
    convertwarp.inputs.args = "--jacobian=" + file_jacobian
    convertwarp.run()

    # convertwarp's jacobian output has 8 frames, each combination of
    # one-sided differences, so average them (in place, over time)
    calcmean = MeanImage()
    calcmean.inputs.in_file = file_jacobian
    calcmean.inputs.dimension = "T"
    calcmean.inputs.out_file = file_jacobian
    calcmean.run()

    # apply warp to first volume (spline interpolation for the image data)
    applywarp = ApplyWarp()
    applywarp.inputs.in_file = input
    applywarp.inputs.ref_file = input
    applywarp.inputs.relwarp = True
    applywarp.inputs.field_file = file_warp
    applywarp.inputs.output_type = "NIFTI"
    applywarp.inputs.out_file = file_output
    applywarp.inputs.interp = "spline"
    applywarp.run()

    # normalise warped output image to initial intensity range: spline
    # interpolation can overshoot, so clamp to [min, max] of the input
    # (undershoots are zeroed, not clamped to min_data)
    data_img = nb.load(input)
    data_array = data_img.get_fdata()
    max_data = np.max(data_array)
    min_data = np.min(data_array)

    data_img = nb.load(file_output)
    data_array = data_img.get_fdata()
    data_array[data_array < min_data] = 0
    data_array[data_array > max_data] = max_data

    output = nb.Nifti1Image(data_array, data_img.affine, data_img.header)
    nb.save(output, file_output)

    # calculate gradient deviations (FSL command; writes grad_dev_{x,y,z})
    os.system("calc_grad_perc_dev" + \
              " --fullwarp=" + file_warp + \
              " -o " + os.path.join(path_grad,"grad_dev"))

    # merge directions into a single 4D file
    merger = Merge()
    merger.inputs.in_files = [
        os.path.join(path_grad, "grad_dev_x.nii.gz"),
        os.path.join(path_grad, "grad_dev_y.nii.gz"),
        os.path.join(path_grad, "grad_dev_z.nii.gz")
    ]
    merger.inputs.dimension = 't'
    merger.inputs.merged_file = os.path.join(path_grad, "grad_dev.nii.gz")
    merger.run()

    # convert from % deviation to absolute (overwrites merged file in place)
    data_img = nb.load(os.path.join(path_grad, "grad_dev.nii.gz"))
    data_array = data_img.get_fdata()
    data_array = data_array / 100

    output = nb.Nifti1Image(data_array, data_img.affine, data_img.header)
    nb.save(output, os.path.join(path_grad, "grad_dev.nii.gz"))

    # warp coordinate mapping: generate a cmap for the input, then warp it
    # with trilinear interpolation (coordinates must not be spline-smoothed)
    generate_coordinate_mapping(input,
                                0,
                                path_grad,
                                suffix="gnl",
                                time=False,
                                write_output=True)

    applywarp = ApplyWarp()
    applywarp.inputs.in_file = os.path.join(path_grad, "cmap_gnl.nii")
    applywarp.inputs.ref_file = input
    applywarp.inputs.relwarp = True
    applywarp.inputs.field_file = file_warp
    applywarp.inputs.out_file = os.path.join(path_grad, "cmap_gnl.nii")
    applywarp.inputs.interp = "trilinear"
    applywarp.inputs.output_type = "NIFTI"
    applywarp.run()

    # clean intermediate files (removes the whole grad subfolder, including
    # the warp field and grad_dev outputs)
    if cleanup:
        sh.rmtree(path_grad, ignore_errors=True)