def genDhollTract_wf(nfibers=50000, sshell=False, wdir=None, nthreads=1,
                     name='genDhollTract_wf'):
    """Set up workflow to generate tracts with Dhollander response.

    Parameters
    ----------
    nfibers : int
        Number of streamlines to keep after SIFT filtering.
    sshell : bool
        If True use single-shell tracking (Tensor_Prob); otherwise iFOD2.
    wdir : str or None
        Base working directory for the workflow nodes.
    nthreads : int
        Thread count passed to every MRtrix3 node.
    name : str
        Name of the returned workflow.

    Returns
    -------
    nipype.pipeline.engine.Workflow
        Workflow chaining tractography -> SIFT -> VTK conversion.
    """
    # Generate tract: seed twice the requested count so SIFT has a surplus
    # of streamlines to filter back down to `nfibers`.
    genTract = pe.Node(mrt.Tractography(), name='genTract')
    genTract.base_dir = wdir
    # BUG FIX: np.int was deprecated in NumPy 1.20 and removed in 1.24;
    # the builtin int() is the drop-in replacement.
    genTract.inputs.n_tracks = int(nfibers * 2)
    if sshell is False:  # Multi-shell
        genTract.inputs.out_file = 'space-Template_desc-iFOD2_tractography.tck'
        genTract.inputs.algorithm = 'iFOD2'
    else:  # Single-shell
        genTract.inputs.out_file = 'space-Template_desc-TensorProb_tractography.tck'
        genTract.inputs.algorithm = 'Tensor_Prob'
    genTract.inputs.nthreads = nthreads
    genTract.interface.num_threads = nthreads

    # Spherical-deconvolution informed filtering of tractography
    siftTract = pe.Node(mrt.SIFT(), name='siftTract')
    siftTract.base_dir = wdir
    siftTract.inputs.term_number = nfibers
    if sshell is False:  # Multi-shell
        siftTract.inputs.out_file = 'space-Template_desc-iFOD2_tractography.tck'
    else:  # Single-shell
        siftTract.inputs.out_file = 'space-Template_desc-TensorProb_tractography.tck'
    siftTract.inputs.nthreads = nthreads
    siftTract.interface.num_threads = nthreads

    # Convert the filtered tractogram to VTK
    tractConvert = pe.Node(mrt.TCKConvert(), name='convTract')
    tractConvert.base_dir = wdir
    if sshell is False:  # Multi-shell
        tractConvert.inputs.out_file = 'space-Template_desc-iFOD2_tractography.vtk'
    else:  # Single-shell
        tractConvert.inputs.out_file = 'space-Template_desc-TensorProb_tractography.vtk'
    tractConvert.inputs.nthreads = nthreads
    tractConvert.interface.num_threads = nthreads

    # Build workflow: generate -> filter -> convert
    workflow = pe.Workflow(name=name)
    workflow.connect([(genTract, siftTract, [('out_file', 'in_file')]),
                      (siftTract, tractConvert, [('out_file', 'in_file')])])

    return workflow
def generate_tracts(fod_wm: Path, tractogram: Path, seg_5tt: Path):
    """Run anatomically-constrained probabilistic (iFOD2) tractography.

    Tracks on the white-matter FOD image with ACT using the 5TT
    segmentation, seeding dynamically from the FODs, and writes the
    tractogram plus a ``seeds.csv`` table next to it.

    Parameters
    ----------
    fod_wm : Path
        White-matter FOD image; also used for dynamic seeding.
    tractogram : Path
        Output ``.tck`` path.
    seg_5tt : Path
        Five-tissue-type segmentation used for ACT.

    Returns
    -------
    Path
        The tractogram path that was written.
    """
    tracker = mrt.Tractography()
    opts = tracker.inputs
    opts.algorithm = "iFOD2"
    opts.in_file = fod_wm
    opts.act_file = seg_5tt
    opts.backtrack = True
    opts.crop_at_gmwmi = True
    opts.max_length = 250
    opts.seed_dynamic = fod_wm
    opts.select = 350000
    opts.out_file = tractogram
    opts.out_seeds = tractogram.parent / "seeds.csv"
    # opts.power = 3  (left disabled, as in the original)
    print(tracker.cmdline)
    tracker.run()
    return tracker.inputs.out_file
def generate_tracks(fod_wm: str, seg_5tt: str):
    """Run ACT-constrained iFOD2 tractography on a WM FOD image.

    Outputs (tractogram and seed table) are written into the directory
    containing ``fod_wm``, which is also used for dynamic seeding.

    Parameters
    ----------
    fod_wm : str
        Path to the white-matter FOD image.
    seg_5tt : str
        Path to the five-tissue-type segmentation used for ACT.

    Returns
    -------
    str
        Path of the tractogram (``.tck``) that was written.
    """
    working_dir = os.path.dirname(fod_wm)
    tk = mrt.Tractography()
    tk.inputs.in_file = fod_wm
    tk.inputs.out_file = f"{working_dir}/tractogram.tck"
    tk.inputs.act_file = seg_5tt
    tk.inputs.backtrack = True
    tk.inputs.algorithm = "iFOD2"
    tk.inputs.max_length = 250
    # BUG FIX: tckgen's -output_seeds writes an ASCII table, not a NIfTI
    # image, so the '.nii' extension was misleading; use '.csv' (consistent
    # with generate_tracts).
    tk.inputs.out_seeds = f"{working_dir}/seeds.csv"
    # tk.inputs.power = 3
    tk.inputs.crop_at_gmwmi = True
    tk.inputs.seed_dynamic = fod_wm
    tk.inputs.select = 350000
    print(tk.cmdline)
    tk.run()
    return tk.inputs.out_file
def preprocess_dwi_data(self, data, index, acqp, atlas2use,
                        ResponseSD_algorithm='tournier',
                        fod_algorithm='csd',
                        tract_algorithm='iFOD2',
                        streamlines_number='10M'):
    '''preprocessing of dwi data and connectome extraction

    Parameters
    ----------
    data: tuple |
        a tuple having the path to dwi, bvecs and bvals files.
        It is obtained using the function grab_data()
    index: str |
        Name of text file specifying the relationship between the images
        in --imain and the information in --acqp and --topup. E.g. index.txt
    acqp: str |
        Name of text file with information about the acquisition of the
        images in --imain
    atlas2use: str |
        The input node parcellation image
    ResponseSD_algorithm (optional): str |
        Select the algorithm to be used to complete the script operation;
        Options are: dhollander, fa, manual, msmt_5tt, tax, tournier
        (Default is 'tournier')
    fod_algorithm (optional): str |
        The algorithm to use for FOD estimation.
        (options are: csd, msmt_csd) (Default is 'csd')
    tract_algorithm (optional): str |
        specify the tractography algorithm to use. Valid choices are:
        FACT, iFOD1, iFOD2, Nulldist1, Nulldist2, SD_Stream, Seedtest,
        Tensor_Det, Tensor_Prob (Default is 'iFOD2')
    streamlines_number (optional): str |
        set the desired number of streamlines (Default is '10M')
    '''
    if len(data[0]) != len(data[1]):
        raise ValueError(
            'dwi datas do not have the same shape of bvec files')
    if len(data[0]) != len(data[2]):
        raise ValueError(
            'dwi datas do not have the same shape of bval files')
    if len(data[1]) != len(data[2]):
        # BUG FIX: message previously compared "bvec" with "bvec"
        raise ValueError(
            'bvec files do not have the same shape of bval files')

    def _count_to_int(count):
        # tckgen's `select` trait expects an int; accept MRtrix-style
        # '10M'/'500K' strings as well as plain integers.
        mult = {'K': 10 ** 3, 'M': 10 ** 6, 'B': 10 ** 9}
        text = str(count)
        if text and text[-1].upper() in mult:
            return int(float(text[:-1]) * mult[text[-1].upper()])
        return int(text)

    for subj in range(len(data[0])):
        subj_dir = os.path.split(data[0][subj])[0]
        # BUG FIX: the original derived every output basename from
        # data[0][0] (the FIRST subject) for all subjects; use the current
        # subject's own basename.
        base = os.path.split(data[0][subj])[1].split(".nii.gz")[0]

        def fname(suffix):
            # Path of a derived file in the subject's own directory.
            return os.path.join(subj_dir, base + suffix)

        print('Extracting B0 volume for subject', subj)
        self.roi = fsl.ExtractROI(in_file=data[0][subj],
                                  roi_file=fname('_nodiff.nii.gz'),
                                  t_min=0, t_size=1)
        self.roi.run()

        print('Converting into .mif for subject', subj)
        self.mrconvert = mrt.MRConvert()
        self.mrconvert.inputs.in_file = data[0][subj]
        self.mrconvert.inputs.grad_fsl = (data[1][subj], data[2][subj])
        self.mrconvert.inputs.out_file = fname('_dwi.mif')
        self.mrconvert.run()

        print('Denoising data for subject', subj)
        self.denoise = mrt.DWIDenoise()
        self.denoise.inputs.in_file = fname('_dwi.mif')
        self.denoise.inputs.noise = fname('_noise.mif')
        self.denoise.inputs.out_file = fname('_dwi_denoised.mif')
        self.denoise.run()

        # Back to NIfTI so FSL tools (eddy) can read the denoised data.
        self.denoise_convert = mrt.MRConvert()
        self.denoise_convert.inputs.in_file = fname('_dwi_denoised.mif')
        self.denoise_convert.inputs.out_file = fname('_dwi_denoised.nii.gz')
        self.denoise_convert.run()

        print('Skull stripping for subject', subj)
        self.mybet = fsl.BET()
        self.mybet.inputs.in_file = fname('_nodiff.nii.gz')
        self.mybet.inputs.out_file = fname('_denoised_brain.nii.gz')
        self.mybet.inputs.frac = 0.1
        self.mybet.inputs.robust = True
        self.mybet.inputs.mask = True
        self.mybet.run()

        print('Running Eddy for subject', subj)
        self.eddy = Eddy()
        self.eddy.inputs.in_file = fname('_dwi_denoised.nii.gz')
        # BUG FIX: the brain mask was assigned to in_file a second time,
        # clobbering the DWI input; it belongs in in_mask.
        self.eddy.inputs.in_mask = fname('_denoised_brain_mask.nii.gz')
        self.eddy.inputs.in_acqp = acqp
        # BUG FIX: the documented `index` argument was never used although
        # eddy requires it (--index).
        self.eddy.inputs.in_index = index
        self.eddy.inputs.in_bvec = data[1][subj]
        self.eddy.inputs.in_bval = data[2][subj]
        # BUG FIX: out_base must not carry an extension — eddy appends its
        # own suffixes (.nii.gz, .eddy_rotated_bvecs, ...).
        self.eddy.inputs.out_base = fname('_dwi_denoised_eddy')
        self.eddy.run()

        # Gradient table after eddy: rotated bvecs + original bvals.
        rotated_bvecs = fname('_dwi_denoised_eddy.eddy_rotated_bvecs')
        grad = (rotated_bvecs, data[2][subj])

        print('Running Bias Correction for subject', subj)
        self.bias_correct = mrt.DWIBiasCorrect()
        self.bias_correct.inputs.use_ants = True
        self.bias_correct.inputs.in_file = fname('_dwi_denoised_eddy.nii.gz')
        self.bias_correct.inputs.grad_fsl = grad
        self.bias_correct.inputs.bias = fname('_bias.mif')
        self.bias_correct.inputs.out_file = fname(
            '_dwi_denoised_eddy_unbiased.mif')
        self.bias_correct.run()

        print('Calculating Response function for subject', subj)
        self.resp = mrt.ResponseSD()
        self.resp.inputs.in_file = fname('_dwi_denoised_eddy_unbiased.mif')
        self.resp.inputs.algorithm = ResponseSD_algorithm
        self.resp.inputs.grad_fsl = grad
        self.resp.inputs.wm_file = fname('_response.txt')
        self.resp.run()

        print('Estimating FOD for subject', subj)
        self.fod = mrt.EstimateFOD()
        self.fod.inputs.algorithm = fod_algorithm
        self.fod.inputs.in_file = fname('_dwi_denoised_eddy_unbiased.mif')
        self.fod.inputs.wm_txt = fname('_response.txt')
        self.fod.inputs.mask_file = fname('_denoised_brain_mask.nii.gz')
        # BUG FIX: grad_fsl was mistakenly set to the response .txt file;
        # it takes a (bvecs, bvals) tuple.
        self.fod.inputs.grad_fsl = grad
        # BUG FIX: the FOD output was never named, yet tractography read
        # 'fods.mif' (which also missed the '_' separator); name it
        # explicitly and reuse it below.
        self.fod.inputs.wm_odf = fname('_fods.mif')
        self.fod.run()

        print('Extracting whole brain tract for subject', subj)
        self.tk = mrt.Tractography()
        self.tk.inputs.in_file = fname('_fods.mif')
        self.tk.inputs.roi_mask = fname('_denoised_brain_mask.nii.gz')
        self.tk.inputs.algorithm = tract_algorithm
        self.tk.inputs.seed_image = fname('_denoised_brain_mask.nii.gz')
        # BUG FIX: select is an integer trait; '10M'-style strings are
        # converted before assignment.
        self.tk.inputs.select = _count_to_int(streamlines_number)
        self.tk.inputs.out_file = fname(
            '_whole_brain_' + streamlines_number + '.tck')
        self.tk.run()

        print('Extracting connectome for subject', subj)
        self.mat = mrt.BuildConnectome()
        self.mat.inputs.in_file = fname(
            '_whole_brain_' + streamlines_number + '.tck')
        self.mat.inputs.in_parc = atlas2use
        self.mat.inputs.out_file = fname('_connectome.csv')
        self.mat.run()
# --- Imports -------------------------------------------------------------
import os
from os.path import abspath
from datetime import datetime
from IPython.display import Image
import pydot
from nipype import Workflow, Node, MapNode, Function, config
from nipype.interfaces.fsl import TOPUP, ApplyTOPUP, BET, ExtractROI, Eddy, FLIRT, FUGUE
from nipype.interfaces.fsl.maths import MathsCommand
import nipype.interfaces.utility as util
import nipype.interfaces.mrtrix3 as mrt

#Requirements for the workflow to run smoothly: All files as in NIfTI-format and named according to the following standard:
#Images are from the tonotopy DKI sequences on the 7T Philips Achieva scanner in Lund. It should work with any DKI sequence and possibly also a standard DTI but the setting for B0-corrections, epi-distortion corrections and eddy current corrections will be wrong.
#DKI file has a base name shared with bvec and bval in FSL format. E.g. "DKI.nii.gz" "DKI.bvec" and "DKI.bval".
#There is one b0-volume with reversed (P->A) phase encoding called DKIbase+_revenc. E.g. "DKI_revenc.nii.gz".
#Philips B0-map magnitude and phase offset (in Hz) images.
#One input file for topup describing the images as specified by topup.
#Set nbrOfThreads to number of available CPU threads to run the analyses.
### Need to make better revenc for the 15 version if we choose to use it (i.e. same TE and TR)

# --- Configuration: adjust paths/parameters for the local environment ----
#Set to relevant directory/parameters
datadir=os.path.abspath("/Users/ling-men/Documents/MRData/testDKI")
rawDKI_base='DKI_15'
B0map_base = 'B0map'
nbrOfThreads=6
print_graph = True
acqparam_file = os.path.join(datadir,'acqparams.txt')
index_file = os.path.join(datadir,'index.txt')
####
#config.enable_debug_mode()
# Derived input paths for the DKI acquisition (script continues below;
# this chunk does not show the rest of the workflow definition).
DKI_nii=os.path.join(datadir, rawDKI_base+'.nii.gz')
DKI_bval=os.path.join(datadir, rawDKI_base+'.bval')
# Script fragment: deterministic tensor tractography on ex-vivo data.
# NOTE(review): `bvecs`, `bvals` and `n_threads` are referenced below but
# defined earlier in the full script (outside this chunk) — confirm there.
dwi_file = os.path.abspath('/om/user/ksitek/exvivo/mrtrix/dwi.mif')
dti_file = os.path.abspath('/om/user/ksitek/exvivo/mrtrix/dti/dti.mif')
mask_file = os.path.join('/om/user/ksitek/exvivo/data/',
    'Reg_S64550_nii_b0-slice_mask.nii.gz')

# Tensor fit left disabled; `dti_file` above is its intended output.
'''
tsr = mrt.FitTensor()
tsr.inputs.in_file = os.path.join('/om/user/ksitek/exvivo/mrtrix/','dwi.mif')
tsr.inputs.in_mask = os.path.join('/om/user/ksitek/exvivo/data/','Reg_S64550_nii_b0-slice_mask.nii.gz')
tsr.inputs.grad_fsl = (bvecs, bvals)
tsr.inputs.nthreads = n_threads
tsr.inputs.out_file = dti_file
tsr.cmdline
tsr.run()
'''

# Deterministic tensor tracking seeded from the brain-slice mask,
# producing one million streamlines.
tk = mrt.Tractography()
tk.inputs.in_file = dwi_file
tk.inputs.algorithm = 'Tensor_Det'
tk.inputs.seed_image = mask_file
tk.inputs.grad_fsl = (bvecs, bvals)
tk.inputs.n_tracks = 1000000
tk.inputs.out_file = os.path.abspath(
    '/om/user/ksitek/exvivo/mrtrix/dti/tensor_det_1m.tck')
tk.inputs.nthreads = n_threads
tk.run()

# TrackVis conversion left disabled.
'''
import nipype.interfaces.mrtrix
tck2trk = mrtrix.MRTrix2TrackVis()
tck2trk.inputs.out_filename = os.path.abspath('/om/user/ksitek/exvivo/mrtrix/dti/mrtrix_detDTI.trk')
tck2trk.run()
'''