def suffix(): #pylint: disable=unused-variable
  """Determine the image file suffix that FSL tools will produce.

  Reads the FSLOUTPUTTYPE environment variable; the result is cached in the
  module-level variable _suffix so the environment is only inspected once.
  """
  import os
  from mrtrix3 import app
  global _suffix
  if _suffix:
    return _suffix
  fsl_output_type = os.environ.get('FSLOUTPUTTYPE', '')
  # Directly-supported output types
  known_types = { 'NIFTI': '.nii', 'NIFTI_GZ': '.nii.gz', 'NIFTI_PAIR': '.img' }
  if fsl_output_type in known_types:
    _suffix = known_types[fsl_output_type]
    app.debug(fsl_output_type + ' -> ' + _suffix)
  elif fsl_output_type == 'NIFTI_PAIR_GZ':
    app.error('MRtrix3 does not support compressed NIFTI pairs; please change FSLOUTPUTTYPE environment variable')
  elif fsl_output_type:
    app.warn('Unrecognised value for environment variable FSLOUTPUTTYPE (\"' + fsl_output_type + '\"): Expecting compressed NIfTIs, but FSL commands may fail')
    _suffix = '.nii.gz'
  else:
    app.warn('Environment variable FSLOUTPUTTYPE not set; FSL commands may fail, or script may fail to locate FSL command outputs')
    _suffix = '.nii.gz'
  return _suffix
def checkGIFinput(image_path):
  """Sanity-check that the image at image_path plausibly is a GIF segmentation.

  Errors out (via app.error) if the image has fewer than 4 axes, or if any of
  the first four axes has a size of 1.
  """
  from mrtrix3 import app, image
  dims = image.Header(image_path).size()
  if len(dims) < 4:
    app.error('Image \'' + image_path + '\' does not look like GIF segmentation (less than 4 spatial dimensions)')
  if min(dims[:4]) == 1:
    app.error('Image \'' + image_path + '\' does not look like GIF segmentation (axis with size 1)')
def getInputs(): #pylint: disable=unused-variable
  """Import the input T1 image (and optional mask / T2 image) into the
  script's temporary directory, regridding strides where required."""
  from mrtrix3 import app, image, path, run
  # The input T1 must contain 3D spatial information
  image.check3DNonunity(path.fromUser(app.args.input, False))
  t1_source = path.fromUser(app.args.input, True)
  run.command('mrconvert ' + t1_source + ' ' + path.toTemp('input.mif', True))
  if app.args.mask:
    mask_source = path.fromUser(app.args.mask, True)
    run.command('mrconvert ' + mask_source + ' ' + path.toTemp('mask.mif', True) + ' -datatype bit -strides -1,+2,+3')
  if app.args.t2:
    # T2 must be defined on the same voxel grid as the T1
    if not image.match(app.args.input, app.args.t2):
      app.error('Provided T2 image does not match input T1 image')
    t2_source = path.fromUser(app.args.t2, True)
    run.command('mrconvert ' + t2_source + ' ' + path.toTemp('T2.nii', True) + ' -strides -1,+2,+3')
def exeName(name): #pylint: disable=unused-variable
  """Return the invocable name of an FSL executable, accounting for the
  'fsl5.0-' prefix applied by some Linux distribution packages."""
  from mrtrix3 import app
  from distutils.spawn import find_executable
  prefixed = 'fsl5.0-' + name
  if find_executable(prefixed):
    output = prefixed
  elif find_executable(name):
    output = name
  else:
    app.error('Could not find FSL program \"' + name + '\"; please verify FSL install')
  app.debug(output)
  return output
def checkFirst(prefix, structures): #pylint: disable=unused-variable
  """Verify that FSL FIRST produced a .vtk file for every requested structure,
  waiting on SGE job completion where applicable; errors out otherwise."""
  import os
  from mrtrix3 import app, file, path # pylint: disable=redefined-builtin
  vtk_paths = [ prefix + '-' + struct + '_first.vtk' for struct in structures ]
  num_present = sum(os.path.exists(vtk_path) for vtk_path in vtk_paths)
  if num_present == len(vtk_paths):
    return
  if 'SGE_ROOT' in os.environ:
    # FIRST delegates to the grid engine; outputs appear asynchronously
    app.console('FSL FIRST job has been submitted to SGE; awaiting completion')
    app.console('(note however that FIRST may fail silently, and hence this script may hang indefinitely)')
    file.waitFor(vtk_paths)
  else:
    app.error('FSL FIRST has failed; only ' + str(num_present) + ' of ' + str(len(vtk_paths)) + ' structures were segmented successfully (check ' + path.toTemp('first.logs', False) + ')')
def findImage(name):
  """Locate an output image from an FSL tool, which may carry any of the
  NIfTI-family suffixes regardless of the suffix implied by FSLOUTPUTTYPE.

  Returns the path to the file found, or errors out (via app.error) if none
  of the candidate suffixes matches.
  """
  import os
  from mrtrix3 import app
  # Bug fix: splitting the full path on '.' truncated any path whose directory
  # component contains a period; only strip the extension from the final element
  basename = os.path.join(os.path.dirname(name), os.path.basename(name).split('.')[0])
  if os.path.isfile(basename + suffix()):
    app.debug('Image at expected location: \"' + basename + suffix() + '\"')
    return basename + suffix()
  for suf in ['.nii', '.nii.gz', '.img']:
    if os.path.isfile(basename + suf):
      app.debug('Expected image at \"' + basename + suffix() + '\", but found at \"' + basename + suf + '\"')
      return basename + suf
  app.error('Unable to find FSL output file for path \"' + name + '\"')
  # app.error() terminates; return only to satisfy static analysis
  return ''
def findImage(name): #pylint: disable=unused-variable
  """Locate an output image from an FSL tool; the actual suffix on disk may
  differ from the one implied by FSLOUTPUTTYPE, so all candidates are tried."""
  import os
  from mrtrix3 import app
  prefix = os.path.join(os.path.dirname(name), os.path.basename(name).split('.')[0])
  expected = prefix + suffix()
  if os.path.isfile(expected):
    app.debug('Image at expected location: \"' + expected + '\"')
    return expected
  for candidate_suffix in ['.nii', '.nii.gz', '.img']:
    candidate = prefix + candidate_suffix
    if os.path.isfile(candidate):
      app.debug('Expected image at \"' + expected + '\", but found at \"' + candidate + '\"')
      return candidate
  app.error('Unable to find FSL output file for path \"' + name + '\"')
  return ''
def statistic(image_path, stat, options=''): #pylint: disable=unused-variable
  """Compute a statistic from an image by running 'mrstats' and capturing its
  standard output; 'options' is a string of extra command-line options."""
  import shlex, subprocess
  from mrtrix3 import app, run
  cmd = [ run.exeName(run.versionMatch('mrstats')), image_path, '-output', stat ]
  if options:
    cmd.extend(shlex.split(options))
  if app.verbosity > 1:
    app.console('Command: \'' + ' '.join(cmd) + '\' (piping data to local storage)')
  process = subprocess.Popen(cmd, stdout=subprocess.PIPE, stderr=None)
  stdout_data, dummy_err = process.communicate()
  result = stdout_data.rstrip().decode('utf-8')
  if app.verbosity > 1:
    app.console('Result: ' + result)
  if process.returncode:
    app.error('Error trying to calculate statistic \'' + stat + '\' from image \'' + image_path + '\'')
  return result
def _shebang(item):
  """Extract the shebang interpreter invocation from a script file.

  Returns the shebang as a list of tokens (interpreter path plus arguments),
  or an empty list if the file cannot be found, is binary, or has no shebang.
  On Windows the interpreter path is resolved manually via distutils.
  """
  import os
  from mrtrix3 import app
  from distutils.spawn import find_executable
  # If a complete path has been provided rather than just a file name, don't perform any additional file search
  if os.sep in item:
    path = item
  else:
    path = versionMatch(item)
    if path == item:
      path = find_executable(exeName(item))
  if not path:
    app.debug('File \"' + item + '\": Could not find file to query')
    return []
  # Read the first 1024 bytes of the file
  with open(path, 'rb') as f:
    data = f.read(1024)
  # Try to find the shebang line
  for line in data.splitlines():
    # Are there any non-text characters? If so, it's a binary file, so no need to looking for a shebang
    try:
      line = str(line.decode('utf-8'))
    except:
      app.debug('File \"' + item + '\": Not a text file')
      return []
    line = line.strip()
    if len(line) > 2 and line[0:2] == '#!':
      # Need to strip first in case there's a gap between the shebang symbol and the interpreter path
      shebang = line[2:].strip().split(' ')
      if app.isWindows():
        # On Windows, /usr/bin/env can't be easily found, and any direct interpreter path will have a similar issue.
        # Instead, manually find the right interpreter to call using distutils
        if os.path.basename(shebang[0]) == 'env':
          new_shebang = [ os.path.abspath(find_executable(exeName(shebang[1]))) ]
          new_shebang.extend(shebang[2:])
          shebang = new_shebang
        else:
          new_shebang = [ os.path.abspath(find_executable(exeName(os.path.basename(shebang[0])))) ]
          new_shebang.extend(shebang[1:])
          shebang = new_shebang
        # NOTE(review): this validity check appears to guard the Windows-specific
        # resolution above (on POSIX the split of a non-empty '#!' line cannot be
        # empty); original indentation was lost in this file — confirm placement
        if not shebang or not shebang[0]:
          app.error('Malformed shebang in file \"' + item + '\": \"' + line + '\"')
      app.debug('File \"' + item + '\": string \"' + line + '\": ' + str(shebang))
      return shebang
  app.debug('File \"' + item + '\": No shebang found')
  return []
def match(image_one, image_two, max_dim=0, check_transform=True): #pylint: disable=unused-variable, too-many-return-statements
  """Test whether two images match in axis sizes, voxel spacing, and
  (optionally) header transform.

  Arguments may be Header instances or file paths. If max_dim is non-zero,
  only the first max_dim axes are compared. Returns True on match.
  """
  import math
  from mrtrix3 import app
  if not isinstance(image_one, Header):
    if not isinstance(image_one, str):
      app.error('Error trying to test \'' + str(image_one) + '\': Not an image header or file path')
    image_one = Header(image_one)
  if not isinstance(image_two, Header):
    if not isinstance(image_two, str):
      app.error('Error trying to test \'' + str(image_two) + '\': Not an image header or file path')
    image_two = Header(image_two)
  debug_prefix = '\'' + image_one.name() + '\' \'' + image_two.name() + '\''
  # Handle possibility of only checking up to a certain axis
  if max_dim:
    if max_dim > min(len(image_one.size()), len(image_two.size())):
      app.debug(debug_prefix + ' dimensionality less than specified maximum (' + str(max_dim) + ')')
      return False
  else:
    if len(image_one.size()) != len(image_two.size()):
      # Bug fix: this string literal was previously broken across a physical line
      # break, which is a syntax error in a single-quoted Python string
      app.debug(debug_prefix + ' dimensionality mismatch (' + str(len(image_one.size())) + ' vs. ' + str(len(image_two.size())) + ')')
      return False
    max_dim = len(image_one.size())
  # Image dimensions
  if not image_one.size()[:max_dim] == image_two.size()[:max_dim]:
    app.debug(debug_prefix + ' axis size mismatch (' + str(image_one.size()) + ' ' + str(image_two.size()) + ')')
    return False
  # Voxel size (relative tolerance; skip axes with zero / NaN spacing)
  for one, two in zip(image_one.spacing()[:max_dim], image_two.spacing()[:max_dim]):
    if one and two and not math.isnan(one) and not math.isnan(two):
      if (abs(two-one) / (0.5*(one+two))) > 1e-04:
        app.debug(debug_prefix + ' voxel size mismatch (' + str(image_one.spacing()) + ' ' + str(image_two.spacing()) + ')')
        return False
  # Image transform (looser tolerance on the translation column)
  if check_transform:
    for line_one, line_two in zip(image_one.transform(), image_two.transform()):
      for one, two in zip(line_one[:3], line_two[:3]):
        if abs(one-two) > 1e-4:
          app.debug(debug_prefix + ' transform (rotation) mismatch (' + str(image_one.transform()) + ' ' + str(image_two.transform()) + ')')
          return False
      if abs(line_one[3]-line_two[3]) > 1e-2:
        app.debug(debug_prefix + ' transform (translation) mismatch (' + str(image_one.transform()) + ' ' + str(image_two.transform()) + ')')
        return False
  # Everything matches!
  app.debug(debug_prefix + ' image match')
  return True
def getScheme(arg): #pylint: disable=unused-variable
  """Derive a phase-encoding scheme (one row per volume) from an image header.

  Returns the stored 'pe_scheme' if present; otherwise constructs one from
  'PhaseEncodingDirection' (and 'TotalReadoutTime' if available); returns
  None if no phase-encoding information exists in the header.
  """
  from mrtrix3 import app, image
  if not isinstance(arg, image.Header):
    if not isinstance(arg, str):
      app.error('Error trying to derive phase-encoding scheme from \'' + str(arg) + '\': Not an image header or file path')
    arg = image.Header(arg)
  keyval = arg.keyval()
  if 'pe_scheme' in keyval:
    app.debug(str(keyval['pe_scheme']))
    return keyval['pe_scheme']
  if 'PhaseEncodingDirection' not in keyval:
    return None
  row = direction(keyval['PhaseEncodingDirection'])
  if 'TotalReadoutTime' in keyval:
    row = [ float(value) for value in row ]
    row.append(float(keyval['TotalReadoutTime']))
  num_volumes = arg.size()[3] if len(arg.size()) >= 4 else 1
  app.debug(str(row) + ' x ' + str(num_volumes) + ' rows')
  return [ row ] * num_volumes
def axis2dir(string): #pylint: disable=unused-variable
  """Convert a NIfTI axis/direction code ('i', 'i-', 'j', 'j-', 'k', 'k-')
  into the corresponding 3-vector; errors out on any other input."""
  from mrtrix3 import app
  mapping = { 'i'  : [ 1, 0, 0],
              'i-' : [-1, 0, 0],
              'j'  : [ 0, 1, 0],
              'j-' : [ 0,-1, 0],
              'k'  : [ 0, 0, 1],
              'k-' : [ 0, 0,-1] }
  if string not in mapping:
    app.error('Unrecognized NIfTI axis & direction specifier: ' + string)
  direction = mapping[string]
  app.debug(string + ' -> ' + str(direction))
  return direction
def __init__(self, image_path):
  """Load the header of the image at image_path by invoking 'mrinfo -json_all'
  into a temporary JSON file, then parsing that file into read-only members."""
  import json, os, subprocess
  from mrtrix3 import app, path, run
  filename = path.newTemporary('json')
  command = [ run.exeName(run.versionMatch('mrinfo')), image_path, '-json_all', filename ]
  if app.verbosity > 1:
    app.console('Loading header for image file \'' + image_path + '\'')
  app.debug(str(command))
  result = subprocess.call(command, stdout=None, stderr=None)
  if result:
    app.error('Could not access header information for image \'' + image_path + '\'')
  try:
    with open(filename, 'r') as f:
      data = json.load(f)
  except UnicodeDecodeError:
    # Lenient fallback for JSON containing invalid UTF-8
    # NOTE(review): under Python 3 this re-opens in text mode, so .read() may
    # raise again and str has no .decode(); 'rb' would be needed — confirm
    # which Python version this targets
    with open(filename, 'r') as f:
      data = json.loads(f.read().decode('utf-8', errors='replace'))
  os.remove(filename)
  try:
    #self.__dict__.update(data)
    # Load the individual header elements manually, for a couple of reasons:
    # - So that pylint knows that they'll be there
    # - Write to private members, and give read-only access
    self._name = data['name']
    self._size = data['size']
    self._spacing = data['spacing']
    self._strides = data['strides']
    self._format = data['format']
    self._datatype = data['datatype']
    self._intensity_offset = data['intensity_offset']
    self._intensity_scale = data['intensity_scale']
    self._transform = data['transform']
    if not 'keyval' in data or not data['keyval']:
      self._keyval = {}
    else:
      self._keyval = data['keyval']
  except:
    app.error('Error in reading header information from file \'' + image_path + '\'')
  app.debug(str(vars(self)))
def match(image_one, image_two, max_dim=0): #pylint: disable=unused-variable, too-many-return-statements
  """Test whether two images match in axis sizes, voxel spacing, and header
  transform.

  Arguments may be Header instances or file paths. If max_dim is non-zero,
  only the first max_dim axes are compared. Returns True on match.
  """
  import math
  from mrtrix3 import app
  if not isinstance(image_one, Header):
    if not isinstance(image_one, str):
      app.error('Error trying to test \'' + str(image_one) + '\': Not an image header or file path')
    image_one = Header(image_one)
  if not isinstance(image_two, Header):
    if not isinstance(image_two, str):
      app.error('Error trying to test \'' + str(image_two) + '\': Not an image header or file path')
    image_two = Header(image_two)
  debug_prefix = '\'' + image_one.name() + '\' \'' + image_two.name() + '\''
  # Handle possibility of only checking up to a certain axis
  if max_dim:
    if max_dim > min(len(image_one.size()), len(image_two.size())):
      app.debug(debug_prefix + ' dimensionality less than specified maximum (' + str(max_dim) + ')')
      return False
  else:
    if len(image_one.size()) != len(image_two.size()):
      # Bug fix: this string literal was previously broken across a physical line
      # break, which is a syntax error in a single-quoted Python string
      app.debug(debug_prefix + ' dimensionality mismatch (' + str(len(image_one.size())) + ' vs. ' + str(len(image_two.size())) + ')')
      return False
    max_dim = len(image_one.size())
  # Image dimensions
  if not image_one.size()[:max_dim] == image_two.size()[:max_dim]:
    app.debug(debug_prefix + ' axis size mismatch (' + str(image_one.size()) + ' ' + str(image_two.size()) + ')')
    return False
  # Voxel size (relative tolerance; skip axes with zero / NaN spacing)
  for one, two in zip(image_one.spacing()[:max_dim], image_two.spacing()[:max_dim]):
    if one and two and not math.isnan(one) and not math.isnan(two):
      if (abs(two-one) / (0.5*(one+two))) > 1e-04:
        app.debug(debug_prefix + ' voxel size mismatch (' + str(image_one.spacing()) + ' ' + str(image_two.spacing()) + ')')
        return False
  # Image transform (looser tolerance on the translation column)
  for line_one, line_two in zip(image_one.transform(), image_two.transform()):
    for one, two in zip(line_one[:3], line_two[:3]):
      if abs(one-two) > 1e-4:
        app.debug(debug_prefix + ' transform (rotation) mismatch (' + str(image_one.transform()) + ' ' + str(image_two.transform()) + ')')
        return False
    if abs(line_one[3]-line_two[3]) > 1e-2:
      app.debug(debug_prefix + ' transform (translation) mismatch (' + str(image_one.transform()) + ' ' + str(image_two.transform()) + ')')
      return False
  # Everything matches!
  app.debug(debug_prefix + ' image match')
  return True
def direction(string): #pylint: disable=unused-variable
  """Convert a phase-encode direction specifier into a 3-vector.

  Accepts either a signed numeric axis index ('0', '-1', ...) or a code
  string ('lr', 'ap', 'i-', 'j', ...). Errors out on unrecognised input.
  """
  from mrtrix3 import app
  pe_dir = ''
  try:
    pe_axis = abs(int(string))
    if pe_axis > 2:
      app.error('When specified as a number, phase encode axis must be either 0, 1 or 2 (positive or negative)')
    # Bug fix: str has no .contains() method; the resulting AttributeError was
    # silently swallowed by a bare except, so numeric specifiers wrongly fell
    # through to the string branch and always errored out
    reverse = ('-' in string) # Allow -0
    pe_dir = [0,0,0]
    pe_dir[pe_axis] = -1 if reverse else 1
  except ValueError:
    # Not a number: interpret as an anatomical or axis code string
    string = string.lower()
    if string == 'lr':
      pe_dir = [1,0,0]
    elif string == 'rl':
      pe_dir = [-1,0,0]
    elif string == 'pa':
      pe_dir = [0,1,0]
    elif string == 'ap':
      pe_dir = [0,-1,0]
    elif string == 'is':
      pe_dir = [0,0,1]
    elif string == 'si':
      pe_dir = [0,0,-1]
    elif string == 'i':
      pe_dir = [1,0,0]
    elif string == 'i-':
      pe_dir = [-1,0,0]
    elif string == 'j':
      pe_dir = [0,1,0]
    elif string == 'j-':
      pe_dir = [0,-1,0]
    elif string == 'k':
      pe_dir = [0,0,1]
    elif string == 'k-':
      pe_dir = [0,0,-1]
    else:
      app.error('Unrecognized phase encode direction specifier: ' + string)
  app.debug(string + ' -> ' + str(pe_dir))
  return pe_dir
def getScheme(arg): #pylint: disable=unused-variable
  """Obtain the phase-encoding scheme for an image: either the 'pe_scheme'
  table stored in the header, or one synthesised from 'PhaseEncodingDirection'
  / 'TotalReadoutTime'; None if neither is available."""
  from mrtrix3 import app, image
  if not isinstance(arg, image.Header):
    if not isinstance(arg, str):
      app.error('Error trying to derive phase-encoding scheme from \'' + str(arg) + '\': Not an image header or file path')
    arg = image.Header(arg)
  # Prefer an explicitly-stored scheme
  if 'pe_scheme' in arg.keyval():
    app.debug(str(arg.keyval()['pe_scheme']))
    return arg.keyval()['pe_scheme']
  if 'PhaseEncodingDirection' not in arg.keyval():
    return None
  pe_line = direction(arg.keyval()['PhaseEncodingDirection'])
  if 'TotalReadoutTime' in arg.keyval():
    pe_line = [ float(value) for value in pe_line ] + [ float(arg.keyval()['TotalReadoutTime']) ]
  # One identical row per volume (4th axis size, or 1 for a 3D image)
  num_volumes = 1 if len(arg.size()) < 4 else arg.size()[3]
  app.debug(str(pe_line) + ' x ' + str(num_volumes) + ' rows')
  return [ pe_line ] * num_volumes
def dir(string):
  """Convert a phase-encode direction specifier into a 3-vector.

  Accepts either a signed numeric axis index ('0', '-1', ...) or a code
  string ('lr', 'ap', 'i-', 'j', ...). Errors out on unrecognised input.
  (Name shadows the builtin 'dir'; kept for interface compatibility.)
  """
  from mrtrix3 import app
  pe_dir = ''
  try:
    pe_axis = abs(int(string))
    if pe_axis > 2:
      app.error('When specified as a number, phase encode axis must be either 0, 1 or 2 (positive or negative)')
    # Bug fix: str has no .contains() method; the resulting AttributeError was
    # silently swallowed by a bare except, so numeric specifiers wrongly fell
    # through to the string branch and always errored out
    reverse = ('-' in string) # Allow -0
    pe_dir = [0,0,0]
    pe_dir[pe_axis] = -1 if reverse else 1
  except ValueError:
    # Not a number: interpret as an anatomical or axis code string
    string = string.lower()
    if string == 'lr':
      pe_dir = [1,0,0]
    elif string == 'rl':
      pe_dir = [-1,0,0]
    elif string == 'pa':
      pe_dir = [0,1,0]
    elif string == 'ap':
      pe_dir = [0,-1,0]
    elif string == 'is':
      pe_dir = [0,0,1]
    elif string == 'si':
      pe_dir = [0,0,-1]
    elif string == 'i':
      pe_dir = [1,0,0]
    elif string == 'i-':
      pe_dir = [-1,0,0]
    elif string == 'j':
      pe_dir = [0,1,0]
    elif string == 'j-':
      pe_dir = [0,-1,0]
    elif string == 'k':
      pe_dir = [0,0,1]
    elif string == 'k-':
      pe_dir = [0,0,-1]
    else:
      app.error('Unrecognized phase encode direction specifier: ' + string)
  app.debug(string + ' -> ' + str(pe_dir))
  return pe_dir
def checkFirst(prefix, structures): #pylint: disable=unused-variable
  """Confirm FSL FIRST segmented every requested structure; if running under
  SGE, wait for outputs to appear, otherwise error out on missing files."""
  import os
  from mrtrix3 import app, file, path # pylint: disable=redefined-builtin
  expected = [ prefix + '-' + struct + '_first.vtk' for struct in structures ]
  found = [ f for f in expected if os.path.exists(f) ]
  if len(found) == len(expected):
    return
  if 'SGE_ROOT' in os.environ:
    app.console('FSL FIRST job has been submitted to SGE; awaiting completion')
    app.console('(note however that FIRST may fail silently, and hence this script may hang indefinitely)')
    file.waitFor(expected)
  else:
    app.error('FSL FIRST has failed; only ' + str(len(found)) + ' of ' + str(len(expected)) + ' structures were segmented successfully (check ' + path.toTemp('first.logs', False) + ')')
def eddyBinary(cuda):
  """Locate the appropriate FSL 'eddy' executable, optionally preferring the
  CUDA build (falling back to the standard build if it is absent)."""
  import os
  from mrtrix3 import app
  from distutils.spawn import find_executable
  if cuda:
    if find_executable('eddy_cuda'):
      app.debug('Selecting CUDA version of eddy')
      return 'eddy_cuda'
    app.warn('CUDA version of eddy not found; running standard version')
  # Try the known CPU binary names in order of preference
  for candidate in ('eddy_openmp', 'eddy', 'fsl5.0-eddy'):
    if find_executable(candidate):
      app.debug(candidate)
      return candidate
  app.error('Could not find FSL program eddy; please verify FSL install')
def statistic(image_path, stat, options=''): #pylint: disable=unused-variable
  """Compute a statistic from an image by running 'mrstats' and capturing its
  standard output; 'options' is a string of extra command-line options."""
  import shlex, subprocess
  from mrtrix3 import app, run
  command = [ run.exeName(run.versionMatch('mrstats')), image_path, '-output', stat ]
  if options:
    command.extend(shlex.split(options))
  if app.verbosity > 1:
    app.console('Command: \'' + ' '.join(command) + '\' (piping data to local storage)')
  proc = subprocess.Popen(command, stdout=subprocess.PIPE, stderr=None)
  result, dummy_err = proc.communicate()
  result = result.rstrip().decode('utf-8')
  if app.verbosity > 1:
    app.console('Result: ' + result)
  if proc.returncode:
    # Bug fix: the error message previously concatenated the function object
    # 'statistic' (raising TypeError) instead of the 'stat' argument
    app.error('Error trying to calculate statistic \'' + stat + '\' from image \'' + image_path + '\'')
  return result
def versionMatch(item):
  """Locate the executable for an MRtrix3 command, preferring the
  version-matched binary in the MRtrix3 bin/ directory over anything on PATH."""
  import os
  from distutils.spawn import find_executable
  from mrtrix3 import app
  global _mrtrix_bin_path, _mrtrix_exe_list
  if item not in _mrtrix_exe_list:
    # Not an MRtrix3 command; leave the name untouched
    app.debug('Command ' + item + ' not found in MRtrix3 bin/ directory')
    return item
  local_path = os.path.join(_mrtrix_bin_path, exeName(item))
  if os.path.isfile(local_path):
    app.debug('Version-matched executable for ' + item + ': ' + local_path)
    return local_path
  system_path = find_executable(exeName(item))
  if system_path and os.path.isfile(system_path):
    app.debug('Using non-version-matched executable for ' + item + ': ' + system_path)
    return system_path
  app.error('Unable to find executable for MRtrix3 command ' + item)
def suffix():
  """Return the image file suffix corresponding to the FSLOUTPUTTYPE
  environment variable (defaulting to '.nii.gz' with a warning)."""
  import os
  from mrtrix3 import app
  fsl_output_type = os.environ.get('FSLOUTPUTTYPE', '')
  if fsl_output_type == 'NIFTI':
    app.debug('NIFTI -> .nii')
    return '.nii'
  if fsl_output_type == 'NIFTI_GZ':
    app.debug('NIFTI_GZ -> .nii.gz')
    return '.nii.gz'
  if fsl_output_type == 'NIFTI_PAIR':
    app.debug('NIFTI_PAIR -> .img')
    return '.img'
  if fsl_output_type == 'NIFTI_PAIR_GZ':
    app.error('MRtrix3 does not support compressed NIFTI pairs; please change FSLOUTPUTTYPE environment variable')
  # Bug fix: an unrecognised (but set) value previously produced the misleading
  # 'not set' warning; distinguish the two cases (consistent with the cached
  # suffix() variant elsewhere in this codebase)
  if fsl_output_type:
    app.warn('Unrecognised value for environment variable FSLOUTPUTTYPE (\"' + fsl_output_type + '\"): Expecting compressed NIfTIs, but FSL commands may fail')
  else:
    app.warn('Environment variable FSLOUTPUTTYPE not set; FSL commands may fail, or script may fail to locate FSL command outputs')
  return '.nii.gz'
def versionMatch(item):
  """Locate the executable for an MRtrix3 command, preferring the
  version-matched binary in the MRtrix3 bin/ directory over anything on PATH."""
  # Bug fix: this function used os.path.join / os.path.isfile without
  # importing os, raising NameError at the first lookup
  import os
  from distutils.spawn import find_executable
  from mrtrix3 import app
  global _mrtrix_bin_path, _mrtrix_exe_list
  if not item in _mrtrix_exe_list:
    app.debug('Command ' + item + ' not found in MRtrix3 bin/ directory')
    return item
  exe_path_manual = os.path.join(_mrtrix_bin_path, exeName(item))
  if os.path.isfile(exe_path_manual):
    app.debug('Version-matched executable for ' + item + ': ' + exe_path_manual)
    return exe_path_manual
  exe_path_sys = find_executable(exeName(item))
  if exe_path_sys and os.path.isfile(exe_path_sys):
    app.debug('Using non-version-matched executable for ' + item + ': ' + exe_path_sys)
    return exe_path_sys
  app.error('Unable to find executable for MRtrix3 command ' + item)
  # app.error() terminates; return only to satisfy static analysis
  return ''
def execute(): #pylint: disable=unused-variable
  """Estimate a response function from the shells in 'dwi.mif' using the
  voxel selection in 'in_voxels.mif' (presumably the user-supplied / 'manual'
  selection strategy — TODO confirm from the enclosing script)."""
  import os, shutil
  from mrtrix3 import app, image, path, run
  # Round shell b-values to integers for use on command lines
  shells = [ int(round(float(x))) for x in image.mrinfo('dwi.mif', 'shell_bvalues').split() ]
  # Get lmax information (if provided)
  lmax = [ ]
  if app.args.lmax:
    lmax = [ int(x.strip()) for x in app.args.lmax.split(',') ]
    if not len(lmax) == len(shells):
      app.error('Number of manually-defined lmax\'s (' + str(len(lmax)) + ') does not match number of b-value shells (' + str(len(shells)) + ')')
    for l in lmax:
      if l%2:
        app.error('Values for lmax must be even')
      if l<0:
        app.error('Values for lmax must be non-negative')
  # Do we have directions, or do we need to calculate them?
  if not os.path.exists('dirs.mif'):
    run.command('dwi2tensor dwi.mif - -mask in_voxels.mif | tensor2metric - -vector dirs.mif')
  # Get response function
  bvalues_option = ' -shells ' + ','.join(map(str,shells))
  lmax_option = ''
  if lmax:
    lmax_option = ' -lmax ' + ','.join(map(str,lmax))
  run.command('amp2response dwi.mif in_voxels.mif dirs.mif response.txt' + bvalues_option + lmax_option)
  run.function(shutil.copyfile, 'response.txt', path.fromUser(app.args.output, False))
  run.function(shutil.copyfile, 'in_voxels.mif', 'voxels.mif')
def __init__(self, image_path):
  """Populate this header object by running 'mrinfo -json_all' on image_path
  and loading the temporary JSON output into private, read-only members."""
  import json, os, subprocess
  from mrtrix3 import app, path, run
  filename = path.newTemporary('json')
  command = [ run.exeName(run.versionMatch('mrinfo')), image_path, '-json_all', filename ]
  if app.verbosity > 1:
    app.console('Loading header for image file \'' + image_path + '\'')
  app.debug(str(command))
  result = subprocess.call(command, stdout=None, stderr=None)
  if result:
    app.error('Could not access header information for image \'' + image_path + '\'')
  try:
    with open(filename, 'r') as f:
      data = json.load(f)
  except UnicodeDecodeError:
    # Lenient fallback if the JSON contains invalid UTF-8
    # NOTE(review): under Python 3 the text-mode re-read may raise again and
    # str has no .decode(); opening in 'rb' would be required — confirm the
    # targeted Python version
    with open(filename, 'r') as f:
      data = json.loads(f.read().decode('utf-8', errors='replace'))
  os.remove(filename)
  try:
    #self.__dict__.update(data)
    # Load the individual header elements manually, for a couple of reasons:
    # - So that pylint knows that they'll be there
    # - Write to private members, and give read-only access
    self._name = data['name']
    self._size = data['size']
    self._spacing = data['spacing']
    self._strides = data['strides']
    self._format = data['format']
    self._datatype = data['datatype']
    self._intensity_offset = data['intensity_offset']
    self._intensity_scale = data['intensity_scale']
    self._transform = data['transform']
    if not 'keyval' in data or not data['keyval']:
      self._keyval = { }
    else:
      self._keyval = data['keyval']
  except:
    app.error('Error in reading header information from file \'' + image_path + '\'')
  app.debug(str(vars(self)))
def execute():
  """Estimate a single-shell response function by selecting high-FA voxels
  (threshold or top-N) from the tensor fit within an (eroded) mask."""
  import os, shutil
  from mrtrix3 import app, image, path, run
  # Round shell b-values to integers; require at least b=0 plus one shell
  bvalues = [ int(round(float(x))) for x in image.headerField('dwi.mif', 'shells').split() ]
  if len(bvalues) < 2:
    app.error('Need at least 2 unique b-values (including b=0).')
  lmax_option = ''
  if app.args.lmax:
    lmax_option = ' -lmax ' + app.args.lmax
  # Erode the brain mask unless the user provided one explicitly
  if not app.args.mask:
    run.command('maskfilter mask.mif erode mask_eroded.mif -npass ' + str(app.args.erode))
    mask_path = 'mask_eroded.mif'
  else:
    mask_path = 'mask.mif'
  run.command('dwi2tensor dwi.mif -mask ' + mask_path + ' tensor.mif')
  run.command('tensor2metric tensor.mif -fa fa.mif -vector vector.mif -mask ' + mask_path)
  # Select single-fibre voxels either by absolute FA threshold or by top-N FA
  if app.args.threshold:
    run.command('mrthreshold fa.mif voxels.mif -abs ' + str(app.args.threshold))
  else:
    run.command('mrthreshold fa.mif voxels.mif -top ' + str(app.args.number))
  run.command('dwiextract dwi.mif - -singleshell -no_bzero | amp2response - voxels.mif vector.mif response.txt' + lmax_option)
  run.function(shutil.copyfile, 'response.txt', path.fromUser(app.args.output, False))
def execute(): #pylint: disable=unused-variable
  """Estimate a single-shell response function by selecting high-FA voxels
  (threshold or top-N) from the tensor fit within an (eroded) mask."""
  import shutil
  from mrtrix3 import app, image, path, run
  # Round shell b-values to integers; require at least b=0 plus one shell
  bvalues = [ int(round(float(x))) for x in image.mrinfo('dwi.mif', 'shell_bvalues').split() ]
  if len(bvalues) < 2:
    app.error('Need at least 2 unique b-values (including b=0).')
  lmax_option = ''
  if app.args.lmax:
    lmax_option = ' -lmax ' + app.args.lmax
  # Erode the brain mask unless the user provided one explicitly
  if not app.args.mask:
    run.command('maskfilter mask.mif erode mask_eroded.mif -npass ' + str(app.args.erode))
    mask_path = 'mask_eroded.mif'
  else:
    mask_path = 'mask.mif'
  run.command('dwi2tensor dwi.mif -mask ' + mask_path + ' tensor.mif')
  run.command('tensor2metric tensor.mif -fa fa.mif -vector vector.mif -mask ' + mask_path)
  # Select single-fibre voxels either by absolute FA threshold or by top-N FA
  if app.args.threshold:
    run.command('mrthreshold fa.mif voxels.mif -abs ' + str(app.args.threshold))
  else:
    run.command('mrthreshold fa.mif voxels.mif -top ' + str(app.args.number))
  run.command('dwiextract dwi.mif - -singleshell -no_bzero | amp2response - voxels.mif vector.mif response.txt' + lmax_option)
  run.function(shutil.copyfile, 'response.txt', path.fromUser(app.args.output, False))
def execute():
  """Convert a GIF3 parcellation image into the 5TT format required for ACT,
  using the GIF3ColourLUT lookup table."""
  import os
  from mrtrix3 import app, path, run
  lut_input_path = 'LUT.txt'
  if not os.path.exists('LUT.txt'):
    gif3_home = os.environ.get('GIFDB_HOME', '')
    if not gif3_home:
      app.error('Environment variable GIFDB_HOME is not set. Please set this variable manually, or provide script with path to file GIF3ColourLUT.txt using -lut option.')
    # Bug fix: previously referenced the undefined name 'gif_home'
    lut_input_path = os.path.join(gif3_home, 'GIF3ColourLUT.txt')
    if not os.path.isfile(lut_input_path):
      app.error('Could not find GIF3 lookup table file (expected location: ' + lut_input_path + '), and none provided using -lut')
  if app.args.sgm_amyg_hipp:
    lut_output_file_name = 'GIF32ACT_sgm_amyg_hipp.txt'
  else:
    lut_output_file_name = 'GIF32ACT.txt'
  lut_output_path = os.path.join(path.sharedDataPath(), path.scriptSubDirName(), lut_output_file_name)
  if not os.path.isfile(lut_output_path):
    app.error('Could not find lookup table file for converting GIF3 parcellation output to tissues (expected location: ' + lut_output_path + ')')
  # Initial conversion from GIF3 parcellation to five principal tissue types
  run.command('labelconvert input.mif ' + lut_input_path + ' ' + lut_output_path + ' indices.mif')
  # Use mrcrop to reduce file size
  if app.args.nocrop:
    image = 'indices.mif'
  else:
    image = 'indices_cropped.mif'
    run.command('mrthreshold indices.mif - -abs 0.5 | mrcrop indices.mif ' + image + ' -mask -')
  # Convert into the 5TT format for ACT
  run.command('mrcalc ' + image + ' 1 -eq cgm.mif')
  run.command('mrcalc ' + image + ' 2 -eq sgm.mif')
  run.command('mrcalc ' + image + ' 3 -eq wm.mif')
  run.command('mrcalc ' + image + ' 4 -eq csf.mif')
  run.command('mrcalc ' + image + ' 5 -eq path.mif')
  run.command('mrcat cgm.mif sgm.mif wm.mif csf.mif path.mif - -axis 3 | mrconvert - result.mif -datatype float32')
# Created by Neil Oxtoby on 2018-03-02
# Modified from freesurfer.py in MRtrix3 v3.0
def check3DNonunity(image_in): #pylint: disable=unused-variable
  """Verify that an image (Header instance or file path) has at least three
  spatial dimensions, none of size 1; errors out via app.error() otherwise."""
  from mrtrix3 import app
  if not isinstance(image_in, Header):
    if not isinstance(image_in, str):
      app.error('Error trying to test \'' + str(image_in) + '\': Not an image header or file path')
    image_in = Header(image_in)
  dims = image_in.size()
  if len(dims) < 3:
    app.error('Image \'' + image_in.name() + '\' does not contain 3 spatial dimensions')
  if min(dims[:3]) == 1:
    app.error('Image \'' + image_in.name() + '\' does not contain 3D spatial information (has axis with size 1)')
  app.debug('Image \'' + image_in.name() + '\' is >= 3D, and does not contain a unity spatial dimension')
def execute():
  """Convert a FreeSurfer parcellation image into the 5TT format required for
  ACT, using the FreeSurferColorLUT lookup table."""
  import os
  from mrtrix3 import app, path, run
  lut_input_path = 'LUT.txt'
  if not os.path.exists('LUT.txt'):
    freesurfer_home = os.environ.get('FREESURFER_HOME', '')
    if not freesurfer_home:
      app.error('Environment variable FREESURFER_HOME is not set; please run appropriate FreeSurfer configuration script, set this variable manually, or provide script with path to file FreeSurferColorLUT.txt using -lut option')
    lut_input_path = os.path.join(freesurfer_home, 'FreeSurferColorLUT.txt')
    if not os.path.isfile(lut_input_path):
      # Bug fix: previously referenced the undefined name 'freesurfer_lut'
      app.error('Could not find FreeSurfer lookup table file (expected location: ' + lut_input_path + '), and none provided using -lut')
  if app.args.sgm_amyg_hipp:
    lut_output_file_name = 'FreeSurfer2ACT_sgm_amyg_hipp.txt'
  else:
    lut_output_file_name = 'FreeSurfer2ACT.txt'
  lut_output_path = os.path.join(path.sharedDataPath(), path.scriptSubDirName(), lut_output_file_name)
  if not os.path.isfile(lut_output_path):
    app.error('Could not find lookup table file for converting FreeSurfer parcellation output to tissues (expected location: ' + lut_output_path + ')')
  # Initial conversion from FreeSurfer parcellation to five principal tissue types
  run.command('labelconvert input.mif ' + lut_input_path + ' ' + lut_output_path + ' indices.mif')
  # Use mrcrop to reduce file size
  if app.args.nocrop:
    image = 'indices.mif'
  else:
    image = 'indices_cropped.mif'
    run.command('mrthreshold indices.mif - -abs 0.5 | mrcrop indices.mif ' + image + ' -mask -')
  # Convert into the 5TT format for ACT
  run.command('mrcalc ' + image + ' 1 -eq cgm.mif')
  run.command('mrcalc ' + image + ' 2 -eq sgm.mif')
  run.command('mrcalc ' + image + ' 3 -eq wm.mif')
  run.command('mrcalc ' + image + ' 4 -eq csf.mif')
  run.command('mrcalc ' + image + ' 5 -eq path.mif')
  run.command('mrcat cgm.mif sgm.mif wm.mif csf.mif path.mif - -axis 3 | mrconvert - result.mif -datatype float32')
def execute():
  """Estimate a response function from the shells in 'dwi.mif' using the
  voxel selection in 'in_voxels.mif' (presumably the user-supplied / 'manual'
  selection strategy — TODO confirm from the enclosing script)."""
  import os, shutil
  from mrtrix3 import app, image, path, run
  # Round shell b-values to integers for use on command lines
  shells = [ int(round(float(x))) for x in image.headerField('dwi.mif', 'shells').split() ]
  # Get lmax information (if provided)
  lmax = []
  if app.args.lmax:
    lmax = [int(x.strip()) for x in app.args.lmax.split(',')]
    if not len(lmax) == len(shells):
      app.error('Number of manually-defined lmax\'s (' + str(len(lmax)) + ') does not match number of b-value shells (' + str(len(shells)) + ')')
    for l in lmax:
      if l % 2:
        app.error('Values for lmax must be even')
      if l < 0:
        app.error('Values for lmax must be non-negative')
  # Do we have directions, or do we need to calculate them?
  if not os.path.exists('dirs.mif'):
    run.command('dwi2tensor dwi.mif - -mask in_voxels.mif | tensor2metric - -vector dirs.mif')
  # Get response function
  # NOTE(review): uses '-shell' where a sibling implementation uses '-shells';
  # the option name is MRtrix3-version-dependent — confirm against the
  # amp2response build this script targets
  bvalues_option = ' -shell ' + ','.join(map(str, shells))
  lmax_option = ''
  if lmax:
    lmax_option = ' -lmax ' + ','.join(map(str, lmax))
  run.command('amp2response dwi.mif in_voxels.mif dirs.mif response.txt' + bvalues_option + lmax_option)
  run.function(shutil.copyfile, 'response.txt', path.fromUser(app.args.output, False))
  run.function(shutil.copyfile, 'in_voxels.mif', 'voxels.mif')
def execute(): #pylint: disable=unused-variable
  """Convert a FreeSurfer parcellation into the 5TT tissue format used by ACT.

  Reads 'input.mif' from the scratch directory and writes 'result.mif';
  aborts via app.error() if either lookup-table file cannot be found.
  """
  import os.path #pylint: disable=unused-variable
  from mrtrix3 import app, path, run
  # Prefer a lookup table supplied via -lut (copied in as 'LUT.txt');
  # otherwise fall back to the table shipped with FreeSurfer
  lut_input_path = 'LUT.txt'
  if not os.path.exists('LUT.txt'):
    freesurfer_home = os.environ.get('FREESURFER_HOME', '')
    if not freesurfer_home:
      app.error('Environment variable FREESURFER_HOME is not set; please run appropriate FreeSurfer configuration script, set this variable manually, or provide script with path to file FreeSurferColorLUT.txt using -lut option')
    lut_input_path = os.path.join(freesurfer_home, 'FreeSurferColorLUT.txt')
    if not os.path.isfile(lut_input_path):
      app.error('Could not find FreeSurfer lookup table file (expected location: ' + lut_input_path + '), and none provided using -lut')
  # Tissue-conversion table depends on whether amygdalae / hippocampi are
  # to be labelled as sub-cortical grey matter
  lut_output_file_name = 'FreeSurfer2ACT_sgm_amyg_hipp.txt' if app.args.sgm_amyg_hipp else 'FreeSurfer2ACT.txt'
  lut_output_path = os.path.join(path.sharedDataPath(), path.scriptSubDirName(), lut_output_file_name)
  if not os.path.isfile(lut_output_path):
    app.error('Could not find lookup table file for converting FreeSurfer parcellation output to tissues (expected location: ' + lut_output_path + ')')
  # Initial conversion from FreeSurfer parcellation to five principal tissue types
  run.command('labelconvert input.mif ' + lut_input_path + ' ' + lut_output_path + ' indices.mif')
  # Use mrcrop to reduce file size
  if app.args.nocrop:
    indices_image = 'indices.mif'
  else:
    indices_image = 'indices_cropped.mif'
    run.command('mrthreshold indices.mif - -abs 0.5 | mrcrop indices.mif ' + indices_image + ' -mask -')
  # Convert into the 5TT format for ACT: one binary volume per tissue index
  for index, tissue in enumerate(['cgm', 'sgm', 'wm', 'csf', 'path'], start=1):
    run.command('mrcalc ' + indices_image + ' ' + str(index) + ' -eq ' + tissue + '.mif')
  run.command('mrcat cgm.mif sgm.mif wm.mif csf.mif path.mif - -axis 3 | mrconvert - result.mif -datatype float32')
type=int, default='20000000', help='define the number of streamlines to be computed ' 'when performing tractography on the FOD template. ' '(group3 analysis level only)') options.add_argument('-num_tracks_sift', type=int, default='2000000', help='define the number of streamlines to ' 'remain after performing SIFT on the tractogram' '(group3 analysis level only)') app.parse() if app.isWindows(): app.error('Script cannot be run on Windows due to FSL dependency') subjects_to_analyze = [] # only for a subset of subjects if app.args.participant_label: subjects_to_analyze = app.args.participant_label # for all subjects else: subject_dirs = glob.glob(os.path.join(app.args.in_dir, 'sub-*')) subjects_to_analyze = [ subject_dir.split("-")[-1] for subject_dir in subject_dirs ] # create output subjects directory all_subjects_dir = os.path.join(app.args.output_dir, 'subjects') if not os.path.exists(all_subjects_dir):
def execute():
  """Generate a 5TT tissue image from a T1 (and optional T2) using FSL tools.

  Pipeline: optional brain masking (user mask / -premasked / standard_space_roi
  + BET), FSL FAST for cortical tissue segmentation, FSL FIRST for sub-cortical
  grey matter meshes, then combination of all tissue maps into the 5TT format
  ('result.mif').  Aborts via app.error() if FSL or its data files are missing.
  """
  import os
  from distutils.spawn import find_executable
  from mrtrix3 import app, file, fsl, image, path, run
  if app.isWindows():
    app.error('\'fsl\' algorithm of 5ttgen script cannot be run on Windows: FSL not available on Windows')
  fsl_path = os.environ.get('FSLDIR', '')
  if not fsl_path:
    app.error('Environment variable FSLDIR is not set; please run appropriate FSL configuration script')
  # Locate each required FSL executable, allowing for the 'fsl5.0-' binary
  # name prefix used by some Linux distribution packagings of FSL
  ssroi_cmd = 'standard_space_roi'
  if not find_executable(ssroi_cmd):
    ssroi_cmd = 'fsl5.0-standard_space_roi'
    if not find_executable(ssroi_cmd):
      app.error('Could not find FSL program standard_space_roi; please verify FSL install')
  bet_cmd = 'bet'
  if not find_executable(bet_cmd):
    bet_cmd = 'fsl5.0-bet'
    if not find_executable(bet_cmd):
      app.error('Could not find FSL program bet; please verify FSL install')
  fast_cmd = 'fast'
  if not find_executable(fast_cmd):
    fast_cmd = 'fsl5.0-fast'
    if not find_executable(fast_cmd):
      app.error('Could not find FSL program fast; please verify FSL install')
  first_cmd = 'run_first_all'
  if not find_executable(first_cmd):
    first_cmd = "fsl5.0-run_first_all"
    if not find_executable(first_cmd):
      app.error('Could not find FSL program run_first_all; please verify FSL install')
  first_atlas_path = os.path.join(fsl_path, 'data', 'first', 'models_336_bin')
  if not os.path.isdir(first_atlas_path):
    app.error('Atlases required for FSL\'s FIRST program not installed; please install fsl-first-data using your relevant package manager')
  fsl_suffix = fsl.suffix()
  sgm_structures = [ 'L_Accu', 'R_Accu', 'L_Caud', 'R_Caud', 'L_Pall', 'R_Pall', 'L_Puta', 'R_Puta', 'L_Thal', 'R_Thal' ]
  if app.args.sgm_amyg_hipp:
    sgm_structures.extend([ 'L_Amyg', 'R_Amyg', 'L_Hipp', 'R_Hipp' ])
  run.command('mrconvert input.mif T1.nii -stride -1,+2,+3')
  fast_t1_input = 'T1.nii'
  fast_t2_input = ''
  # Decide whether or not we're going to do any brain masking
  if os.path.exists('mask.mif'):
    fast_t1_input = 'T1_masked' + fsl_suffix
    # Check to see if the mask matches the T1 image
    if image.match('T1.nii', 'mask.mif'):
      run.command('mrcalc T1.nii mask.mif -mult ' + fast_t1_input)
      mask_path = 'mask.mif'
    else:
      app.warn('Mask image does not match input image - re-gridding')
      run.command('mrtransform mask.mif mask_regrid.mif -template T1.nii')
      # BUG FIX: the '-mult' operator was missing here, leaving mrcalc with
      # two operands and no operation (an invalid invocation); apply the
      # re-gridded mask by multiplication, as in the matching-mask branch
      run.command('mrcalc T1.nii mask_regrid.mif -mult ' + fast_t1_input)
      mask_path = 'mask_regrid.mif'
    if os.path.exists('T2.nii'):
      fast_t2_input = 'T2_masked' + fsl_suffix
      run.command('mrcalc T2.nii ' + mask_path + ' -mult ' + fast_t2_input)
  elif app.args.premasked:
    fast_t1_input = 'T1.nii'
    if os.path.exists('T2.nii'):
      fast_t2_input = 'T2.nii'
  else:
    # Use FSL command standard_space_roi to do an initial masking of the image before BET
    # Also reduce the FoV of the image
    # Using MNI 1mm dilated brain mask rather than the -b option in standard_space_roi (which uses the 2mm mask); the latter looks 'buggy' to me... Unfortunately even with the 1mm 'dilated' mask, it can still cut into some brain areas, hence the explicit dilation
    mni_mask_path = os.path.join(fsl_path, 'data', 'standard', 'MNI152_T1_1mm_brain_mask_dil.nii.gz')
    mni_mask_dilation = 0
    if os.path.exists(mni_mask_path):
      mni_mask_dilation = 4
    else:
      mni_mask_path = os.path.join(fsl_path, 'data', 'standard', 'MNI152_T1_2mm_brain_mask_dil.nii.gz')
      if os.path.exists(mni_mask_path):
        mni_mask_dilation = 2
    if mni_mask_dilation:
      run.command('maskfilter ' + mni_mask_path + ' dilate mni_mask.nii -npass ' + str(mni_mask_dilation))
      if app.args.nocrop:
        ssroi_roi_option = ' -roiNONE'
      else:
        ssroi_roi_option = ' -roiFOV'
      run.command(ssroi_cmd + ' T1.nii T1_preBET' + fsl_suffix + ' -maskMASK mni_mask.nii' + ssroi_roi_option, False)
    else:
      run.command(ssroi_cmd + ' T1.nii T1_preBET' + fsl_suffix + ' -b', False)
    pre_bet_image = fsl.findImage('T1_preBET')
    # BET
    run.command(bet_cmd + ' ' + pre_bet_image + ' T1_BET' + fsl_suffix + ' -f 0.15 -R')
    fast_t1_input = fsl.findImage('T1_BET' + fsl_suffix)
    if os.path.exists('T2.nii'):
      if app.args.nocrop:
        fast_t2_input = 'T2.nii'
      else:
        # Just a reduction of FoV, no sub-voxel interpolation going on
        run.command('mrtransform T2.nii T2_cropped.nii -template ' + fast_t1_input + ' -interp nearest')
        fast_t2_input = 'T2_cropped.nii'
  # Finish branching based on brain masking
  # FAST
  if fast_t2_input:
    run.command(fast_cmd + ' -S 2 ' + fast_t2_input + ' ' + fast_t1_input)
  else:
    run.command(fast_cmd + ' ' + fast_t1_input)
  # FIRST
  first_input_is_brain_extracted = ''
  if app.args.premasked:
    first_input_is_brain_extracted = ' -b'
  run.command(first_cmd + ' -m none -s ' + ','.join(sgm_structures) + ' -i T1.nii -o first' + first_input_is_brain_extracted)
  fsl.checkFirst('first', sgm_structures)
  # Convert FIRST meshes to partial volume images
  pve_image_list = [ ]
  for struct in sgm_structures:
    pve_image_path = 'mesh2pve_' + struct + '.mif'
    vtk_in_path = 'first-' + struct + '_first.vtk'
    vtk_temp_path = struct + '.vtk'
    run.command('meshconvert ' + vtk_in_path + ' ' + vtk_temp_path + ' -transform first2real T1.nii')
    run.command('mesh2pve ' + vtk_temp_path + ' ' + fast_t1_input + ' ' + pve_image_path)
    pve_image_list.append(pve_image_path)
  pve_cat = ' '.join(pve_image_list)
  run.command('mrmath ' + pve_cat + ' sum - | mrcalc - 1.0 -min all_sgms.mif')
  # Combine the tissue images into the 5TT format within the script itself
  fast_output_prefix = fast_t1_input.split('.')[0]
  fast_csf_output = fsl.findImage(fast_output_prefix + '_pve_0')
  fast_gm_output = fsl.findImage(fast_output_prefix + '_pve_1')
  fast_wm_output = fsl.findImage(fast_output_prefix + '_pve_2')
  # Step 1: Run LCC on the WM image
  run.command('mrthreshold ' + fast_wm_output + ' - -abs 0.001 | maskfilter - connect - -connectivity | mrcalc 1 - 1 -gt -sub remove_unconnected_wm_mask.mif -datatype bit')
  # Step 2: Generate the images in the same fashion as the old 5ttgen binary used to:
  #   - Preserve CSF as-is
  #   - Preserve SGM, unless it results in a sum of volume fractions greater than 1, in which case clamp
  #   - Multiply the FAST volume fractions of GM and CSF, so that the sum of CSF, SGM, CGM and WM is 1.0
  run.command('mrcalc ' + fast_csf_output + ' remove_unconnected_wm_mask.mif -mult csf.mif')
  run.command('mrcalc 1.0 csf.mif -sub all_sgms.mif -min sgm.mif')
  run.command('mrcalc 1.0 csf.mif sgm.mif -add -sub ' + fast_gm_output + ' ' + fast_wm_output + ' -add -div multiplier.mif')
  run.command('mrcalc multiplier.mif -finite multiplier.mif 0.0 -if multiplier_noNAN.mif')
  run.command('mrcalc ' + fast_gm_output + ' multiplier_noNAN.mif -mult remove_unconnected_wm_mask.mif -mult cgm.mif')
  run.command('mrcalc ' + fast_wm_output + ' multiplier_noNAN.mif -mult remove_unconnected_wm_mask.mif -mult wm.mif')
  run.command('mrcalc 0 wm.mif -min path.mif')
  run.command('mrcat cgm.mif sgm.mif wm.mif csf.mif path.mif - -axis 3 | mrconvert - combined_precrop.mif -stride +2,+3,+4,+1')
  # Use mrcrop to reduce file size (improves caching of image data during tracking)
  if app.args.nocrop:
    run.command('mrconvert combined_precrop.mif result.mif')
  else:
    run.command('mrmath combined_precrop.mif sum - -axis 3 | mrthreshold - - -abs 0.5 | mrcrop combined_precrop.mif result.mif -mask -')
def runGroup(output_dir):
  """Group-level analysis: inter-subject connection density normalisation.

  Scales each subject's connectome by the SIFT proportionality coefficient mu,
  a b=0 intensity normalisation factor, and a response-function scale factor,
  then computes and writes the group mean connectome.

  Relies on module-level imports (os, glob, math, shutil, and the mrtrix3
  app/run/file/image modules) that are defined elsewhere in this file.
  """
  # Check presence of all required input files before proceeding
  # Pre-calculate paths of all files since many will be used in more than one location
  class subjectPaths(object):
    def __init__(self, label):
      # Inputs written back into the BIDS-style output tree by the
      # single-subject (participant-level) analysis
      self.in_dwi = os.path.join(output_dir, label, 'dwi', label + '_dwi.nii.gz')
      self.in_bvec = os.path.join(output_dir, label, 'dwi', label + '_dwi.bvec')
      self.in_bval = os.path.join(output_dir, label, 'dwi', label + '_dwi.bval')
      self.in_json = os.path.join(output_dir, label, 'dwi', label + '_dwi.json')
      self.in_rf = os.path.join(output_dir, label, 'dwi', label + '_response.txt')
      self.in_connectome = os.path.join(output_dir, label, 'connectome', label + '_connectome.csv')
      self.in_mu = os.path.join(output_dir, label, 'connectome', label + '_mu.txt')
      # At this point only the in_* path attributes above have been set,
      # so this existence check covers exactly those required input files
      for entry in vars(self).values():
        if not os.path.exists(entry):
          app.error('Unable to find critical subject data (expected location: ' + entry + ')')
      # SIFT proportionality coefficient for this subject
      with open(self.in_mu, 'r') as f:
        self.mu = float(f.read())
      # Response function: one row of coefficients per b-value shell
      self.RF = []
      with open(self.in_rf, 'r') as f:
        for line in f:
          self.RF.append([float(v) for v in line.split()])
      # Relative paths of per-subject temporaries inside the group temp dir
      self.temp_mask = os.path.join('masks', label + '.mif')
      self.temp_fa = os.path.join('images', label + '.mif')
      self.temp_bzero = os.path.join('bzeros', label + '.mif')
      self.temp_warp = os.path.join('warps', label + '.mif')
      self.temp_voxels = os.path.join('voxels', label + '.mif')
      # Per-subject scale factors, filled in during the later passes
      # NOTE(review): dwiintensitynorm_factor is initialised but never
      # updated or read within this function - confirm whether it is used
      self.median_bzero = 0.0
      self.dwiintensitynorm_factor = 1.0
      self.RF_multiplier = 1.0
      self.global_multiplier = 1.0
      self.temp_connectome = os.path.join('connectomes', label + '.csv')
      # Final per-subject outputs written back to the output directory
      self.out_scale_bzero = os.path.join(output_dir, label, 'connectome', label + '_scalefactor_bzero.csv')
      self.out_scale_RF = os.path.join(output_dir, label, 'connectome', label + '_scalefactor_response.csv')
      self.out_connectome = os.path.join(output_dir, label, 'connectome', label + '_connectome_scaled.csv')
      self.label = label

  # Discover all processed subjects in the output directory
  subject_list = [ 'sub-' + sub_dir.split("-")[-1] for sub_dir in glob.glob(os.path.join(output_dir, 'sub-*')) ]
  if not subject_list:
    app.error('No processed subject data found in output directory for group analysis')
  subjects = []
  for label in subject_list:
    subjects.append(subjectPaths(label))

  app.makeTempDir()
  app.gotoTempDir()

  # First pass through subject data in group analysis:
  # - Grab DWI data (written back from single-subject analysis back into BIDS format)
  # - Generate mask and FA images to be used in populate template generation
  # - Generate mean b=0 image for each subject for later use
  progress = app.progressBar('Importing and preparing subject data', len(subjects))
  run.function(os.makedirs, 'bzeros')
  run.function(os.makedirs, 'images')
  run.function(os.makedirs, 'masks')
  for s in subjects:
    grad_import_option = ' -fslgrad ' + s.in_bvec + ' ' + s.in_bval
    run.command('dwi2mask ' + s.in_dwi + ' ' + s.temp_mask + grad_import_option)
    run.command('dwi2tensor ' + s.in_dwi + ' - -mask ' + s.temp_mask + grad_import_option + ' | tensor2metric - -fa ' + s.temp_fa)
    run.command('dwiextract ' + s.in_dwi + grad_import_option + ' - -bzero | mrmath - mean ' + s.temp_bzero + ' -axis 3')
    progress.increment()
  progress.done()

  # First group-level calculation: Generate the population FA template
  app.console('Generating population template for inter-subject intensity normalisation WM mask derivation')
  run.command('population_template images -mask_dir masks -warp_dir warps template.mif '
              '-type rigid_affine_nonlinear -rigid_scale 0.25,0.5,0.8,1.0 -affine_scale 0.7,0.8,1.0,1.0 '
              '-nl_scale 0.5,0.75,1.0,1.0,1.0 -nl_niter 5,5,5,5,5 -linear_no_pause')
  file.delTemporary('images')
  file.delTemporary('masks')

  # Second pass through subject data in group analysis:
  # - Warp template FA image back to subject space & threshold to define a WM mask in subject space
  # - Calculate the median subject b=0 value within this mask
  # - Store this in a file, and contribute to calculation of the mean of these values across subjects
  # - Contribute to the group average response function
  # NOTE(review): this progress bar is sized len(subjects)+1 but incremented
  # only once per subject before done() - confirm intended accounting
  progress = app.progressBar('Generating group-average response function and intensity normalisation factors', len(subjects) + 1)
  run.function(os.makedirs, 'voxels')
  sum_median_bzero = 0.0
  sum_RF = []
  for s in subjects:
    # '-from 2': use the template-to-subject half of the full warp
    run.command('mrtransform template.mif -warp_full ' + s.temp_warp + ' - -from 2 -template ' + s.temp_bzero + ' | '
                'mrthreshold - ' + s.temp_voxels + ' -abs 0.4')
    s.median_bzero = float(image.statistic(s.temp_bzero, 'median', '-mask ' + s.temp_voxels))
    file.delTemporary(s.temp_bzero)
    file.delTemporary(s.temp_voxels)
    file.delTemporary(s.temp_warp)
    sum_median_bzero += s.median_bzero
    # Element-wise accumulation of response function coefficients
    if sum_RF:
      sum_RF = [[a + b for a, b in zip(one, two)] for one, two in zip(sum_RF, s.RF)]
    else:
      sum_RF = s.RF
    progress.increment()
  file.delTemporary('bzeros')
  file.delTemporary('voxels')
  file.delTemporary('warps')
  progress.done()

  # Second group-level calculation:
  # - Calculate the mean of median b=0 values
  # - Calculate the mean response function, and extract the l=0 values from it
  mean_median_bzero = sum_median_bzero / len(subjects)
  mean_RF = [[v / len(subjects) for v in line] for line in sum_RF]
  mean_RF_lzero = [line[0] for line in mean_RF]

  # Third pass through subject data in group analysis:
  # - Scale the connectome strengths:
  #   - Multiply by SIFT proportionality coefficient mu
  #   - Multiply by (mean median b=0) / (subject median b=0)
  #   - Multiply by (subject RF size) / (mean RF size)
  #     (needs to account for multi-shell data)
  # - Write the result to file
  progress = app.progressBar('Applying normalisation scaling to subject connectomes', len(subjects))
  run.function(os.makedirs, 'connectomes')
  for s in subjects:
    RF_lzero = [line[0] for line in s.RF]
    s.RF_multiplier = 1.0
    for (mean, subj) in zip(mean_RF_lzero, RF_lzero):
      s.RF_multiplier = s.RF_multiplier * subj / mean
    # Don't want to be scaling connectome independently for differences in RF l=0 terms across all shells;
    # use the geometric mean of the per-shell scale factors
    s.RF_multiplier = math.pow(s.RF_multiplier, 1.0 / len(mean_RF_lzero))
    s.bzero_multiplier = mean_median_bzero / s.median_bzero
    s.global_multiplier = s.mu * s.bzero_multiplier * s.RF_multiplier
    # Read the raw connectome, scale every edge weight, write the temporary copy
    connectome = []
    with open(s.in_connectome, 'r') as f:
      for line in f:
        connectome.append([float(v) for v in line.split()])
    with open(s.temp_connectome, 'w') as f:
      for line in connectome:
        f.write(' '.join([str(v * s.global_multiplier) for v in line]) + '\n')
    progress.increment()
  progress.done()

  # Third group-level calculation: Generate the group mean connectome
  # For any higher-level analysis (e.g. NBSE, computing connectome global measures, etc.),
  # trying to incorporate such analysis into this particular pipeline script is likely to
  # overly complicate the interface, and not actually provide much in terms of
  # convenience / reproducibility guarantees. The primary functionality of this group-level
  # analysis is therefore to achieve inter-subject connection density normalisation; users
  # then have the flexibility to subsequently analyse the data however they choose (ideally
  # based on subject classification data provided with the BIDS-compliant dataset).
  progress = app.progressBar('Calculating group mean connectome', len(subjects) + 1)
  mean_connectome = []
  for s in subjects:
    connectome = []
    with open(s.temp_connectome, 'r') as f:
      for line in f:
        connectome.append([float(v) for v in line.split()])
    # Element-wise accumulation across subjects
    if mean_connectome:
      mean_connectome = [[c1 + c2 for c1, c2 in zip(r1, r2)] for r1, r2 in zip(mean_connectome, connectome)]
    else:
      mean_connectome = connectome
    progress.increment()
  mean_connectome = [[v / len(subjects) for v in row] for row in mean_connectome]
  progress.done()

  # Write results of interest back to the output directory;
  # both per-subject and group information
  progress = app.progressBar('Writing results to output directory', len(subjects) + 2)
  for s in subjects:
    run.function(shutil.copyfile, s.temp_connectome, s.out_connectome)
    with open(s.out_scale_bzero, 'w') as f:
      f.write(str(s.bzero_multiplier))
    with open(s.out_scale_RF, 'w') as f:
      f.write(str(s.RF_multiplier))
    progress.increment()
  with open(os.path.join(output_dir, 'mean_response.txt'), 'w') as f:
    for row in mean_RF:
      f.write(' '.join([str(v) for v in row]) + '\n')
  progress.increment()
  with open(os.path.join(output_dir, 'mean_connectome.csv'), 'w') as f:
    for row in mean_connectome:
      f.write(' '.join([str(v) for v in row]) + '\n')
  progress.done()
def execute():
  """Estimate WM / GM / CSF response functions using a 5TT image (msmt_5tt approach).

  Operates on 'dwi.mif', 'mask.mif' and '5tt.mif' in the scratch directory;
  writes the three response functions to the paths given by the
  -out_wm / -out_gm / -out_csf arguments, plus a 4D voxel-selection image
  ('voxels.mif').
  """
  import math, os, shutil
  from mrtrix3 import app, image, path, run
  # Ideally want to use the oversampling-based regridding of the 5TT image from the SIFT model, not mrtransform
  # May need to commit 5ttregrid...
  # Verify input 5tt image
  # NOTE(review): second argument False presumably suppresses abort-on-failure
  # for this check - confirm against run.command()'s signature
  run.command('5ttcheck 5tt.mif', False)
  # Get shell information
  shells = [ int(round(float(x))) for x in image.headerField('dwi.mif', 'shells').split() ]
  if len(shells) < 3:
    app.warn('Less than three b-value shells; response functions will not be applicable in resolving three tissues using MSMT-CSD algorithm')
  # Get lmax information (if provided)
  wm_lmax = []
  if app.args.lmax:
    wm_lmax = [ int(x.strip()) for x in app.args.lmax.split(',') ]
    if not len(wm_lmax) == len(shells):
      app.error('Number of manually-defined lmax\'s (' + str(len(wm_lmax)) + ') does not match number of b-value shells (' + str(len(shells)) + ')')
    for l in wm_lmax:
      if l % 2:
        app.error('Values for lmax must be even')
      if l < 0:
        app.error('Values for lmax must be non-negative')
  # Tensor fit provides FA (for tissue mask refinement) and principal
  # eigenvectors (as fibre directions for the WM response estimation)
  run.command('dwi2tensor dwi.mif - -mask mask.mif | tensor2metric - -fa fa.mif -vector vector.mif')
  if not os.path.exists('dirs.mif'):
    run.function(shutil.copy, 'vector.mif', 'dirs.mif')
  run.command('mrtransform 5tt.mif 5tt_regrid.mif -template fa.mif -interp linear')
  # Basic tissue masks: threshold each tissue partial volume fraction (volume
  # index along axis 3: 0=CGM, 2=WM, 3=CSF), constrained by the brain mask,
  # with an additional FA upper bound for the GM and CSF masks
  run.command('mrconvert 5tt_regrid.mif - -coord 3 2 -axes 0,1,2 | mrcalc - ' + str(app.args.pvf) + ' -gt mask.mif -mult wm_mask.mif')
  run.command('mrconvert 5tt_regrid.mif - -coord 3 0 -axes 0,1,2 | mrcalc - ' + str(app.args.pvf) + ' -gt fa.mif ' + str(app.args.fa) + ' -lt -mult mask.mif -mult gm_mask.mif')
  run.command('mrconvert 5tt_regrid.mif - -coord 3 3 -axes 0,1,2 | mrcalc - ' + str(app.args.pvf) + ' -gt fa.mif ' + str(app.args.fa) + ' -lt -mult mask.mif -mult csf_mask.mif')
  # Revise WM mask to only include single-fibre voxels
  app.console('Calling dwi2response recursively to select WM single-fibre voxels using \'' + app.args.wm_algo + '\' algorithm')
  # Propagate this script's -nocleanup behaviour to the recursive invocation
  recursive_cleanup_option = ''
  if not app._cleanup:
    recursive_cleanup_option = ' -nocleanup'
  run.command('dwi2response ' + app.args.wm_algo + ' dwi.mif wm_ss_response.txt -mask wm_mask.mif -voxels wm_sf_mask.mif -tempdir ' + app._tempDir + recursive_cleanup_option)
  # Check for empty masks
  wm_voxels = int(image.statistic('wm_sf_mask.mif', 'count', 'wm_sf_mask.mif'))
  gm_voxels = int(image.statistic('gm_mask.mif', 'count', 'gm_mask.mif'))
  csf_voxels = int(image.statistic('csf_mask.mif', 'count', 'csf_mask.mif'))
  empty_masks = []
  if not wm_voxels:
    empty_masks.append('WM')
  if not gm_voxels:
    empty_masks.append('GM')
  if not csf_voxels:
    empty_masks.append('CSF')
  if empty_masks:
    # Pluralise both 'mask' and 'function' when more than one mask is empty
    message = ','.join(empty_masks)
    message += ' tissue mask'
    if len(empty_masks) > 1:
      message += 's'
    message += ' empty; cannot estimate response function'
    if len(empty_masks) > 1:
      message += 's'
    app.error(message)
  # For each of the three tissues, generate a multi-shell response
  bvalues_option = ' -shell ' + ','.join(map(str, shells))
  sfwm_lmax_option = ''
  if wm_lmax:
    sfwm_lmax_option = ' -lmax ' + ','.join(map(str, wm_lmax))
  run.command('amp2response dwi.mif wm_sf_mask.mif dirs.mif wm.txt' + bvalues_option + sfwm_lmax_option)
  run.command('amp2response dwi.mif gm_mask.mif dirs.mif gm.txt' + bvalues_option + ' -isotropic')
  run.command('amp2response dwi.mif csf_mask.mif dirs.mif csf.txt' + bvalues_option + ' -isotropic')
  run.function(shutil.copyfile, 'wm.txt', path.fromUser(app.args.out_wm, False))
  run.function(shutil.copyfile, 'gm.txt', path.fromUser(app.args.out_gm, False))
  run.function(shutil.copyfile, 'csf.txt', path.fromUser(app.args.out_csf, False))
  # Generate output 4D binary image with voxel selections; RGB as in MSMT-CSD paper
  run.command('mrcat csf_mask.mif gm_mask.mif wm_sf_mask.mif voxels.mif -axis 3')
def execute():
  """Estimate single-fibre WM, GM and CSF response functions ('dhollander' heuristics).

  Operates on 'dwi.mif' and 'mask.mif' in the scratch directory.  Builds a
  'signal decay metric' (SDM) per b-value shell, performs crude then refined
  tissue segmentation from FA and SDM, selects final voxels per tissue, and
  writes the three response functions to the -out_sfwm / -out_gm / -out_csf
  paths, plus 4D voxel-selection images ('crude.mif', 'refined.mif',
  'voxels.mif').
  """
  import math, os, shutil
  from mrtrix3 import app, image, path, run
  # Get b-values and number of volumes per b-value.
  bvalues = [ int(round(float(x))) for x in image.headerField('dwi.mif', 'shells').split() ]
  bvolumes = [ int(x) for x in image.headerField('dwi.mif', 'shellcounts').split() ]
  app.console(str(len(bvalues)) + ' unique b-value(s) detected: ' + ','.join(map(str, bvalues)) + ' with ' + ','.join(map(str, bvolumes)) + ' volumes.')
  if len(bvalues) < 2:
    app.error('Need at least 2 unique b-values (including b=0).')
  # Get lmax information (if provided).
  sfwm_lmax = []
  if app.args.lmax:
    sfwm_lmax = [ int(x.strip()) for x in app.args.lmax.split(',') ]
    if not len(sfwm_lmax) == len(bvalues):
      app.error('Number of lmax\'s (' + str(len(sfwm_lmax)) + ', as supplied to the -lmax option: ' + ','.join(map(str, sfwm_lmax)) + ') does not match number of unique b-values.')
    for l in sfwm_lmax:
      if l % 2:
        app.error('Values supplied to the -lmax option must be even.')
      if l < 0:
        app.error('Values supplied to the -lmax option must be non-negative.')
  # Erode (brain) mask.
  if app.args.erode > 0:
    run.command('maskfilter mask.mif erode eroded_mask.mif -npass ' + str(app.args.erode))
  else:
    run.command('mrconvert mask.mif eroded_mask.mif -datatype bit')
  # Get volumes, compute mean signal and SDM per b-value; compute overall SDM; get rid of erroneous values.
  totvolumes = 0
  fullsdmcmd = 'mrcalc'
  errcmd = 'mrcalc'
  # Mean b=0 image acts as the reference for all SDM computations
  zeropath = 'mean_b' + str(bvalues[0]) + '.mif'
  for i, b in enumerate(bvalues):
    meanpath = 'mean_b' + str(b) + '.mif'
    run.command('dwiextract dwi.mif -shell ' + str(b) + ' - | mrmath - mean ' + meanpath + ' -axis 3')
    # Flag non-finite or non-positive mean signals as erroneous
    errpath = 'err_b' + str(b) + '.mif'
    run.command('mrcalc ' + meanpath + ' -finite ' + meanpath + ' 0 -if 0 -le ' + errpath + ' -datatype bit')
    errcmd += ' ' + errpath
    if i > 0:
      errcmd += ' -add'
      # SDM = log(mean(b=0) / mean(b)); only meaningful for b>0 shells,
      # hence nested inside the i > 0 branch
      sdmpath = 'sdm_b' + str(b) + '.mif'
      run.command('mrcalc ' + zeropath + ' ' + meanpath + ' -divide -log ' + sdmpath)
      totvolumes += bvolumes[i]
      # Accumulate the volume-weighted sum of per-shell SDMs
      fullsdmcmd += ' ' + sdmpath + ' ' + str(bvolumes[i]) + ' -mult'
      if i > 1:
        fullsdmcmd += ' -add'
  # Overall SDM: weighted average over all b>0 shells
  fullsdmcmd += ' ' + str(totvolumes) + ' -divide full_sdm.mif'
  run.command(fullsdmcmd)
  run.command('mrcalc full_sdm.mif -finite full_sdm.mif 0 -if 0 -le err_sdm.mif -datatype bit')
  # 'safe' mask: eroded brain mask with all erroneous voxels removed
  errcmd += ' err_sdm.mif -add 0 eroded_mask.mif -if safe_mask.mif -datatype bit'
  run.command(errcmd)
  # Clamp SDM to a maximum of 10 within the safe mask
  run.command('mrcalc safe_mask.mif full_sdm.mif 0 -if 10 -min safe_sdm.mif')
  # Compute FA and principal eigenvectors; crude WM versus GM-CSF separation based on FA.
  run.command('dwi2tensor dwi.mif - -mask safe_mask.mif | tensor2metric - -fa safe_fa.mif -vector safe_vecs.mif -modulate none -mask safe_mask.mif')
  run.command('mrcalc safe_mask.mif safe_fa.mif 0 -if ' + str(app.args.fa) + ' -gt crude_wm.mif -datatype bit')
  run.command('mrcalc crude_wm.mif 0 safe_mask.mif -if _crudenonwm.mif -datatype bit')
  # Crude GM versus CSF separation based on SDM.
  crudenonwmmedian = image.statistic('safe_sdm.mif', 'median', '_crudenonwm.mif')
  run.command('mrcalc _crudenonwm.mif safe_sdm.mif ' + str(crudenonwmmedian) + ' -subtract 0 -if - | mrthreshold - - -mask _crudenonwm.mif | mrcalc _crudenonwm.mif - 0 -if crude_csf.mif -datatype bit')
  run.command('mrcalc crude_csf.mif 0 _crudenonwm.mif -if crude_gm.mif -datatype bit')
  # Refine WM: remove high SDM outliers.
  crudewmmedian = image.statistic('safe_sdm.mif', 'median', 'crude_wm.mif')
  run.command('mrcalc crude_wm.mif safe_sdm.mif 0 -if ' + str(crudewmmedian) + ' -gt _crudewmhigh.mif -datatype bit')
  run.command('mrcalc _crudewmhigh.mif 0 crude_wm.mif -if _crudewmlow.mif -datatype bit')
  # Outlier threshold Q3 + (Q3 - Q1), with quartiles approximated by the
  # medians of the below-median and above-median halves
  crudewmQ1 = float(image.statistic('safe_sdm.mif', 'median', '_crudewmlow.mif'))
  crudewmQ3 = float(image.statistic('safe_sdm.mif', 'median', '_crudewmhigh.mif'))
  crudewmoutlthresh = crudewmQ3 + (crudewmQ3 - crudewmQ1)
  run.command('mrcalc crude_wm.mif safe_sdm.mif 0 -if ' + str(crudewmoutlthresh) + ' -gt _crudewmoutliers.mif -datatype bit')
  run.command('mrcalc _crudewmoutliers.mif 0 crude_wm.mif -if refined_wm.mif -datatype bit')
  # Refine GM: separate safer GM from partial volumed voxels.
  crudegmmedian = image.statistic('safe_sdm.mif', 'median', 'crude_gm.mif')
  run.command('mrcalc crude_gm.mif safe_sdm.mif 0 -if ' + str(crudegmmedian) + ' -gt _crudegmhigh.mif -datatype bit')
  run.command('mrcalc _crudegmhigh.mif 0 crude_gm.mif -if _crudegmlow.mif -datatype bit')
  run.command('mrcalc _crudegmhigh.mif safe_sdm.mif ' + str(crudegmmedian) + ' -subtract 0 -if - | mrthreshold - - -mask _crudegmhigh.mif -invert | mrcalc _crudegmhigh.mif - 0 -if _crudegmhighselect.mif -datatype bit')
  run.command('mrcalc _crudegmlow.mif safe_sdm.mif ' + str(crudegmmedian) + ' -subtract -neg 0 -if - | mrthreshold - - -mask _crudegmlow.mif -invert | mrcalc _crudegmlow.mif - 0 -if _crudegmlowselect.mif -datatype bit')
  run.command('mrcalc _crudegmhighselect.mif 1 _crudegmlowselect.mif -if refined_gm.mif -datatype bit')
  # Refine CSF: recover lost CSF from crude WM SDM outliers, separate safer CSF from partial volumed voxels.
  crudecsfmin = image.statistic('safe_sdm.mif', 'min', 'crude_csf.mif')
  run.command('mrcalc _crudewmoutliers.mif safe_sdm.mif 0 -if ' + str(crudecsfmin) + ' -gt 1 crude_csf.mif -if _crudecsfextra.mif -datatype bit')
  run.command('mrcalc _crudecsfextra.mif safe_sdm.mif ' + str(crudecsfmin) + ' -subtract 0 -if - | mrthreshold - - -mask _crudecsfextra.mif | mrcalc _crudecsfextra.mif - 0 -if refined_csf.mif -datatype bit')
  # Get final voxels for single-fibre WM response function estimation from WM using 'tournier' algorithm.
  refwmcount = float(image.statistic('refined_wm.mif', 'count', 'refined_wm.mif'))
  voxsfwmcount = int(round(refwmcount * app.args.sfwm / 100.0))
  app.console('Running \'tournier\' algorithm to select ' + str(voxsfwmcount) + ' single-fibre WM voxels.')
  # Propagate this script's -nocleanup behaviour to the recursive invocation
  cleanopt = ''
  if not app._cleanup:
    cleanopt = ' -nocleanup'
  run.command('dwi2response tournier dwi.mif _respsfwmss.txt -sf_voxels ' + str(voxsfwmcount) + ' -iter_voxels ' + str(voxsfwmcount * 10) + ' -mask refined_wm.mif -voxels voxels_sfwm.mif -tempdir ' + app._tempDir + cleanopt)
  # Get final voxels for GM response function estimation from GM.
  refgmmedian = image.statistic('safe_sdm.mif', 'median', 'refined_gm.mif')
  run.command('mrcalc refined_gm.mif safe_sdm.mif 0 -if ' + str(refgmmedian) + ' -gt _refinedgmhigh.mif -datatype bit')
  run.command('mrcalc _refinedgmhigh.mif 0 refined_gm.mif -if _refinedgmlow.mif -datatype bit')
  refgmhighcount = float(image.statistic('_refinedgmhigh.mif', 'count', '_refinedgmhigh.mif'))
  refgmlowcount = float(image.statistic('_refinedgmlow.mif', 'count', '_refinedgmlow.mif'))
  voxgmhighcount = int(round(refgmhighcount * app.args.gm / 100.0))
  voxgmlowcount = int(round(refgmlowcount * app.args.gm / 100.0))
  # Select voxels closest to the GM SDM median from both halves
  run.command('mrcalc _refinedgmhigh.mif safe_sdm.mif 0 -if - | mrthreshold - - -bottom ' + str(voxgmhighcount) + ' -ignorezero | mrcalc _refinedgmhigh.mif - 0 -if _refinedgmhighselect.mif -datatype bit')
  run.command('mrcalc _refinedgmlow.mif safe_sdm.mif 0 -if - | mrthreshold - - -top ' + str(voxgmlowcount) + ' -ignorezero | mrcalc _refinedgmlow.mif - 0 -if _refinedgmlowselect.mif -datatype bit')
  run.command('mrcalc _refinedgmhighselect.mif 1 _refinedgmlowselect.mif -if voxels_gm.mif -datatype bit')
  # Get final voxels for CSF response function estimation from CSF.
  refcsfcount = float(image.statistic('refined_csf.mif', 'count', 'refined_csf.mif'))
  voxcsfcount = int(round(refcsfcount * app.args.csf / 100.0))
  run.command('mrcalc refined_csf.mif safe_sdm.mif 0 -if - | mrthreshold - - -top ' + str(voxcsfcount) + ' -ignorezero | mrcalc refined_csf.mif - 0 -if voxels_csf.mif -datatype bit')
  # Show summary of voxels counts.
  textarrow = ' --> '
  app.console('Summary of voxel counts:')
  app.console('Mask: ' + str(int(image.statistic('mask.mif', 'count', 'mask.mif'))) + textarrow + str(int(image.statistic('eroded_mask.mif', 'count', 'eroded_mask.mif'))) + textarrow + str(int(image.statistic('safe_mask.mif', 'count', 'safe_mask.mif'))))
  app.console('WM: ' + str(int(image.statistic('crude_wm.mif', 'count', 'crude_wm.mif'))) + textarrow + str(int(image.statistic('refined_wm.mif', 'count', 'refined_wm.mif'))) + textarrow + str(int(image.statistic('voxels_sfwm.mif', 'count', 'voxels_sfwm.mif'))) + ' (SF)')
  app.console('GM: ' + str(int(image.statistic('crude_gm.mif', 'count', 'crude_gm.mif'))) + textarrow + str(int(image.statistic('refined_gm.mif', 'count', 'refined_gm.mif'))) + textarrow + str(int(image.statistic('voxels_gm.mif', 'count', 'voxels_gm.mif'))))
  app.console('CSF: ' + str(int(image.statistic('crude_csf.mif', 'count', 'crude_csf.mif'))) + textarrow + str(int(image.statistic('refined_csf.mif', 'count', 'refined_csf.mif'))) + textarrow + str(int(image.statistic('voxels_csf.mif', 'count', 'voxels_csf.mif'))))
  # Generate single-fibre WM, GM and CSF responses
  bvalues_option = ' -shell ' + ','.join(map(str, bvalues))
  sfwm_lmax_option = ''
  if sfwm_lmax:
    sfwm_lmax_option = ' -lmax ' + ','.join(map(str, sfwm_lmax))
  run.command('amp2response dwi.mif voxels_sfwm.mif safe_vecs.mif response_sfwm.txt' + bvalues_option + sfwm_lmax_option)
  run.command('amp2response dwi.mif voxels_gm.mif safe_vecs.mif response_gm.txt' + bvalues_option + ' -isotropic')
  run.command('amp2response dwi.mif voxels_csf.mif safe_vecs.mif response_csf.txt' + bvalues_option + ' -isotropic')
  run.function(shutil.copyfile, 'response_sfwm.txt', path.fromUser(app.args.out_sfwm, False))
  run.function(shutil.copyfile, 'response_gm.txt', path.fromUser(app.args.out_gm, False))
  run.function(shutil.copyfile, 'response_csf.txt', path.fromUser(app.args.out_csf, False))
  # Generate 4D binary images with voxel selections at major stages in algorithm (RGB as in MSMT-CSD paper).
  run.command('mrcat crude_csf.mif crude_gm.mif crude_wm.mif crude.mif -axis 3')
  run.command('mrcat refined_csf.mif refined_gm.mif refined_wm.mif refined.mif -axis 3')
  run.command('mrcat voxels_csf.mif voxels_gm.mif voxels_sfwm.mif voxels.mif -axis 3')
def execute():
  """Run the 'fsl' algorithm of the 5ttgen script (older implementation).

  Uses the FSL tools standard_space_roi / bet (brain extraction), fast
  (cortical tissue segmentation) and run_first_all (sub-cortical grey matter
  segmentation), then combines their outputs into the five-tissue-type (5TT)
  image 'result.mif' within the script's temporary directory.

  Fixes relative to the previous revision:
    * the mrcalc call applying the re-gridded mask was missing its '-mult'
      operator (mrcalc requires an operator on its value stack);
    * 'path' was referenced in the FIRST failure message but never imported
      (NameError would have masked the real error);
    * the re-gridded mask is now kept binary ('-datatype bit'), consistent
      with the sibling implementation of this algorithm.
  """
  import os
  from distutils.spawn import find_executable
  from mrtrix3 import app, file, fsl, image, path, run

  # FSL is not available on Windows
  if app.isWindows():
    app.error('\'fsl\' algorithm of 5ttgen script cannot be run on Windows: FSL not available on Windows')

  fsl_path = os.environ.get('FSLDIR', '')
  if not fsl_path:
    app.error('Environment variable FSLDIR is not set; please run appropriate FSL configuration script')

  # Locate each required FSL executable, allowing for the 'fsl5.0-' binary
  #   name prefix used by some package distributions
  ssroi_cmd = 'standard_space_roi'
  if not find_executable(ssroi_cmd):
    ssroi_cmd = 'fsl5.0-standard_space_roi'
    if not find_executable(ssroi_cmd):
      app.error('Could not find FSL program standard_space_roi; please verify FSL install')

  bet_cmd = 'bet'
  if not find_executable(bet_cmd):
    bet_cmd = 'fsl5.0-bet'
    if not find_executable(bet_cmd):
      app.error('Could not find FSL program bet; please verify FSL install')

  fast_cmd = 'fast'
  if not find_executable(fast_cmd):
    fast_cmd = 'fsl5.0-fast'
    if not find_executable(fast_cmd):
      app.error('Could not find FSL program fast; please verify FSL install')

  first_cmd = 'run_first_all'
  if not find_executable(first_cmd):
    first_cmd = 'fsl5.0-run_first_all'
    if not find_executable(first_cmd):
      app.error('Could not find FSL program run_first_all; please verify FSL install')

  first_atlas_path = os.path.join(fsl_path, 'data', 'first', 'models_336_bin')
  if not os.path.isdir(first_atlas_path):
    app.error('Atlases required for FSL\'s FIRST program not installed; please install fsl-first-data using your relevant package manager')

  fsl_suffix = fsl.suffix()

  # Sub-cortical grey matter structures to be segmented by FIRST
  sgm_structures = [ 'L_Accu', 'R_Accu', 'L_Caud', 'R_Caud', 'L_Pall', 'R_Pall', 'L_Puta', 'R_Puta', 'L_Thal', 'R_Thal' ]
  if app.args.sgm_amyg_hipp:
    sgm_structures.extend([ 'L_Amyg', 'R_Amyg', 'L_Hipp', 'R_Hipp' ])

  run.command('mrconvert input.mif T1.nii -stride -1,+2,+3')

  fast_t1_input = 'T1.nii'
  fast_t2_input = ''

  # Decide whether or not we're going to do any brain masking
  if os.path.exists('mask.mif'):

    fast_t1_input = 'T1_masked' + fsl_suffix

    # Check to see if the mask matches the T1 image
    if image.match('T1.nii', 'mask.mif'):
      run.command('mrcalc T1.nii mask.mif -mult ' + fast_t1_input)
      mask_path = 'mask.mif'
    else:
      app.warn('Mask image does not match input image - re-gridding')
      # Keep the re-gridded mask binary; it is used as a multiplicative mask below
      run.command('mrtransform mask.mif mask_regrid.mif -template T1.nii -datatype bit')
      # Bug fix: '-mult' operator was previously missing from this call
      run.command('mrcalc T1.nii mask_regrid.mif -mult ' + fast_t1_input)
      mask_path = 'mask_regrid.mif'

    if os.path.exists('T2.nii'):
      fast_t2_input = 'T2_masked' + fsl_suffix
      run.command('mrcalc T2.nii ' + mask_path + ' -mult ' + fast_t2_input)

  elif app.args.premasked:

    fast_t1_input = 'T1.nii'
    if os.path.exists('T2.nii'):
      fast_t2_input = 'T2.nii'

  else:

    # Use FSL command standard_space_roi to do an initial masking of the image before BET
    # Also reduce the FoV of the image
    # Using MNI 1mm dilated brain mask rather than the -b option in standard_space_roi (which uses the 2mm mask); the latter looks 'buggy' to me... Unfortunately even with the 1mm 'dilated' mask, it can still cut into some brain areas, hence the explicit dilation
    mni_mask_path = os.path.join(fsl_path, 'data', 'standard', 'MNI152_T1_1mm_brain_mask_dil.nii.gz')
    mni_mask_dilation = 0
    if os.path.exists(mni_mask_path):
      mni_mask_dilation = 4
    else:
      mni_mask_path = os.path.join(fsl_path, 'data', 'standard', 'MNI152_T1_2mm_brain_mask_dil.nii.gz')
      if os.path.exists(mni_mask_path):
        mni_mask_dilation = 2

    if mni_mask_dilation:
      run.command('maskfilter ' + mni_mask_path + ' dilate mni_mask.nii -npass ' + str(mni_mask_dilation))
      if app.args.nocrop:
        ssroi_roi_option = ' -roiNONE'
      else:
        ssroi_roi_option = ' -roiFOV'
      # Second argument False: do not abort the script if standard_space_roi
      #   returns a non-zero exit code
      run.command(ssroi_cmd + ' T1.nii T1_preBET' + fsl_suffix + ' -maskMASK mni_mask.nii' + ssroi_roi_option, False)
    else:
      run.command(ssroi_cmd + ' T1.nii T1_preBET' + fsl_suffix + ' -b', False)

    # For whatever reason, the output file from standard_space_roi may not be
    # completed before BET is run
    file.waitFor('T1_preBET' + fsl_suffix)

    # BET
    fast_t1_input = 'T1_BET' + fsl_suffix
    run.command(bet_cmd + ' T1_preBET' + fsl_suffix + ' ' + fast_t1_input + ' -f 0.15 -R')

    if os.path.exists('T2.nii'):
      if app.args.nocrop:
        fast_t2_input = 'T2.nii'
      else:
        # Just a reduction of FoV, no sub-voxel interpolation going on
        run.command('mrtransform T2.nii T2_cropped.nii -template ' + fast_t1_input + ' -interp nearest')
        fast_t2_input = 'T2_cropped.nii'

  # Finish branching based on brain masking

  # FAST
  if fast_t2_input:
    run.command(fast_cmd + ' -S 2 ' + fast_t2_input + ' ' + fast_t1_input)
  else:
    run.command(fast_cmd + ' ' + fast_t1_input)
  fast_output_prefix = fast_t1_input.split('.')[0]

  # FIRST
  first_input_is_brain_extracted = ''
  if app.args.premasked:
    first_input_is_brain_extracted = ' -b'
  run.command(first_cmd + ' -s ' + ','.join(sgm_structures) + ' -i T1.nii -o first' + first_input_is_brain_extracted)

  # Test to see whether or not FIRST has succeeded
  # However if the expected image is absent, it may be due to FIRST being run
  #   on SGE; in this case it is necessary to wait and see if the file appears.
  # But even in this case, FIRST may still fail, and the file will never appear...
  combined_image_path = 'first_all_none_firstseg' + fsl_suffix
  if not os.path.isfile(combined_image_path):
    if 'SGE_ROOT' in os.environ:
      app.console('FSL FIRST job has been submitted to SGE; awaiting completion')
      app.console('(note however that FIRST may fail, and hence this script may hang indefinitely)')
      file.waitFor(combined_image_path)
    else:
      app.error('FSL FIRST has failed; not all structures were segmented successfully (check ' + path.toTemp('first.logs', False) + ')')

  # Convert FIRST meshes to partial volume images
  pve_image_list = [ ]
  for struct in sgm_structures:
    pve_image_path = 'mesh2pve_' + struct + '.mif'
    vtk_in_path = 'first-' + struct + '_first.vtk'
    vtk_temp_path = struct + '.vtk'
    run.command('meshconvert ' + vtk_in_path + ' ' + vtk_temp_path + ' -transform first2real T1.nii')
    run.command('mesh2pve ' + vtk_temp_path + ' ' + fast_t1_input + ' ' + pve_image_path)
    pve_image_list.append(pve_image_path)
  pve_cat = ' '.join(pve_image_list)
  run.command('mrmath ' + pve_cat + ' sum - | mrcalc - 1.0 -min all_sgms.mif')

  # Looks like FAST in 5.0 ignores FSLOUTPUTTYPE when writing the PVE images
  # Will have to wait and see whether this changes, and update the script accordingly
  if fast_cmd == 'fast':
    fast_suffix = fsl_suffix
  else:
    fast_suffix = '.nii.gz'

  # Combine the tissue images into the 5TT format within the script itself
  # Step 1: Run LCC on the WM image
  run.command('mrthreshold ' + fast_output_prefix + '_pve_2' + fast_suffix + ' - -abs 0.001 | maskfilter - connect - -connectivity | mrcalc 1 - 1 -gt -sub remove_unconnected_wm_mask.mif -datatype bit')
  # Step 2: Generate the images in the same fashion as the 5ttgen command
  run.command('mrcalc ' + fast_output_prefix + '_pve_0' + fast_suffix + ' remove_unconnected_wm_mask.mif -mult csf.mif')
  run.command('mrcalc 1.0 csf.mif -sub all_sgms.mif -min sgm.mif')
  run.command('mrcalc 1.0 csf.mif sgm.mif -add -sub ' + fast_output_prefix + '_pve_1' + fast_suffix + ' ' + fast_output_prefix + '_pve_2' + fast_suffix + ' -add -div multiplier.mif')
  run.command('mrcalc multiplier.mif -finite multiplier.mif 0.0 -if multiplier_noNAN.mif')
  run.command('mrcalc ' + fast_output_prefix + '_pve_1' + fast_suffix + ' multiplier_noNAN.mif -mult remove_unconnected_wm_mask.mif -mult cgm.mif')
  run.command('mrcalc ' + fast_output_prefix + '_pve_2' + fast_suffix + ' multiplier_noNAN.mif -mult remove_unconnected_wm_mask.mif -mult wm.mif')
  # The 'pathological tissue' volume (fifth type) is identically zero here
  run.command('mrcalc 0 wm.mif -min path.mif')
  run.command('mrcat cgm.mif sgm.mif wm.mif csf.mif path.mif - -axis 3 | mrconvert - combined_precrop.mif -stride +2,+3,+4,+1')

  # Use mrcrop to reduce file size (improves caching of image data during tracking)
  if app.args.nocrop:
    run.command('mrconvert combined_precrop.mif result.mif')
  else:
    run.command('mrmath combined_precrop.mif sum - -axis 3 | mrthreshold - - -abs 0.5 | mrcrop combined_precrop.mif result.mif -mask -')
def execute(): #pylint: disable=unused-variable
  """Run the 'fsl' algorithm of the 5ttgen script.

  Generates a five-tissue-type (5TT) segmented image 'result.mif' in the
  script's temporary directory, by running the FSL tools
  standard_space_roi / bet (brain extraction), fast (cortical tissue
  segmentation) and run_first_all (sub-cortical grey matter segmentation),
  then recombining their outputs.
  """
  import math, os
  from mrtrix3 import app, fsl, image, run
  # FSL is not available on Windows
  if app.isWindows():
    app.error('\'fsl\' algorithm of 5ttgen script cannot be run on Windows: FSL not available on Windows')
  fsl_path = os.environ.get('FSLDIR', '')
  if not fsl_path:
    app.error('Environment variable FSLDIR is not set; please run appropriate FSL configuration script')
  # fsl.exeName() resolves each executable, handling the 'fsl5.0-' binary
  #   name prefix used by some package distributions (see fsl module)
  bet_cmd = fsl.exeName('bet')
  fast_cmd = fsl.exeName('fast')
  first_cmd = fsl.exeName('run_first_all')
  ssroi_cmd = fsl.exeName('standard_space_roi')
  first_atlas_path = os.path.join(fsl_path, 'data', 'first', 'models_336_bin')
  if not os.path.isdir(first_atlas_path):
    app.error('Atlases required for FSL\'s FIRST program not installed; please install fsl-first-data using your relevant package manager')
  fsl_suffix = fsl.suffix()
  # Sub-cortical grey matter structures to be segmented by FIRST
  sgm_structures = [ 'L_Accu', 'R_Accu', 'L_Caud', 'R_Caud', 'L_Pall', 'R_Pall', 'L_Puta', 'R_Puta', 'L_Thal', 'R_Thal' ]
  if app.args.sgm_amyg_hipp:
    sgm_structures.extend([ 'L_Amyg', 'R_Amyg', 'L_Hipp', 'R_Hipp' ])
  t1_spacing = image.Header('input.mif').spacing()
  upsample_for_first = False
  # If voxel size is 1.25mm or larger, make a guess that the user has erroneously re-gridded their data
  # (comparison is against the geometric mean of the three voxel dimensions)
  if math.pow(t1_spacing[0] * t1_spacing[1] * t1_spacing[2], 1.0/3.0) > 1.225:
    app.warn('Voxel size larger than expected for T1-weighted images (' + str(t1_spacing) + '); '
             'note that ACT does not require re-gridding of T1 image to DWI space, and indeed '
             'retaining the original higher resolution of the T1 image is preferable')
    upsample_for_first = True
  run.command('mrconvert input.mif T1.nii -strides -1,+2,+3')
  fast_t1_input = 'T1.nii'
  fast_t2_input = ''
  # Decide whether or not we're going to do any brain masking
  if os.path.exists('mask.mif'):
    fast_t1_input = 'T1_masked' + fsl_suffix
    # Check to see if the mask matches the T1 image
    if image.match('T1.nii', 'mask.mif'):
      run.command('mrcalc T1.nii mask.mif -mult ' + fast_t1_input)
      mask_path = 'mask.mif'
    else:
      app.warn('Mask image does not match input image - re-gridding')
      run.command('mrtransform mask.mif mask_regrid.mif -template T1.nii -datatype bit')
      run.command('mrcalc T1.nii mask_regrid.mif -mult ' + fast_t1_input)
      mask_path = 'mask_regrid.mif'
    if os.path.exists('T2.nii'):
      fast_t2_input = 'T2_masked' + fsl_suffix
      run.command('mrcalc T2.nii ' + mask_path + ' -mult ' + fast_t2_input)
  elif app.args.premasked:
    fast_t1_input = 'T1.nii'
    if os.path.exists('T2.nii'):
      fast_t2_input = 'T2.nii'
  else:
    # Use FSL command standard_space_roi to do an initial masking of the image before BET
    # Also reduce the FoV of the image
    # Using MNI 1mm dilated brain mask rather than the -b option in standard_space_roi (which uses the 2mm mask); the latter looks 'buggy' to me... Unfortunately even with the 1mm 'dilated' mask, it can still cut into some brain areas, hence the explicit dilation
    mni_mask_path = os.path.join(fsl_path, 'data', 'standard', 'MNI152_T1_1mm_brain_mask_dil.nii.gz')
    mni_mask_dilation = 0
    if os.path.exists(mni_mask_path):
      mni_mask_dilation = 4
    else:
      mni_mask_path = os.path.join(fsl_path, 'data', 'standard', 'MNI152_T1_2mm_brain_mask_dil.nii.gz')
      if os.path.exists(mni_mask_path):
        mni_mask_dilation = 2
    if mni_mask_dilation:
      run.command('maskfilter ' + mni_mask_path + ' dilate mni_mask.nii -npass ' + str(mni_mask_dilation))
      if app.args.nocrop:
        ssroi_roi_option = ' -roiNONE'
      else:
        ssroi_roi_option = ' -roiFOV'
      # Second argument False: do not abort the script on a non-zero exit code
      run.command(ssroi_cmd + ' T1.nii T1_preBET' + fsl_suffix + ' -maskMASK mni_mask.nii' + ssroi_roi_option, False)
    else:
      run.command(ssroi_cmd + ' T1.nii T1_preBET' + fsl_suffix + ' -b', False)
    # NOTE(review): presumably fsl.findImage() locates the output regardless of
    #   the suffix FSL actually wrote (and waits for it if necessary) — confirm
    #   against the fsl module
    pre_bet_image = fsl.findImage('T1_preBET')
    # BET
    run.command(bet_cmd + ' ' + pre_bet_image + ' T1_BET' + fsl_suffix + ' -f 0.15 -R')
    fast_t1_input = fsl.findImage('T1_BET' + fsl_suffix)
    if os.path.exists('T2.nii'):
      if app.args.nocrop:
        fast_t2_input = 'T2.nii'
      else:
        # Just a reduction of FoV, no sub-voxel interpolation going on
        run.command('mrtransform T2.nii T2_cropped.nii -template ' + fast_t1_input + ' -interp nearest')
        fast_t2_input = 'T2_cropped.nii'
  # Finish branching based on brain masking
  # FAST
  if fast_t2_input:
    run.command(fast_cmd + ' -S 2 ' + fast_t2_input + ' ' + fast_t1_input)
  else:
    run.command(fast_cmd + ' ' + fast_t1_input)
  # FIRST
  first_input = 'T1.nii'
  if upsample_for_first:
    app.warn('Generating 1mm isotropic T1 image for FIRST in hope of preventing failure, since input image is of lower resolution')
    run.command('mrresize T1.nii T1_1mm.nii -voxel 1.0 -interp sinc')
    first_input = 'T1_1mm.nii'
  first_input_brain_extracted_option = ''
  if app.args.premasked:
    first_input_brain_extracted_option = ' -b'
  # If the user has disabled cleanup, pass FIRST's debug flag so its
  #   intermediates are retained as well (-d; see FSL documentation)
  first_debug_option = ''
  if not app.cleanup:
    first_debug_option = ' -d'
  first_verbosity_option = ''
  if app.verbosity == 3:
    first_verbosity_option = ' -v'
  run.command(first_cmd + ' -m none -s ' + ','.join(sgm_structures) + ' -i ' + first_input + ' -o first' + first_input_brain_extracted_option + first_debug_option + first_verbosity_option)
  # Verify that FIRST produced output for all requested structures
  fsl.checkFirst('first', sgm_structures)
  # Convert FIRST meshes to partial volume images
  pve_image_list = [ ]
  progress = app.progressBar('Generating partial volume images for SGM structures', len(sgm_structures))
  for struct in sgm_structures:
    pve_image_path = 'mesh2voxel_' + struct + '.mif'
    vtk_in_path = 'first-' + struct + '_first.vtk'
    vtk_temp_path = struct + '.vtk'
    # Transform mesh from FIRST's internal convention to real / scanner space
    run.command('meshconvert ' + vtk_in_path + ' ' + vtk_temp_path + ' -transform first2real ' + first_input)
    run.command('mesh2voxel ' + vtk_temp_path + ' ' + fast_t1_input + ' ' + pve_image_path)
    pve_image_list.append(pve_image_path)
    progress.increment()
  progress.done()
  # Sum all structures, clamping the total partial volume to 1.0
  run.command('mrmath ' + ' '.join(pve_image_list) + ' sum - | mrcalc - 1.0 -min all_sgms.mif')
  # Combine the tissue images into the 5TT format within the script itself
  fast_output_prefix = fast_t1_input.split('.')[0]
  fast_csf_output = fsl.findImage(fast_output_prefix + '_pve_0')
  fast_gm_output = fsl.findImage(fast_output_prefix + '_pve_1')
  fast_wm_output = fsl.findImage(fast_output_prefix + '_pve_2')
  # Step 1: Run LCC on the WM image
  run.command('mrthreshold ' + fast_wm_output + ' - -abs 0.001 | maskfilter - connect - -connectivity | mrcalc 1 - 1 -gt -sub remove_unconnected_wm_mask.mif -datatype bit')
  # Step 2: Generate the images in the same fashion as the old 5ttgen binary used to:
  #   - Preserve CSF as-is
  #   - Preserve SGM, unless it results in a sum of volume fractions greater than 1, in which case clamp
  #   - Multiply the FAST volume fractions of GM and CSF, so that the sum of CSF, SGM, CGM and WM is 1.0
  run.command('mrcalc ' + fast_csf_output + ' remove_unconnected_wm_mask.mif -mult csf.mif')
  run.command('mrcalc 1.0 csf.mif -sub all_sgms.mif -min sgm.mif')
  run.command('mrcalc 1.0 csf.mif sgm.mif -add -sub ' + fast_gm_output + ' ' + fast_wm_output + ' -add -div multiplier.mif')
  # Replace non-finite multiplier values (division by zero above) with 0.0
  run.command('mrcalc multiplier.mif -finite multiplier.mif 0.0 -if multiplier_noNAN.mif')
  run.command('mrcalc ' + fast_gm_output + ' multiplier_noNAN.mif -mult remove_unconnected_wm_mask.mif -mult cgm.mif')
  run.command('mrcalc ' + fast_wm_output + ' multiplier_noNAN.mif -mult remove_unconnected_wm_mask.mif -mult wm.mif')
  # The 'pathological tissue' volume (fifth type) is identically zero here
  run.command('mrcalc 0 wm.mif -min path.mif')
  run.command('mrcat cgm.mif sgm.mif wm.mif csf.mif path.mif - -axis 3 | mrconvert - combined_precrop.mif -strides +2,+3,+4,+1')
  # Use mrcrop to reduce file size (improves caching of image data during tracking)
  if app.args.nocrop:
    run.command('mrconvert combined_precrop.mif result.mif')
  else:
    run.command('mrmath combined_precrop.mif sum - -axis 3 | mrthreshold - - -abs 0.5 | mrcrop combined_precrop.mif result.mif -mask -')
def command(cmd, exitOnError=True): #pylint: disable=unused-variable
  """Execute a (possibly piped) shell command string.

  Splits 'cmd' on the pipe character '|', launches each stage as its own
  subprocess (chaining stdout -> stdin along the pipeline), waits for all
  stages to complete, and returns a (stdout, stderr) tuple of the
  accumulated text output. MRtrix3 executables in the pipeline additionally
  receive flags reflecting the script's thread count and verbosity.

  If any stage fails: with exitOnError=True (default), an error summary is
  printed, 'error.txt' is written to the temporary directory (which is
  preserved), and the script exits with status 1; with exitOnError=False,
  only a warning is issued.
  """
  import inspect, itertools, shlex, signal, string, subprocess, sys, tempfile
  from distutils.spawn import find_executable
  from mrtrix3 import app
  # NOTE(review): 'os' is used throughout but not imported locally; presumably
  #   imported at module level (not visible in this chunk) — confirm
  # This is the only global variable that is _modified_ within this function
  global _processes
  # Vectorise the command string, preserving anything encased within quotation marks
  if os.sep == '/': # Cheap POSIX compliance check
    cmdsplit = shlex.split(cmd)
  else: # Native Windows Python
    cmdsplit = [ entry.strip('\"') for entry in shlex.split(cmd, posix=False) ]
  # Resume support: while _lastFile is set, commands are skipped rather than
  #   executed; _triggerContinue() detects the command that produced the
  #   last-generated file (presumably clearing _lastFile — see module state)
  if _lastFile:
    if _triggerContinue(cmdsplit):
      app.debug('Detected last file in command \'' + cmd + '\'; this is the last run.command() / run.function() call that will be skipped')
    if app.verbosity:
      sys.stderr.write(app.colourExec + 'Skipping command:' + app.colourClear + ' ' + cmd + '\n')
      sys.stderr.flush()
    return ('', '')
  # This splits the command string based on the piping character '|', such that each
  # individual executable (along with its arguments) appears as its own list
  cmdstack = [ list(g) for k, g in itertools.groupby(cmdsplit, lambda s: s != '|') if k ]
  for line in cmdstack:
    is_mrtrix_exe = line[0] in _mrtrix_exe_list
    if is_mrtrix_exe:
      line[0] = versionMatch(line[0])
      if app.numThreads is not None:
        line.extend(['-nthreads', str(app.numThreads)])
      # Get MRtrix3 binaries to output additional INFO-level information if running in debug mode
      if app.verbosity == 3:
        line.append('-info')
      elif not app.verbosity:
        line.append('-quiet')
    else:
      line[0] = exeName(line[0])
    shebang = _shebang(line[0])
    if shebang:
      if not is_mrtrix_exe:
        # If a shebang is found, and this call is therefore invoking an
        #   interpreter, can't rely on the interpreter finding the script
        #   from PATH; need to find the full path ourselves.
        line[0] = find_executable(line[0])
      # Prepend the interpreter (and its arguments) so the script need not be executable
      for item in reversed(shebang):
        line.insert(0, item)
  app.debug('To execute: ' + str(cmdstack))
  if app.verbosity:
    sys.stderr.write(app.colourExec + 'Command:' + app.colourClear + ' ' + cmd + '\n')
    sys.stderr.flush()
  # Disable interrupt signal handler while threads are running
  try:
    signal.signal(signal.SIGINT, signal.default_int_handler)
  except:
    pass
  # Construct temporary text files for holding stdout / stderr contents when appropriate
  #   (One entry per process; each is a tuple containing two entries, each of which is either a
  #   file-like object, or None)
  tempfiles = [ ]
  # Execute all processes
  assert not _processes
  for index, to_execute in enumerate(cmdstack):
    file_out = None
    file_err = None
    # If there's at least one command prior to this, need to receive the stdout from the prior command
    #   at the stdin of this command; otherwise, nothing to receive
    if index > 0:
      handle_in = _processes[index - 1].stdout
    else:
      handle_in = None
    # If this is not the last command, then stdout needs to be piped to the next command;
    #   otherwise, write stdout to a temporary file so that the contents can be read later
    if index < len(cmdstack) - 1:
      handle_out = subprocess.PIPE
    else:
      file_out = tempfile.TemporaryFile()
      handle_out = file_out.fileno()
    # If we're in debug / info mode, the contents of stderr will be read and printed to the terminal
    #   as the command progresses, hence this needs to go to a pipe; otherwise, write it to a temporary
    #   file so that the contents can be read later
    if app.verbosity > 1:
      handle_err = subprocess.PIPE
    else:
      file_err = tempfile.TemporaryFile()
      handle_err = file_err.fileno()
    # Set off the processes
    # preexec_fn=os.setpgrp detaches the child from the terminal's process
    #   group; not available on all platforms, hence the AttributeError fallback
    try:
      try:
        process = subprocess.Popen(to_execute, stdin=handle_in, stdout=handle_out, stderr=handle_err, env=_env, preexec_fn=os.setpgrp) # pylint: disable=bad-option-value,subprocess-popen-preexec-fn
      except AttributeError:
        process = subprocess.Popen(to_execute, stdin=handle_in, stdout=handle_out, stderr=handle_err, env=_env)
      _processes.append(process)
      tempfiles.append((file_out, file_err))
    # FileNotFoundError not defined in Python 2.7
    except OSError as e:
      if exitOnError:
        app.error('\'' + to_execute[0] + '\' not executed ("' + str(e) + '"); script cannot proceed')
      else:
        app.warn('\'' + to_execute[0] + '\' not executed ("' + str(e) + '")')
        for p in _processes:
          p.terminate()
        _processes = []
      break
  return_stdout = ''
  return_stderr = ''
  error = False
  error_text = ''
  # Wait for all commands to complete
  # Switch how we monitor running processes / wait for them to complete
  #   depending on whether or not the user has specified -info or -debug option
  try:
    if app.verbosity > 1:
      for process in _processes:
        stderrdata = b''
        do_indent = True
        while True:
          # Have to read one character at a time: Waiting for a newline character using e.g. readline() will prevent MRtrix progressbars from appearing
          byte = process.stderr.read(1)
          stderrdata += byte
          char = byte.decode('cp1252', errors='ignore')
          if not char and process.poll() is not None:
            break
          if do_indent and char in string.printable and char != '\r' and char != '\n':
            sys.stderr.write(' ')
            do_indent = False
          elif char in ['\r', '\n']:
            do_indent = True
          sys.stderr.write(char)
          sys.stderr.flush()
        stderrdata = stderrdata.decode('utf-8', errors='replace')
        return_stderr += stderrdata
        if process.returncode:
          error = True
          error_text += stderrdata
    else:
      for process in _processes:
        process.wait()
  except (KeyboardInterrupt, SystemExit):
    app.handler(signal.SIGINT, inspect.currentframe())
  # Re-enable interrupt signal handler
  try:
    signal.signal(signal.SIGINT, app.handler)
  except:
    pass
  # For any command stdout / stderr data that wasn't either passed to another command or
  #   printed to the terminal during execution, read it here.
  for index in range(len(cmdstack)):
    if tempfiles[index][0] is not None:
      tempfiles[index][0].flush()
      tempfiles[index][0].seek(0)
      stdout_text = tempfiles[index][0].read().decode('utf-8', errors='replace')
      return_stdout += stdout_text
      if _processes[index].returncode:
        error = True
        error_text += stdout_text
    if tempfiles[index][1] is not None:
      tempfiles[index][1].flush()
      tempfiles[index][1].seek(0)
      stderr_text = tempfiles[index][1].read().decode('utf-8', errors='replace')
      return_stderr += stderr_text
      if _processes[index].returncode:
        error = True
        error_text += stderr_text
  _processes = []
  if error:
    if exitOnError:
      # Preserve the temporary directory so the failure can be investigated
      app.cleanup = False
      # Report the call site of run.command() in the error message
      caller = inspect.getframeinfo(inspect.stack()[1][0])
      script_name = os.path.basename(sys.argv[0])
      app.console('')
      # Older Python versions return a plain tuple rather than a named tuple
      #   from getframeinfo(); hence the indexed fallback
      try:
        filename = caller.filename
        lineno = caller.lineno
      except AttributeError:
        filename = caller[1]
        lineno = caller[2]
      sys.stderr.write(script_name + ': ' + app.colourError + '[ERROR] Command failed: ' + cmd + app.colourClear + app.colourDebug + ' (' + os.path.basename(filename) + ':' + str(lineno) + ')' + app.colourClear + '\n')
      sys.stderr.write(script_name + ': ' + app.colourConsole + 'Output of failed command:' + app.colourClear + '\n')
      for line in error_text.splitlines():
        sys.stderr.write(' ' * (len(script_name) + 2) + line + '\n')
      app.console('')
      sys.stderr.flush()
      if app.tempDir:
        with open(os.path.join(app.tempDir, 'error.txt'), 'w') as outfile:
          outfile.write(cmd + '\n\n' + error_text + '\n')
      app.complete()
      sys.exit(1)
    else:
      app.warn('Command failed: ' + cmd)
  # Only now do we append to the script log, since the command has completed successfully
  # Note: Writing the command as it was formed as the input to run.command():
  #   other flags may potentially change if this file is eventually used to resume the script
  if app.tempDir:
    with open(os.path.join(app.tempDir, 'log.txt'), 'a') as outfile:
      outfile.write(cmd + '\n')
  return (return_stdout, return_stderr)
def execute(): #pylint: disable=unused-variable
  """Run the 'tournier' algorithm of the dwi2response script.

  Iteratively estimates a single-fibre white-matter response function:
  starting from a delta-function response, each iteration performs CSD,
  ranks voxels by a cost function favouring a single dominant FOD peak,
  selects the top-ranked voxels, and derives a new response function from
  them. Iteration stops when the selected voxel set no longer changes, or
  after max_iters iterations. Produces 'response.txt' and 'voxels.mif' in
  the temporary directory, and copies the response to the user's output path.
  """
  import os, shutil
  from mrtrix3 import app, file, image, path, run #pylint: disable=redefined-builtin
  lmax_option = ''
  if app.args.lmax:
    lmax_option = ' -lmax ' + app.args.lmax
  if app.args.max_iters < 2:
    app.error('Number of iterations must be at least 2')
  for iteration in range(0, app.args.max_iters):
    prefix = 'iter' + str(iteration) + '_'
    if iteration == 0:
      RF_in_path = 'init_RF.txt'
      mask_in_path = 'mask.mif'
      # Initial delta-function-like response (SH coefficients for l=0,2,4);
      #   first iteration is restricted to lmax=4 accordingly
      init_RF = '1 -1 1'
      with open(RF_in_path, 'w') as f:
        f.write(init_RF)
      iter_lmax_option = ' -lmax 4'
    else:
      # Subsequent iterations use the previous iteration's response and
      #   dilated single-fibre voxel mask
      RF_in_path = 'iter' + str(iteration-1) + '_RF.txt'
      mask_in_path = 'iter' + str(iteration-1) + '_SF_dilated.mif'
      iter_lmax_option = lmax_option
    # Run CSD
    run.command('dwi2fod csd dwi.mif ' + RF_in_path + ' ' + prefix + 'FOD.mif -mask ' + mask_in_path + iter_lmax_option)
    # Get amplitudes of two largest peaks, and direction of largest
    run.command('fod2fixel ' + prefix + 'FOD.mif ' + prefix + 'fixel -peak peaks.mif -mask ' + mask_in_path + ' -fmls_no_thresholds')
    file.delTemporary(prefix + 'FOD.mif')
    if iteration:
      file.delTemporary(mask_in_path)
    run.command('fixel2voxel ' + prefix + 'fixel/peaks.mif split_data ' + prefix + 'amps.mif -number 2')
    run.command('mrconvert ' + prefix + 'amps.mif ' + prefix + 'first_peaks.mif -coord 3 0 -axes 0,1,2')
    run.command('mrconvert ' + prefix + 'amps.mif ' + prefix + 'second_peaks.mif -coord 3 1 -axes 0,1,2')
    file.delTemporary(prefix + 'amps.mif')
    run.command('fixel2voxel ' + prefix + 'fixel/directions.mif split_dir ' + prefix + 'all_dirs.mif -number 1')
    file.delTemporary(prefix + 'fixel')
    run.command('mrconvert ' + prefix + 'all_dirs.mif ' + prefix + 'first_dir.mif -coord 3 0:2')
    file.delTemporary(prefix + 'all_dirs.mif')
    # Calculate the 'cost function' Donald derived for selecting single-fibre voxels
    # https://github.com/MRtrix3/mrtrix3/pull/426
    #  sqrt(|peak1|) * (1 - |peak2| / |peak1|)^2
    run.command('mrcalc ' + prefix + 'first_peaks.mif -sqrt 1 ' + prefix + 'second_peaks.mif ' + prefix + 'first_peaks.mif -div -sub 2 -pow -mult ' + prefix + 'CF.mif')
    file.delTemporary(prefix + 'first_peaks.mif')
    file.delTemporary(prefix + 'second_peaks.mif')
    # Select the top-ranked voxels
    run.command('mrthreshold ' + prefix + 'CF.mif -top ' + str(app.args.sf_voxels) + ' ' + prefix + 'SF.mif')
    # Generate a new response function based on this selection
    run.command('amp2response dwi.mif ' + prefix + 'SF.mif ' + prefix + 'first_dir.mif ' + prefix + 'RF.txt' + iter_lmax_option)
    file.delTemporary(prefix + 'first_dir.mif')
    # Should we terminate?
    if iteration > 0:
      # Converged when no voxel differs between this and the previous selection
      run.command('mrcalc ' + prefix + 'SF.mif iter' + str(iteration-1) + '_SF.mif -sub ' + prefix + 'SF_diff.mif')
      file.delTemporary('iter' + str(iteration-1) + '_SF.mif')
      max_diff = image.statistic(prefix + 'SF_diff.mif', 'max')
      file.delTemporary(prefix + 'SF_diff.mif')
      if int(max_diff) == 0:
        app.console('Convergence of SF voxel selection detected at iteration ' + str(iteration))
        file.delTemporary(prefix + 'CF.mif')
        run.function(shutil.copyfile, prefix + 'RF.txt', 'response.txt')
        run.function(shutil.move, prefix + 'SF.mif', 'voxels.mif')
        break
    # Select a greater number of top single-fibre voxels, and dilate (within bounds of initial mask);
    #   these are the voxels that will be re-tested in the next iteration
    run.command('mrthreshold ' + prefix + 'CF.mif -top ' + str(app.args.iter_voxels) + ' - | maskfilter - dilate - -npass ' + str(app.args.dilate) + ' | mrcalc mask.mif - -mult ' + prefix + 'SF_dilated.mif')
    file.delTemporary(prefix + 'CF.mif')
    # Commence the next iteration
  # If terminating due to running out of iterations, still need to put the results in the appropriate location
  if not os.path.exists('response.txt'):
    app.console('Exiting after maximum ' + str(app.args.max_iters) + ' iterations')
    run.function(shutil.copyfile, 'iter' + str(app.args.max_iters-1) + '_RF.txt', 'response.txt')
    run.function(shutil.move, 'iter' + str(app.args.max_iters-1) + '_SF.mif', 'voxels.mif')
  run.function(shutil.copyfile, 'response.txt', path.fromUser(app.args.output, False))
def execute():
  """Run the 'tax' algorithm of the dwi2response script.

  Iteratively estimates a single-fibre white-matter response function
  (Tax et al. approach): each iteration performs CSD with the current
  response estimate, revises the single-fibre voxel mask based on the ratio
  of the two largest FOD peaks, and derives a new response function from
  those voxels. Iteration ends when the response coefficients stop changing
  by more than the user-specified convergence percentage, or after
  max_iters iterations. Produces 'response.txt' and 'voxels.mif' in the
  temporary directory, and copies the response to the user's output path.

  Fixes relative to the previous revision (see inline comments): the
  convergence test no longer divides by zero for zero-valued coefficients,
  compares against the coefficient magnitude (SH response terms alternate
  in sign), and the iteration-limit message reports the actual number of
  iterations performed.
  """
  import math, os, shutil
  from mrtrix3 import app, file, image, path, run
  lmax_option = ''
  if app.args.lmax:
    lmax_option = ' -lmax ' + app.args.lmax
  # User provides convergence threshold as a percentage; convert to a fraction
  convergence_change = 0.01 * app.args.convergence
  for iteration in range(0, app.args.max_iters):
    prefix = 'iter' + str(iteration) + '_'

    # How to initialise response function?
    # old dwi2response command used mean & standard deviation of DWI data; however
    #   this may force the output FODs to lmax=2 at the first iteration
    # Chantal used a tensor with low FA, but it'd be preferable to get the scaling right
    # Other option is to do as before, but get the ratio between l=0 and l=2, and
    #   generate l=4,6,... using that amplitude ratio
    if iteration == 0:
      RF_in_path = 'init_RF.txt'
      mask_in_path = 'mask.mif'
      # TODO This can be changed once #71 is implemented (mrstats statistics across volumes)
      volume_means = [float(x) for x in image.statistic('dwi.mif', 'mean', 'mask.mif').split()]
      mean = sum(volume_means) / float(len(volume_means))
      volume_stds = [float(x) for x in image.statistic('dwi.mif', 'std', 'mask.mif').split()]
      std = sum(volume_stds) / float(len(volume_stds))
      # Scale these to reflect the fact that we're moving to the SH basis
      mean *= math.sqrt(4.0 * math.pi)
      std *= math.sqrt(4.0 * math.pi)
      # Now produce the initial response function
      # Let's only do it to lmax 4
      init_RF = [ str(mean), str(-0.5*std), str(0.25*std*std/mean) ]
      with open('init_RF.txt', 'w') as f:
        f.write(' '.join(init_RF))
    else:
      RF_in_path = 'iter' + str(iteration-1) + '_RF.txt'
      mask_in_path = 'iter' + str(iteration-1) + '_SF.mif'

    # Run CSD
    run.command('dwi2fod csd dwi.mif ' + RF_in_path + ' ' + prefix + 'FOD.mif -mask ' + mask_in_path)
    # Get amplitudes of two largest peaks, and directions of largest
    run.command('fod2fixel ' + prefix + 'FOD.mif ' + prefix + 'fixel -peak peaks.mif -mask ' + mask_in_path + ' -fmls_no_thresholds')
    file.delTempFile(prefix + 'FOD.mif')
    run.command('fixel2voxel ' + prefix + 'fixel/peaks.mif split_data ' + prefix + 'amps.mif')
    run.command('mrconvert ' + prefix + 'amps.mif ' + prefix + 'first_peaks.mif -coord 3 0 -axes 0,1,2')
    run.command('mrconvert ' + prefix + 'amps.mif ' + prefix + 'second_peaks.mif -coord 3 1 -axes 0,1,2')
    file.delTempFile(prefix + 'amps.mif')
    run.command('fixel2voxel ' + prefix + 'fixel/directions.mif split_dir ' + prefix + 'all_dirs.mif')
    file.delTempFolder(prefix + 'fixel')
    run.command('mrconvert ' + prefix + 'all_dirs.mif ' + prefix + 'first_dir.mif -coord 3 0:2')
    file.delTempFile(prefix + 'all_dirs.mif')
    # Revise single-fibre voxel selection based on ratio of tallest to second-tallest peak
    run.command('mrcalc ' + prefix + 'second_peaks.mif ' + prefix + 'first_peaks.mif -div ' + prefix + 'peak_ratio.mif')
    file.delTempFile(prefix + 'first_peaks.mif')
    file.delTempFile(prefix + 'second_peaks.mif')
    run.command('mrcalc ' + prefix + 'peak_ratio.mif ' + str(app.args.peak_ratio) + ' -lt ' + mask_in_path + ' -mult ' + prefix + 'SF.mif -datatype bit')
    file.delTempFile(prefix + 'peak_ratio.mif')
    # Make sure image isn't empty
    SF_voxel_count = int(image.statistic(prefix + 'SF.mif', 'count', prefix + 'SF.mif'))
    if not SF_voxel_count:
      app.error('Aborting: All voxels have been excluded from single-fibre selection')
    # Generate a new response function
    run.command('amp2response dwi.mif ' + prefix + 'SF.mif ' + prefix + 'first_dir.mif ' + prefix + 'RF.txt' + lmax_option)
    file.delTempFile(prefix + 'first_dir.mif')

    # Detect convergence
    # Look for a change > some percentage - don't bother looking at the masks
    if iteration > 0:
      with open(RF_in_path, 'r') as old_RF_file:
        old_RF = [ float(x) for x in old_RF_file.read().split() ]
      with open(prefix + 'RF.txt', 'r') as new_RF_file:
        new_RF = [ float(x) for x in new_RF_file.read().split() ]
      reiterate = False
      for index in range(0, len(old_RF)):
        mean = 0.5 * (old_RF[index] + new_RF[index])
        diff = math.fabs(0.5 * (old_RF[index] - new_RF[index]))
        if mean:
          # Bug fix: compare against the magnitude of the mean; SH response
          #   coefficients alternate in sign, and a signed ratio could never
          #   exceed the (positive) convergence threshold for negative terms
          if diff / math.fabs(mean) > convergence_change:
            reiterate = True
        elif diff:
          # Bug fix: previously a zero mean raised ZeroDivisionError; a
          #   coefficient changing symmetrically about zero counts as a change
          reiterate = True
      if not reiterate:
        app.console('Exiting at iteration ' + str(iteration) + ' with ' + str(SF_voxel_count) + ' SF voxels due to unchanged response function coefficients')
        run.function(shutil.copyfile, prefix + 'RF.txt', 'response.txt')
        run.function(shutil.copyfile, prefix + 'SF.mif', 'voxels.mif')
        break

    file.delTempFile(RF_in_path)
    file.delTempFile(mask_in_path)
    # Go to the next iteration

  # If we've terminated due to hitting the iteration limiter, we still need to copy the output file(s) to the correct location
  if not os.path.exists('response.txt'):
    # Bug fix: the loop performs max_iters iterations, so report that count
    #   (previously reported max_iters-1, inconsistent with the sibling
    #   'tournier' implementation)
    app.console('Exiting after maximum ' + str(app.args.max_iters) + ' iterations with ' + str(SF_voxel_count) + ' SF voxels')
    run.function(shutil.copyfile, 'iter' + str(app.args.max_iters-1) + '_RF.txt', 'response.txt')
    run.function(shutil.copyfile, 'iter' + str(app.args.max_iters-1) + '_SF.mif', 'voxels.mif')

  run.function(shutil.copyfile, 'response.txt', path.fromUser(app.args.output, False))
choices=parcellation_choices) participant_options.add_argument( option_prefix + 'preprocessed', action='store_true', help= 'Indicate that the subject DWI data have been preprocessed, and hence initial image processing steps will be skipped (also useful for testing)' ) participant_options.add_argument( option_prefix + 'streamlines', type=int, help='The number of streamlines to generate for each subject') app.parse() if app.isWindows(): app.error('Script cannot be run on Windows due to FSL dependency') if find_executable('bids-validator'): run.command('bids-validator ' + app.args.bids_dir) else: app.warn( 'BIDS validator script not installed; proceeding without validation of input data' ) # Running participant level if app.args.analysis_level == 'participant': subjects_to_analyze = [] # Only run a subset of subjects if app.args.participant_label: subjects_to_analyze = [
if len(dwi_size) == 4: num_volumes = dwi_size[3] bval = [i[3] for i in grad] nvols = [i[3] for i in dwi_ind_size] for idx, i in enumerate(DWInlist): if len(DWInlist) == 1: tmpidxlist = range(0, num_volumes) else: tmpidxlist = range(sum(nvols[:idx + 1]), sum(nvols[:idx + 1]) + nvols[idx + 1]) idxlist.append(','.join(str(i) for i in tmpidxlist)) # Perform initial checks on input images if not grad: app.error('No diffusion gradient table found') if not len(grad) == num_volumes: app.error('Number of lines in gradient table (' + str(len(grad)) + ') does not match input image (' + str(num_volumes) + ' volumes); check your input data') if app.args.extent: extent = app.args.extent else: extent = '5,5,5' run.command('mrconvert dwi.mif working.mif') # denoising if app.args.denoise: print("...Beginning denoising")
def execute(): #pylint: disable=unused-variable
  """Iteratively estimate a single-fibre white-matter response function.

  Implements the 'tournier' algorithm: starting from a delta-function-like
  initial response, alternately runs CSD and re-selects the 'best'
  single-fibre (SF) voxels, until the SF voxel selection no longer changes
  or ``app.args.max_iters`` is reached.

  Side effects: reads 'dwi.mif' / 'mask.mif' in the temporary directory,
  writes intermediate 'iterN_*' images there, and finally copies the
  converged response function to the user-specified output path
  (``app.args.output``); the selected SF voxel mask is left as 'voxels.mif'.
  """
  import os, shutil
  from mrtrix3 import app, file, image, path, run #pylint: disable=redefined-builtin
  lmax_option = ''
  if app.args.lmax:
    lmax_option = ' -lmax ' + app.args.lmax
  if app.args.max_iters < 2:
    app.error('Number of iterations must be at least 2')
  for iteration in range(0, app.args.max_iters):
    prefix = 'iter' + str(iteration) + '_'
    if iteration == 0:
      RF_in_path = 'init_RF.txt'
      mask_in_path = 'mask.mif'
      # Initial response: a sharp (delta-like) lmax=4 zonal harmonic series
      init_RF = '1 -1 1'
      with open(RF_in_path, 'w') as f:
        f.write(init_RF)
      iter_lmax_option = ' -lmax 4'
    else:
      # Subsequent iterations start from the previous iteration's response
      # and the previous (dilated) single-fibre voxel selection
      RF_in_path = 'iter' + str(iteration - 1) + '_RF.txt'
      mask_in_path = 'iter' + str(iteration - 1) + '_SF_dilated.mif'
      iter_lmax_option = lmax_option
    # Run CSD
    run.command('dwi2fod csd dwi.mif ' + RF_in_path + ' ' + prefix + 'FOD.mif -mask ' + mask_in_path + iter_lmax_option)
    # Get amplitudes of two largest peaks, and direction of largest
    run.command('fod2fixel ' + prefix + 'FOD.mif ' + prefix + 'fixel -peak peaks.mif -mask ' + mask_in_path + ' -fmls_no_thresholds')
    file.delTemporary(prefix + 'FOD.mif')
    if iteration:
      file.delTemporary(mask_in_path)
    run.command('fixel2voxel ' + prefix + 'fixel/peaks.mif split_data ' + prefix + 'amps.mif -number 2')
    run.command('mrconvert ' + prefix + 'amps.mif ' + prefix + 'first_peaks.mif -coord 3 0 -axes 0,1,2')
    run.command('mrconvert ' + prefix + 'amps.mif ' + prefix + 'second_peaks.mif -coord 3 1 -axes 0,1,2')
    file.delTemporary(prefix + 'amps.mif')
    run.command('fixel2voxel ' + prefix + 'fixel/directions.mif split_dir ' + prefix + 'all_dirs.mif -number 1')
    file.delTemporary(prefix + 'fixel')
    run.command('mrconvert ' + prefix + 'all_dirs.mif ' + prefix + 'first_dir.mif -coord 3 0:2')
    file.delTemporary(prefix + 'all_dirs.mif')
    # Calculate the 'cost function' Donald derived for selecting single-fibre voxels
    # https://github.com/MRtrix3/mrtrix3/pull/426
    #   sqrt(|peak1|) * (1 - |peak2| / |peak1|)^2
    # (the mrcalc expression below is the reverse-Polish form of exactly this)
    run.command('mrcalc ' + prefix + 'first_peaks.mif -sqrt 1 ' + prefix + 'second_peaks.mif ' + prefix + 'first_peaks.mif -div -sub 2 -pow -mult ' + prefix + 'CF.mif')
    file.delTemporary(prefix + 'first_peaks.mif')
    file.delTemporary(prefix + 'second_peaks.mif')
    # Select the top-ranked voxels
    run.command('mrthreshold ' + prefix + 'CF.mif -top ' + str(app.args.sf_voxels) + ' ' + prefix + 'SF.mif')
    # Generate a new response function based on this selection
    run.command('amp2response dwi.mif ' + prefix + 'SF.mif ' + prefix + 'first_dir.mif ' + prefix + 'RF.txt' + iter_lmax_option)
    file.delTemporary(prefix + 'first_dir.mif')
    # Should we terminate?
    # Convergence test: the SF voxel selection is identical to the previous
    # iteration's (voxel-wise difference image has zero maximum)
    if iteration > 0:
      run.command('mrcalc ' + prefix + 'SF.mif iter' + str(iteration - 1) + '_SF.mif -sub ' + prefix + 'SF_diff.mif')
      file.delTemporary('iter' + str(iteration - 1) + '_SF.mif')
      max_diff = image.statistic(prefix + 'SF_diff.mif', 'max')
      file.delTemporary(prefix + 'SF_diff.mif')
      if int(max_diff) == 0:
        app.console('Convergence of SF voxel selection detected at iteration ' + str(iteration))
        file.delTemporary(prefix + 'CF.mif')
        run.function(shutil.copyfile, prefix + 'RF.txt', 'response.txt')
        run.function(shutil.move, prefix + 'SF.mif', 'voxels.mif')
        break
    # Select a greater number of top single-fibre voxels, and dilate (within bounds of initial mask);
    # these are the voxels that will be re-tested in the next iteration
    run.command('mrthreshold ' + prefix + 'CF.mif -top ' + str(app.args.iter_voxels) + ' - | maskfilter - dilate - -npass ' + str(app.args.dilate) + ' | mrcalc mask.mif - -mult ' + prefix + 'SF_dilated.mif')
    file.delTemporary(prefix + 'CF.mif')
    # Commence the next iteration
  # If terminating due to running out of iterations, still need to put the results in the appropriate location
  if not os.path.exists('response.txt'):
    app.console('Exiting after maximum ' + str(app.args.max_iters) + ' iterations')
    run.function(shutil.copyfile, 'iter' + str(app.args.max_iters - 1) + '_RF.txt', 'response.txt')
    run.function(shutil.move, 'iter' + str(app.args.max_iters - 1) + '_SF.mif', 'voxels.mif')
  run.function(shutil.copyfile, 'response.txt', path.fromUser(app.args.output, False))
def execute():
  """Estimate single-fibre WM, GM and CSF response functions ('dhollander' algorithm).

  Pipeline stages (all operating on images in the script temporary directory):
    1. Per-b-value mean signal and a per-voxel 'SDM' map — the log-ratio of
       the b=0 mean to each shell's mean (see the mrcalc commands below) —
       combined into a volume-weighted overall SDM; voxels with non-finite or
       non-positive signal are excluded via a 'safe' mask.
    2. Crude WM / GM / CSF separation (FA threshold, then SDM median split).
    3. Refinement of each tissue class by SDM-based outlier removal.
    4. Final voxel selections; single-fibre WM voxels chosen by invoking the
       'tournier' algorithm as a nested dwi2response call.
    5. amp2response to produce response_sfwm.txt / response_gm.txt /
       response_csf.txt, copied to the user-specified output paths, plus 4D
       crude/refined/voxels images for quality control.
  """
  import math, os, shutil
  from mrtrix3 import app, image, path, run

  # Get b-values and number of volumes per b-value.
  bvalues = [ int(round(float(x))) for x in image.headerField('dwi.mif', 'shells').split() ]
  bvolumes = [ int(x) for x in image.headerField('dwi.mif', 'shellcounts').split() ]
  app.console(str(len(bvalues)) + ' unique b-value(s) detected: ' + ','.join(map(str,bvalues)) + ' with ' + ','.join(map(str,bvolumes)) + ' volumes.')
  if len(bvalues) < 2:
    app.error('Need at least 2 unique b-values (including b=0).')

  # Get lmax information (if provided).
  sfwm_lmax = [ ]
  if app.args.lmax:
    sfwm_lmax = [ int(x.strip()) for x in app.args.lmax.split(',') ]
    if not len(sfwm_lmax) == len(bvalues):
      app.error('Number of lmax\'s (' + str(len(sfwm_lmax)) + ', as supplied to the -lmax option: ' + ','.join(map(str,sfwm_lmax)) + ') does not match number of unique b-values.')
    for l in sfwm_lmax:
      if l%2:
        app.error('Values supplied to the -lmax option must be even.')
      if l<0:
        app.error('Values supplied to the -lmax option must be non-negative.')

  # Erode (brain) mask.
  if app.args.erode > 0:
    run.command('maskfilter mask.mif erode eroded_mask.mif -npass ' + str(app.args.erode))
  else:
    run.command('mrconvert mask.mif eroded_mask.mif -datatype bit')

  # Get volumes, compute mean signal and SDM per b-value; compute overall SDM; get rid of erroneous values.
  totvolumes = 0
  fullsdmcmd = 'mrcalc'
  errcmd = 'mrcalc'
  # zeropath: mean image of the lowest b-value shell (assumed first in 'shells')
  zeropath = 'mean_b' + str(bvalues[0]) + '.mif'
  for i, b in enumerate(bvalues):
    meanpath = 'mean_b' + str(b) + '.mif'
    run.command('dwiextract dwi.mif -shell ' + str(b) + ' - | mrmath - mean ' + meanpath + ' -axis 3')
    # Flag voxels whose mean signal is non-finite or <= 0
    errpath = 'err_b' + str(b) + '.mif'
    run.command('mrcalc ' + meanpath + ' -finite ' + meanpath + ' 0 -if 0 -le ' + errpath + ' -datatype bit')
    errcmd += ' ' + errpath
    if i>0:
      errcmd += ' -add'
      # SDM for this shell: log(mean_b0 / mean_b); only defined for b > b0
      sdmpath = 'sdm_b' + str(b) + '.mif'
      run.command('mrcalc ' + zeropath + ' ' + meanpath + ' -divide -log ' + sdmpath)
      totvolumes += bvolumes[i]
      # Accumulate the volume-weighted sum of shell SDMs on the mrcalc stack;
      # '-add' only needed from the second accumulated term onward (i>1)
      fullsdmcmd += ' ' + sdmpath + ' ' + str(bvolumes[i]) + ' -mult'
      if i>1:
        fullsdmcmd += ' -add'
  fullsdmcmd += ' ' + str(totvolumes) + ' -divide full_sdm.mif'
  run.command(fullsdmcmd)
  run.command('mrcalc full_sdm.mif -finite full_sdm.mif 0 -if 0 -le err_sdm.mif -datatype bit')
  # 'safe' mask: eroded mask minus any voxel flagged erroneous in any shell or in the overall SDM
  errcmd += ' err_sdm.mif -add 0 eroded_mask.mif -if safe_mask.mif -datatype bit'
  run.command(errcmd)
  # Clamp SDM to a maximum of 10 within the safe mask
  run.command('mrcalc safe_mask.mif full_sdm.mif 0 -if 10 -min safe_sdm.mif')

  # Compute FA and principal eigenvectors; crude WM versus GM-CSF separation based on FA.
  run.command('dwi2tensor dwi.mif - -mask safe_mask.mif | tensor2metric - -fa safe_fa.mif -vector safe_vecs.mif -modulate none -mask safe_mask.mif')
  run.command('mrcalc safe_mask.mif safe_fa.mif 0 -if ' + str(app.args.fa) + ' -gt crude_wm.mif -datatype bit')
  run.command('mrcalc crude_wm.mif 0 safe_mask.mif -if _crudenonwm.mif -datatype bit')
  # Crude GM versus CSF separation based on SDM.
  crudenonwmmedian = image.statistic('safe_sdm.mif', 'median', '_crudenonwm.mif')
  run.command('mrcalc _crudenonwm.mif safe_sdm.mif ' + str(crudenonwmmedian) + ' -subtract 0 -if - | mrthreshold - - -mask _crudenonwm.mif | mrcalc _crudenonwm.mif - 0 -if crude_csf.mif -datatype bit')
  run.command('mrcalc crude_csf.mif 0 _crudenonwm.mif -if crude_gm.mif -datatype bit')

  # Refine WM: remove high SDM outliers.
  crudewmmedian = image.statistic('safe_sdm.mif', 'median', 'crude_wm.mif')
  run.command('mrcalc crude_wm.mif safe_sdm.mif 0 -if ' + str(crudewmmedian) + ' -gt _crudewmhigh.mif -datatype bit')
  run.command('mrcalc _crudewmhigh.mif 0 crude_wm.mif -if _crudewmlow.mif -datatype bit')
  # Upper outlier fence: Q3 + (Q3 - Q1), with the two half-medians standing in for quartiles
  crudewmQ1 = float(image.statistic('safe_sdm.mif', 'median', '_crudewmlow.mif'))
  crudewmQ3 = float(image.statistic('safe_sdm.mif', 'median', '_crudewmhigh.mif'))
  crudewmoutlthresh = crudewmQ3 + (crudewmQ3 - crudewmQ1)
  run.command('mrcalc crude_wm.mif safe_sdm.mif 0 -if ' + str(crudewmoutlthresh) + ' -gt _crudewmoutliers.mif -datatype bit')
  run.command('mrcalc _crudewmoutliers.mif 0 crude_wm.mif -if refined_wm.mif -datatype bit')

  # Refine GM: separate safer GM from partial volumed voxels.
  crudegmmedian = image.statistic('safe_sdm.mif', 'median', 'crude_gm.mif')
  run.command('mrcalc crude_gm.mif safe_sdm.mif 0 -if ' + str(crudegmmedian) + ' -gt _crudegmhigh.mif -datatype bit')
  run.command('mrcalc _crudegmhigh.mif 0 crude_gm.mif -if _crudegmlow.mif -datatype bit')
  run.command('mrcalc _crudegmhigh.mif safe_sdm.mif ' + str(crudegmmedian) + ' -subtract 0 -if - | mrthreshold - - -mask _crudegmhigh.mif -invert | mrcalc _crudegmhigh.mif - 0 -if _crudegmhighselect.mif -datatype bit')
  run.command('mrcalc _crudegmlow.mif safe_sdm.mif ' + str(crudegmmedian) + ' -subtract -neg 0 -if - | mrthreshold - - -mask _crudegmlow.mif -invert | mrcalc _crudegmlow.mif - 0 -if _crudegmlowselect.mif -datatype bit')
  run.command('mrcalc _crudegmhighselect.mif 1 _crudegmlowselect.mif -if refined_gm.mif -datatype bit')

  # Refine CSF: recover lost CSF from crude WM SDM outliers, separate safer CSF from partial volumed voxels.
  crudecsfmin = image.statistic('safe_sdm.mif', 'min', 'crude_csf.mif')
  run.command('mrcalc _crudewmoutliers.mif safe_sdm.mif 0 -if ' + str(crudecsfmin) + ' -gt 1 crude_csf.mif -if _crudecsfextra.mif -datatype bit')
  run.command('mrcalc _crudecsfextra.mif safe_sdm.mif ' + str(crudecsfmin) + ' -subtract 0 -if - | mrthreshold - - -mask _crudecsfextra.mif | mrcalc _crudecsfextra.mif - 0 -if refined_csf.mif -datatype bit')

  # Get final voxels for single-fibre WM response function estimation from WM using 'tournier' algorithm.
  refwmcount = float(image.statistic('refined_wm.mif', 'count', 'refined_wm.mif'))
  # Select app.args.sfwm percent of the refined WM voxels as single-fibre candidates
  voxsfwmcount = int(round(refwmcount * app.args.sfwm / 100.0))
  app.console('Running \'tournier\' algorithm to select ' + str(voxsfwmcount) + ' single-fibre WM voxels.')
  cleanopt = ''
  if not app._cleanup:
    cleanopt = ' -nocleanup'
  # Nested invocation of the 'tournier' dwi2response algorithm, restricted to refined WM
  run.command('dwi2response tournier dwi.mif _respsfwmss.txt -sf_voxels ' + str(voxsfwmcount) + ' -iter_voxels ' + str(voxsfwmcount * 10) + ' -mask refined_wm.mif -voxels voxels_sfwm.mif -tempdir ' + app._tempDir + cleanopt)

  # Get final voxels for GM response function estimation from GM.
  refgmmedian = image.statistic('safe_sdm.mif', 'median', 'refined_gm.mif')
  run.command('mrcalc refined_gm.mif safe_sdm.mif 0 -if ' + str(refgmmedian) + ' -gt _refinedgmhigh.mif -datatype bit')
  run.command('mrcalc _refinedgmhigh.mif 0 refined_gm.mif -if _refinedgmlow.mif -datatype bit')
  refgmhighcount = float(image.statistic('_refinedgmhigh.mif', 'count', '_refinedgmhigh.mif'))
  refgmlowcount = float(image.statistic('_refinedgmlow.mif', 'count', '_refinedgmlow.mif'))
  voxgmhighcount = int(round(refgmhighcount * app.args.gm / 100.0))
  voxgmlowcount = int(round(refgmlowcount * app.args.gm / 100.0))
  run.command('mrcalc _refinedgmhigh.mif safe_sdm.mif 0 -if - | mrthreshold - - -bottom ' + str(voxgmhighcount) + ' -ignorezero | mrcalc _refinedgmhigh.mif - 0 -if _refinedgmhighselect.mif -datatype bit')
  run.command('mrcalc _refinedgmlow.mif safe_sdm.mif 0 -if - | mrthreshold - - -top ' + str(voxgmlowcount) + ' -ignorezero | mrcalc _refinedgmlow.mif - 0 -if _refinedgmlowselect.mif -datatype bit')
  run.command('mrcalc _refinedgmhighselect.mif 1 _refinedgmlowselect.mif -if voxels_gm.mif -datatype bit')

  # Get final voxels for CSF response function estimation from CSF.
  refcsfcount = float(image.statistic('refined_csf.mif', 'count', 'refined_csf.mif'))
  voxcsfcount = int(round(refcsfcount * app.args.csf / 100.0))
  run.command('mrcalc refined_csf.mif safe_sdm.mif 0 -if - | mrthreshold - - -top ' + str(voxcsfcount) + ' -ignorezero | mrcalc refined_csf.mif - 0 -if voxels_csf.mif -datatype bit')

  # Show summary of voxels counts.
  textarrow = ' --> '
  app.console('Summary of voxel counts:')
  app.console('Mask: ' + str(int(image.statistic('mask.mif', 'count', 'mask.mif'))) + textarrow + str(int(image.statistic('eroded_mask.mif', 'count', 'eroded_mask.mif'))) + textarrow + str(int(image.statistic('safe_mask.mif', 'count', 'safe_mask.mif'))))
  app.console('WM: ' + str(int(image.statistic('crude_wm.mif', 'count', 'crude_wm.mif'))) + textarrow + str(int(image.statistic('refined_wm.mif', 'count', 'refined_wm.mif'))) + textarrow + str(int(image.statistic('voxels_sfwm.mif', 'count', 'voxels_sfwm.mif'))) + ' (SF)')
  app.console('GM: ' + str(int(image.statistic('crude_gm.mif', 'count', 'crude_gm.mif'))) + textarrow + str(int(image.statistic('refined_gm.mif', 'count', 'refined_gm.mif'))) + textarrow + str(int(image.statistic('voxels_gm.mif', 'count', 'voxels_gm.mif'))))
  app.console('CSF: ' + str(int(image.statistic('crude_csf.mif', 'count', 'crude_csf.mif'))) + textarrow + str(int(image.statistic('refined_csf.mif', 'count', 'refined_csf.mif'))) + textarrow + str(int(image.statistic('voxels_csf.mif', 'count', 'voxels_csf.mif'))))

  # Generate single-fibre WM, GM and CSF responses
  bvalues_option = ' -shell ' + ','.join(map(str,bvalues))
  sfwm_lmax_option = ''
  if sfwm_lmax:
    sfwm_lmax_option = ' -lmax ' + ','.join(map(str,sfwm_lmax))
  run.command('amp2response dwi.mif voxels_sfwm.mif safe_vecs.mif response_sfwm.txt' + bvalues_option + sfwm_lmax_option)
  run.command('amp2response dwi.mif voxels_gm.mif safe_vecs.mif response_gm.txt' + bvalues_option + ' -isotropic')
  run.command('amp2response dwi.mif voxels_csf.mif safe_vecs.mif response_csf.txt' + bvalues_option + ' -isotropic')
  run.function(shutil.copyfile, 'response_sfwm.txt', path.fromUser(app.args.out_sfwm, False))
  run.function(shutil.copyfile, 'response_gm.txt', path.fromUser(app.args.out_gm, False))
  run.function(shutil.copyfile, 'response_csf.txt', path.fromUser(app.args.out_csf, False))

  # Generate 4D binary images with voxel selections at major stages in algorithm (RGB as in MSMT-CSD paper).
  run.command('mrcat crude_csf.mif crude_gm.mif crude_wm.mif crude.mif -axis 3')
  run.command('mrcat refined_csf.mif refined_gm.mif refined_wm.mif refined.mif -axis 3')
  run.command('mrcat voxels_csf.mif voxels_gm.mif voxels_sfwm.mif voxels.mif -axis 3')
'(so it does not include "sub-"). If this parameter is not ' 'provided all subjects should be analyzed. Multiple ' 'participants can be specified with a space separated list.', nargs='+') app.cmdline.add_argument('-group_subset', help='Define a subset of participants to be used when generating the group-average FOD template and response functions. The subset is to be supplied as a comma separate list. Note the subset should be representable of your entire population and not biased towards one particular group. For example in a patient-control comparison, choose equal numbers of patients and controls. Used in group1 and group2 analysis levels.', nargs=1) app.cmdline.add_argument('-extra_eddy_args', help='generic string of arguments to be passed to the DiffPreprocPipeline_Eddy.sh script' ' and and subsequently to the run_eddy.sh script and finally to the command ' ' that actually invokes the eddy binary') app.parse() if app.isWindows(): app.error('Script cannot be run on Windows due to FSL dependency') subjects_to_analyze = [] # only for a subset of subjects if app.args.participant_label: subjects_to_analyze = app.args.participant_label # for all subjects else: subject_dirs = glob.glob(os.path.join(app.args.in_dir, '*')) subjects_to_analyze = subject_dirs # create a temporary directory for intermediate files app.makeTempDir() # read in group subset if supplied
def command(cmd, exitOnError=True): #pylint: disable=unused-variable
  """Execute a (possibly piped) shell command string and capture its output.

  The string is split on '|' into a chain of processes connected via pipes.
  MRtrix3 executables get version-matching and -nthreads/-info/-quiet flags
  appended; other executables are resolved via exeName(), and scripts with a
  shebang are invoked through their interpreter explicitly.

  Parameters:
    cmd         -- the full command string, pipes included
    exitOnError -- if True (default), a failed or unlaunchable command
                   terminates the whole script via app.error()/sys.exit();
                   if False, only a warning is issued

  Returns:
    (stdout, stderr) tuple of decoded text captured from the final process
    in the chain; ('', '') if the command was skipped by the -continue
    mechanism (_lastFile).

  NOTE(review): this function uses `os` (os.sep, os.setpgrp, os.path) without
  importing it locally — it presumably relies on a module-level `import os`
  not visible in this chunk; confirm before moving this function.
  """
  import inspect, itertools, shlex, signal, string, subprocess, sys, tempfile
  from distutils.spawn import find_executable
  from mrtrix3 import app

  # This is the only global variable that is _modified_ within this function
  global _processes

  # Vectorise the command string, preserving anything encased within quotation marks
  if os.sep == '/': # Cheap POSIX compliance check
    cmdsplit = shlex.split(cmd)
  else: # Native Windows Python
    cmdsplit = [ entry.strip('\"') for entry in shlex.split(cmd, posix=False) ]

  # -continue support: skip commands until the one producing the last file
  # generated by the previous (interrupted) execution has been found
  if _lastFile:
    if _triggerContinue(cmdsplit):
      app.debug('Detected last file in command \'' + cmd + '\'; this is the last run.command() / run.function() call that will be skipped')
    if app.verbosity:
      sys.stderr.write(app.colourExec + 'Skipping command:' + app.colourClear + ' ' + cmd + '\n')
      sys.stderr.flush()
    return ('', '')

  # This splits the command string based on the piping character '|', such that each
  # individual executable (along with its arguments) appears as its own list
  cmdstack = [ list(g) for k, g in itertools.groupby(cmdsplit, lambda s : s != '|') if k ]

  for line in cmdstack:
    is_mrtrix_exe = line[0] in _mrtrix_exe_list
    if is_mrtrix_exe:
      line[0] = versionMatch(line[0])
      if app.numThreads is not None:
        line.extend( [ '-nthreads', str(app.numThreads) ] )
      # Get MRtrix3 binaries to output additional INFO-level information if running in debug mode
      if app.verbosity == 3:
        line.append('-info')
      elif not app.verbosity:
        line.append('-quiet')
    else:
      line[0] = exeName(line[0])
    shebang = _shebang(line[0])
    if shebang:
      if not is_mrtrix_exe:
        # If a shebang is found, and this call is therefore invoking an
        #   interpreter, can't rely on the interpreter finding the script
        #   from PATH; need to find the full path ourselves.
        line[0] = find_executable(line[0])
      for item in reversed(shebang):
        line.insert(0, item)

  app.debug('To execute: ' + str(cmdstack))

  if app.verbosity:
    sys.stderr.write(app.colourExec + 'Command:' + app.colourClear + ' ' + cmd + '\n')
    sys.stderr.flush()

  # Disable interrupt signal handler while threads are running
  # (best-effort: signal registration can fail e.g. outside the main thread)
  try:
    signal.signal(signal.SIGINT, signal.default_int_handler)
  except:
    pass

  # Construct temporary text files for holding stdout / stderr contents when appropriate
  #   (One entry per process; each is a tuple containing two entries, each of which is either a
  #   file-like object, or None)
  tempfiles = [ ]

  # Execute all processes
  assert not _processes
  for index, to_execute in enumerate(cmdstack):
    file_out = None
    file_err = None
    # If there's at least one command prior to this, need to receive the stdout from the prior command
    #   at the stdin of this command; otherwise, nothing to receive
    if index > 0:
      handle_in = _processes[index-1].stdout
    else:
      handle_in = None
    # If this is not the last command, then stdout needs to be piped to the next command;
    #   otherwise, write stdout to a temporary file so that the contents can be read later
    if index < len(cmdstack)-1:
      handle_out = subprocess.PIPE
    else:
      file_out = tempfile.TemporaryFile()
      handle_out = file_out.fileno()
    # If we're in debug / info mode, the contents of stderr will be read and printed to the terminal
    #   as the command progresses, hence this needs to go to a pipe; otherwise, write it to a temporary
    #   file so that the contents can be read later
    if app.verbosity > 1:
      handle_err = subprocess.PIPE
    else:
      file_err = tempfile.TemporaryFile()
      handle_err = file_err.fileno()
    # Set off the processes
    # (preexec_fn=os.setpgrp detaches the child from this process group;
    #   unavailable on some platforms, hence the AttributeError fallback)
    try:
      try:
        process = subprocess.Popen (to_execute, stdin=handle_in, stdout=handle_out, stderr=handle_err, env=_env, preexec_fn=os.setpgrp) # pylint: disable=bad-option-value,subprocess-popen-preexec-fn
      except AttributeError:
        process = subprocess.Popen (to_execute, stdin=handle_in, stdout=handle_out, stderr=handle_err, env=_env)
      _processes.append(process)
      tempfiles.append( ( file_out, file_err ) )
    # FileNotFoundError not defined in Python 2.7
    except OSError as e:
      if exitOnError:
        app.error('\'' + to_execute[0] + '\' not executed ("' + str(e) + '"); script cannot proceed')
      else:
        app.warn('\'' + to_execute[0] + '\' not executed ("' + str(e) + '")')
        for p in _processes:
          p.terminate()
        _processes = [ ]
      break

  return_stdout = ''
  return_stderr = ''
  error = False
  error_text = ''

  # Wait for all commands to complete
  # Switch how we monitor running processes / wait for them to complete
  #   depending on whether or not the user has specified -info or -debug option
  try:
    if app.verbosity > 1:
      for process in _processes:
        stderrdata = b''
        do_indent = True
        while True:
          # Have to read one character at a time: Waiting for a newline character using e.g. readline() will prevent MRtrix progressbars from appearing
          byte = process.stderr.read(1)
          stderrdata += byte
          char = byte.decode('cp1252', errors='ignore')
          if not char and process.poll() is not None:
            break
          if do_indent and char in string.printable and char != '\r' and char != '\n':
            sys.stderr.write(' ')
            do_indent = False
          elif char in [ '\r', '\n' ]:
            do_indent = True
          sys.stderr.write(char)
          sys.stderr.flush()
        stderrdata = stderrdata.decode('utf-8', errors='replace')
        return_stderr += stderrdata
        if process.returncode:
          error = True
          error_text += stderrdata
    else:
      for process in _processes:
        process.wait()
  except (KeyboardInterrupt, SystemExit):
    app.handler(signal.SIGINT, inspect.currentframe())

  # Re-enable interrupt signal handler (best-effort, as above)
  try:
    signal.signal(signal.SIGINT, app.handler)
  except:
    pass

  # For any command stdout / stderr data that wasn't either passed to another command or
  #   printed to the terminal during execution, read it here.
  for index in range(len(cmdstack)):
    if tempfiles[index][0] is not None:
      tempfiles[index][0].flush()
      tempfiles[index][0].seek(0)
      stdout_text = tempfiles[index][0].read().decode('utf-8', errors='replace')
      return_stdout += stdout_text
      if _processes[index].returncode:
        error = True
        error_text += stdout_text
    if tempfiles[index][1] is not None:
      tempfiles[index][1].flush()
      tempfiles[index][1].seek(0)
      stderr_text = tempfiles[index][1].read().decode('utf-8', errors='replace')
      return_stderr += stderr_text
      if _processes[index].returncode:
        error = True
        error_text += stderr_text

  _processes = [ ]

  if error:
    if exitOnError:
      app.cleanup = False
      caller = inspect.getframeinfo(inspect.stack()[1][0])
      script_name = os.path.basename(sys.argv[0])
      app.console('')
      # caller may be a named tuple or a plain tuple depending on Python version
      try:
        filename = caller.filename
        lineno = caller.lineno
      except AttributeError:
        filename = caller[1]
        lineno = caller[2]
      sys.stderr.write(script_name + ': ' + app.colourError + '[ERROR] Command failed: ' + cmd + app.colourClear + app.colourDebug + ' (' + os.path.basename(filename) + ':' + str(lineno) + ')' + app.colourClear + '\n')
      sys.stderr.write(script_name + ': ' + app.colourConsole + 'Output of failed command:' + app.colourClear + '\n')
      for line in error_text.splitlines():
        sys.stderr.write(' ' * (len(script_name)+2) + line + '\n')
      app.console('')
      sys.stderr.flush()
      if app.tempDir:
        with open(os.path.join(app.tempDir, 'error.txt'), 'w') as outfile:
          outfile.write(cmd + '\n\n' + error_text + '\n')
      app.complete()
      sys.exit(1)
    else:
      app.warn('Command failed: ' + cmd)

  # Only now do we append to the script log, since the command has completed successfully
  # Note: Writing the command as it was formed as the input to run.command():
  #   other flags may potentially change if this file is eventually used to resume the script
  if app.tempDir:
    with open(os.path.join(app.tempDir, 'log.txt'), 'a') as outfile:
      outfile.write(cmd + '\n')

  return (return_stdout, return_stderr)
def command(cmd, exitOnError=True):
  """Execute a (possibly piped) shell command string and capture its output.

  The string is split on '|' into a chain of processes connected via pipes.
  MRtrix3 executables get version-matching and -nthreads/-info/-quiet flags
  appended; other executables are resolved via exeName(), and scripts with a
  shebang are invoked through their interpreter explicitly.

  Parameters:
    cmd         -- the full command string, pipes included
    exitOnError -- if True (default), a failed or unlaunchable command
                   terminates the whole script; if False, only a warning

  Returns:
    (stdout, stderr) tuple of decoded text captured from the final process
    in the chain; ('', '') if the command was skipped by the -continue
    mechanism (app._lastFile).
  """
  import inspect, itertools, os, shlex, subprocess, sys, tempfile
  from distutils.spawn import find_executable
  from mrtrix3 import app

  # This is the only global variable that is _modified_ within this function
  global _processes

  # Vectorise the command string, preserving anything encased within quotation marks
  cmdsplit = shlex.split(cmd)

  if app._lastFile:
    # Check to see if the last file produced in the previous script execution is
    #   intended to be produced by this command; if it is, this will be the last
    #   command that gets skipped by the -continue option
    # It's possible that the file might be defined in a '--option=XXX' style argument
    # It's also possible that the filename in the command string has the file extension omitted
    for entry in cmdsplit:
      if entry.startswith('--') and '=' in entry:
        cmdtotest = entry.split('=')[1]
      else:
        cmdtotest = entry
      filetotest = [ app._lastFile, os.path.splitext(app._lastFile)[0] ]
      if cmdtotest in filetotest:
        app.debug('Detected last file \'' + app._lastFile + '\' in command \'' + cmd + '\'; this is the last run.command() / run.function() call that will be skipped')
        app._lastFile = ''
        break
    if app._verbosity:
      sys.stderr.write(app.colourExec + 'Skipping command:' + app.colourClear + ' ' + cmd + '\n')
      sys.stderr.flush()
    # BUGFIX: previously returned None here, while the normal exit path returns
    #   a 2-tuple; callers unpacking the result would crash on a resumed run
    return ('', '')

  # This splits the command string based on the piping character '|', such that each
  #   individual executable (along with its arguments) appears as its own list
  # Note that for Python2 support, it is necessary to convert groupby() output from
  #   a generator to a list before it is passed to filter()
  # BUGFIX: the separator test previously used 'is not' (identity comparison
  #   against a string literal, which only works due to CPython interning and
  #   raises SyntaxWarning on Python >= 3.8); use equality instead
  cmdstack = [ list(g) for k, g in filter(lambda t : t[0], ((k, list(g)) for k, g in itertools.groupby(cmdsplit, lambda s : s != '|') ) ) ]
  for line in cmdstack:
    is_mrtrix_exe = line[0] in _mrtrix_exe_list
    if is_mrtrix_exe:
      line[0] = versionMatch(line[0])
      if app._nthreads is not None:
        line.extend( [ '-nthreads', str(app._nthreads) ] )
      # Get MRtrix3 binaries to output additional INFO-level information if running in debug mode
      if app._verbosity == 3:
        line.append('-info')
      elif not app._verbosity:
        line.append('-quiet')
    else:
      line[0] = exeName(line[0])
    shebang = _shebang(line[0])
    if len(shebang):
      if not is_mrtrix_exe:
        # If a shebang is found, and this call is therefore invoking an
        #   interpreter, can't rely on the interpreter finding the script
        #   from PATH; need to find the full path ourselves.
        line[0] = find_executable(line[0])
      for item in reversed(shebang):
        line.insert(0, item)

  if app._verbosity:
    sys.stderr.write(app.colourExec + 'Command:' + app.colourClear + ' ' + cmd + '\n')
    sys.stderr.flush()

  app.debug('To execute: ' + str(cmdstack))

  # Construct temporary text files for holding stdout / stderr contents when appropriate
  #   (One entry per process; each is a tuple containing two entries, each of which is either a
  #   file-like object, or None)
  tempfiles = [ ]

  # Execute all processes
  # (local loop variable renamed from 'command' to avoid shadowing this function)
  _processes = [ ]
  for index, to_execute in enumerate(cmdstack):
    file_out = None
    file_err = None
    # If there's at least one command prior to this, need to receive the stdout from the prior command
    #   at the stdin of this command; otherwise, nothing to receive
    if index > 0:
      handle_in = _processes[index-1].stdout
    else:
      handle_in = None
    # If this is not the last command, then stdout needs to be piped to the next command;
    #   otherwise, write stdout to a temporary file so that the contents can be read later
    if index < len(cmdstack)-1:
      handle_out = subprocess.PIPE
    else:
      file_out = tempfile.TemporaryFile()
      handle_out = file_out.fileno()
    # If we're in debug / info mode, the contents of stderr will be read and printed to the terminal
    #   as the command progresses, hence this needs to go to a pipe; otherwise, write it to a temporary
    #   file so that the contents can be read later
    if app._verbosity > 1:
      handle_err = subprocess.PIPE
    else:
      file_err = tempfile.TemporaryFile()
      handle_err = file_err.fileno()
    # Set off the processes
    try:
      process = subprocess.Popen (to_execute, stdin=handle_in, stdout=handle_out, stderr=handle_err, env=_env)
      _processes.append(process)
      tempfiles.append( ( file_out, file_err ) )
    # FileNotFoundError not defined in Python 2.7
    except OSError as e:
      if exitOnError:
        app.error('\'' + to_execute[0] + '\' not executed ("' + str(e) + '"); script cannot proceed')
      else:
        app.warn('\'' + to_execute[0] + '\' not executed ("' + str(e) + '")')
        for p in _processes:
          p.terminate()
        _processes = [ ]
        break
    except (KeyboardInterrupt, SystemExit):
      import inspect, signal
      app._handler(signal.SIGINT, inspect.currentframe())

  return_stdout = ''
  return_stderr = ''
  error = False
  error_text = ''

  # Wait for all commands to complete
  try:
    # Switch how we monitor running processes / wait for them to complete
    #   depending on whether or not the user has specified -verbose or -debug option
    if app._verbosity > 1:
      for process in _processes:
        stderrdata = ''
        while True:
          # Have to read one character at a time: Waiting for a newline character using e.g. readline() will prevent MRtrix progressbars from appearing
          line = process.stderr.read(1).decode('utf-8')
          sys.stderr.write(line)
          sys.stderr.flush()
          stderrdata += line
          if not line and process.poll() is not None:
            break
        return_stderr += stderrdata
        if process.returncode:
          error = True
          error_text += stderrdata
    else:
      for process in _processes:
        process.wait()
  except (KeyboardInterrupt, SystemExit):
    import inspect, signal
    app._handler(signal.SIGINT, inspect.currentframe())

  # For any command stdout / stderr data that wasn't either passed to another command or
  #   printed to the terminal during execution, read it here.
  for index in range(len(cmdstack)):
    if tempfiles[index][0] is not None:
      tempfiles[index][0].flush()
      tempfiles[index][0].seek(0)
      stdout_text = tempfiles[index][0].read().decode('utf-8')
      return_stdout += stdout_text
      if _processes[index].returncode:
        error = True
        error_text += stdout_text
    if tempfiles[index][1] is not None:
      tempfiles[index][1].flush()
      tempfiles[index][1].seek(0)
      stderr_text = tempfiles[index][1].read().decode('utf-8')
      return_stderr += stderr_text
      if _processes[index].returncode:
        error = True
        error_text += stderr_text

  _processes = [ ]

  if error:
    # Keep the temporary directory around so the failure can be inspected / resumed
    app._cleanup = False
    if exitOnError:
      caller = inspect.getframeinfo(inspect.stack()[1][0])
      app.console('')
      sys.stderr.write(os.path.basename(sys.argv[0]) + ': ' + app.colourError + '[ERROR] Command failed: ' + cmd + app.colourClear + app.colourDebug + ' (' + os.path.basename(caller.filename) + ':' + str(caller.lineno) + ')' + app.colourClear + '\n')
      sys.stderr.write(os.path.basename(sys.argv[0]) + ': ' + app.colourConsole + 'Output of failed command:' + app.colourClear + '\n')
      sys.stderr.write(error_text)
      sys.stderr.flush()
      if app._tempDir:
        with open(os.path.join(app._tempDir, 'error.txt'), 'w') as outfile:
          outfile.write(cmd + '\n\n' + error_text + '\n')
      app.complete()
      sys.exit(1)
    else:
      app.warn('Command failed: ' + cmd)

  # Only now do we append to the script log, since the command has completed successfully
  # Note: Writing the command as it was formed as the input to run.command():
  #   other flags may potentially change if this file is eventually used to resume the script
  if app._tempDir:
    with open(os.path.join(app._tempDir, 'log.txt'), 'a') as outfile:
      outfile.write(cmd + '\n')

  return (return_stdout, return_stderr)
def execute(): #pylint: disable=unused-variable
  """Generate a 5TT (five-tissue-type) image for ACT using FSL tools.

  Pipeline: (optional) brain masking -> FSL FAST tissue segmentation ->
  FSL FIRST sub-cortical segmentation -> mesh-to-partial-volume conversion ->
  assembly of the 5TT volume (CGM, SGM, WM, CSF, pathology) -> optional crop.

  Operates entirely on files in the script's temporary directory
  ('input.mif', optional 'mask.mif' / 'T2.nii'), writing 'result.mif'.
  """
  import math, os
  from mrtrix3 import app, fsl, image, run
  # FSL is a hard dependency of this algorithm, and is not available on Windows
  if app.isWindows():
    app.error('\'fsl\' algorithm of 5ttgen script cannot be run on Windows: FSL not available on Windows')
  fsl_path = os.environ.get('FSLDIR', '')
  if not fsl_path:
    app.error('Environment variable FSLDIR is not set; please run appropriate FSL configuration script')
  # Resolve FSL executables up-front (handles e.g. 'fsl5.0-' prefixed installs)
  bet_cmd = fsl.exeName('bet')
  fast_cmd = fsl.exeName('fast')
  first_cmd = fsl.exeName('run_first_all')
  ssroi_cmd = fsl.exeName('standard_space_roi')
  # FIRST requires its shape-model atlases, which some distributions package separately
  first_atlas_path = os.path.join(fsl_path, 'data', 'first', 'models_336_bin')
  if not os.path.isdir(first_atlas_path):
    app.error('Atlases required for FSL\'s FIRST program not installed; please install fsl-first-data using your relevant package manager')
  fsl_suffix = fsl.suffix()
  # Sub-cortical grey matter structures to be segmented by FIRST
  sgm_structures = [ 'L_Accu', 'R_Accu', 'L_Caud', 'R_Caud', 'L_Pall', 'R_Pall', 'L_Puta', 'R_Puta', 'L_Thal', 'R_Thal' ]
  if app.args.sgm_amyg_hipp:
    sgm_structures.extend(['L_Amyg', 'R_Amyg', 'L_Hipp', 'R_Hipp'])
  t1_spacing = image.Header('input.mif').spacing()
  upsample_for_first = False
  # If voxel size is 1.25mm or larger (geometric mean), make a guess that the user
  # has erroneously re-gridded their data to DWI resolution
  if math.pow(t1_spacing[0] * t1_spacing[1] * t1_spacing[2], 1.0 / 3.0) > 1.225:
    app.warn('Voxel size larger than expected for T1-weighted images (' + str(t1_spacing) + '); '
             'note that ACT does not require re-gridding of T1 image to DWI space, and indeed '
             'retaining the original higher resolution of the T1 image is preferable')
    upsample_for_first = True
  # FSL tools expect NIfTI input with this stride layout
  run.command('mrconvert input.mif T1.nii -strides -1,+2,+3')
  fast_t1_input = 'T1.nii'
  fast_t2_input = ''
  # Decide whether or not we're going to do any brain masking:
  # three branches: user-supplied mask, -premasked, or BET-based extraction
  if os.path.exists('mask.mif'):
    fast_t1_input = 'T1_masked' + fsl_suffix
    # Check to see if the mask matches the T1 image
    if image.match('T1.nii', 'mask.mif'):
      run.command('mrcalc T1.nii mask.mif -mult ' + fast_t1_input)
      mask_path = 'mask.mif'
    else:
      app.warn('Mask image does not match input image - re-gridding')
      run.command('mrtransform mask.mif mask_regrid.mif -template T1.nii -datatype bit')
      run.command('mrcalc T1.nii mask_regrid.mif -mult ' + fast_t1_input)
      mask_path = 'mask_regrid.mif'
    if os.path.exists('T2.nii'):
      fast_t2_input = 'T2_masked' + fsl_suffix
      run.command('mrcalc T2.nii ' + mask_path + ' -mult ' + fast_t2_input)
  elif app.args.premasked:
    # Input is already brain-extracted; use as-is
    fast_t1_input = 'T1.nii'
    if os.path.exists('T2.nii'):
      fast_t2_input = 'T2.nii'
  else:
    # Use FSL command standard_space_roi to do an initial masking of the image before BET
    # Also reduce the FoV of the image
    # Using MNI 1mm dilated brain mask rather than the -b option in standard_space_roi
    # (which uses the 2mm mask); the latter looks 'buggy' to me... Unfortunately even
    # with the 1mm 'dilated' mask, it can still cut into some brain areas, hence the
    # explicit additional dilation below
    mni_mask_path = os.path.join(fsl_path, 'data', 'standard', 'MNI152_T1_1mm_brain_mask_dil.nii.gz')
    mni_mask_dilation = 0
    if os.path.exists(mni_mask_path):
      mni_mask_dilation = 4
    else:
      # Fall back to the 2mm mask (half the dilation passes, since voxels are twice the size)
      mni_mask_path = os.path.join(fsl_path, 'data', 'standard', 'MNI152_T1_2mm_brain_mask_dil.nii.gz')
      if os.path.exists(mni_mask_path):
        mni_mask_dilation = 2
    if mni_mask_dilation:
      run.command('maskfilter ' + mni_mask_path + ' dilate mni_mask.nii -npass ' + str(mni_mask_dilation))
      if app.args.nocrop:
        ssroi_roi_option = ' -roiNONE'
      else:
        ssroi_roi_option = ' -roiFOV'
      # Second argument False: do not abort the script if standard_space_roi fails
      run.command(ssroi_cmd + ' T1.nii T1_preBET' + fsl_suffix + ' -maskMASK mni_mask.nii' + ssroi_roi_option, False)
    else:
      run.command(ssroi_cmd + ' T1.nii T1_preBET' + fsl_suffix + ' -b', False)
    pre_bet_image = fsl.findImage('T1_preBET')
    # BET brain extraction; -R iterates centre-of-gravity estimation for robustness
    run.command(bet_cmd + ' ' + pre_bet_image + ' T1_BET' + fsl_suffix + ' -f 0.15 -R')
    fast_t1_input = fsl.findImage('T1_BET' + fsl_suffix)
    if os.path.exists('T2.nii'):
      if app.args.nocrop:
        fast_t2_input = 'T2.nii'
      else:
        # Just a reduction of FoV, no sub-voxel interpolation going on
        run.command('mrtransform T2.nii T2_cropped.nii -template ' + fast_t1_input + ' -interp nearest')
        fast_t2_input = 'T2_cropped.nii'
  # Finished branching based on brain masking
  # FAST tissue segmentation; -S 2 enables dual-channel (T1 + T2) mode
  if fast_t2_input:
    run.command(fast_cmd + ' -S 2 ' + fast_t2_input + ' ' + fast_t1_input)
  else:
    run.command(fast_cmd + ' ' + fast_t1_input)
  # FIRST sub-cortical segmentation
  first_input = 'T1.nii'
  if upsample_for_first:
    app.warn('Generating 1mm isotropic T1 image for FIRST in hope of preventing failure, since input image is of lower resolution')
    run.command('mrresize T1.nii T1_1mm.nii -voxel 1.0 -interp sinc')
    first_input = 'T1_1mm.nii'
  first_input_brain_extracted_option = ''
  if app.args.premasked:
    first_input_brain_extracted_option = ' -b'
  first_debug_option = ''
  # Keep FIRST's intermediate files if the script itself isn't cleaning up
  if not app.cleanup:
    first_debug_option = ' -d'
  first_verbosity_option = ''
  if app.verbosity == 3:
    first_verbosity_option = ' -v'
  run.command(first_cmd + ' -m none -s ' + ','.join(sgm_structures) + ' -i ' + first_input + ' -o first' + first_input_brain_extracted_option + first_debug_option + first_verbosity_option)
  fsl.checkFirst('first', sgm_structures)
  # Convert FIRST meshes to partial volume images
  pve_image_list = []
  progress = app.progressBar('Generating partial volume images for SGM structures', len(sgm_structures))
  for struct in sgm_structures:
    pve_image_path = 'mesh2voxel_' + struct + '.mif'
    vtk_in_path = 'first-' + struct + '_first.vtk'
    vtk_temp_path = struct + '.vtk'
    # Transform mesh from FIRST's internal coordinates to real/scanner space first
    run.command('meshconvert ' + vtk_in_path + ' ' + vtk_temp_path + ' -transform first2real ' + first_input)
    run.command('mesh2voxel ' + vtk_temp_path + ' ' + fast_t1_input + ' ' + pve_image_path)
    pve_image_list.append(pve_image_path)
    progress.increment()
  progress.done()
  # Sum per-structure PVEs; clamp to 1.0 where neighbouring structures overlap
  run.command('mrmath ' + ' '.join(pve_image_list) + ' sum - | mrcalc - 1.0 -min all_sgms.mif')
  # Combine the tissue images into the 5TT format within the script itself
  # NOTE(review): split('.')[0] assumes no '.' in the path other than the extension — verify
  fast_output_prefix = fast_t1_input.split('.')[0]
  fast_csf_output = fsl.findImage(fast_output_prefix + '_pve_0')
  fast_gm_output = fsl.findImage(fast_output_prefix + '_pve_1')
  fast_wm_output = fsl.findImage(fast_output_prefix + '_pve_2')
  # Step 1: Run LCC (largest connected component) on the WM image
  run.command('mrthreshold ' + fast_wm_output + ' - -abs 0.001 | maskfilter - connect - -connectivity | mrcalc 1 - 1 -gt -sub remove_unconnected_wm_mask.mif -datatype bit')
  # Step 2: Generate the images in the same fashion as the old 5ttgen binary used to:
  #   - Preserve CSF as-is
  #   - Preserve SGM, unless it results in a sum of volume fractions greater than 1, in which case clamp
  #   - Multiply the FAST volume fractions of GM and WM, so that the sum of CSF, SGM, CGM and WM is 1.0
  run.command('mrcalc ' + fast_csf_output + ' remove_unconnected_wm_mask.mif -mult csf.mif')
  run.command('mrcalc 1.0 csf.mif -sub all_sgms.mif -min sgm.mif')
  run.command('mrcalc 1.0 csf.mif sgm.mif -add -sub ' + fast_gm_output + ' ' + fast_wm_output + ' -add -div multiplier.mif')
  # Guard against division by zero above (NaN/inf -> 0)
  run.command('mrcalc multiplier.mif -finite multiplier.mif 0.0 -if multiplier_noNAN.mif')
  run.command('mrcalc ' + fast_gm_output + ' multiplier_noNAN.mif -mult remove_unconnected_wm_mask.mif -mult cgm.mif')
  run.command('mrcalc ' + fast_wm_output + ' multiplier_noNAN.mif -mult remove_unconnected_wm_mask.mif -mult wm.mif')
  # Fifth (pathology) volume: all zeroes
  run.command('mrcalc 0 wm.mif -min path.mif')
  run.command('mrcat cgm.mif sgm.mif wm.mif csf.mif path.mif - -axis 3 | mrconvert - combined_precrop.mif -strides +2,+3,+4,+1')
  # Use mrcrop to reduce file size (improves caching of image data during tracking)
  if app.args.nocrop:
    run.command('mrconvert combined_precrop.mif result.mif')
  else:
    run.command('mrmath combined_precrop.mif sum - -axis 3 | mrthreshold - - -abs 0.5 | mrcrop combined_precrop.mif result.mif -mask -')
num_volumes = 1 if len(dwi_size) == 4: num_volumes = dwi_size[3] bval = [i[3] for i in grad] nvols = [i[3] for i in dwi_ind_size] for idx,i in enumerate(DWInlist): if len(DWInlist) == 1: tmpidxlist = range(0,num_volumes) else: tmpidxlist = range(sum(nvols[:idx+1]),sum(nvols[:idx+1])+nvols[idx+1]) idxlist.append(','.join(str(i) for i in tmpidxlist)) # Perform initial checks on input images if not grad: app.error('No diffusion gradient table found') if not len(grad) == num_volumes: app.error('Number of lines in gradient table (' + str(len(grad)) + ') does not match input image (' + str(num_volumes) + ' volumes); check your input data') if app.args.extent: extent = app.args.extent else: extent = '5,5,5' run.command('mrconvert dwi.mif working.mif') # denoising if app.args.denoise: print("...Beginning denoising") run.command('dwidenoise -extent ' + extent + ' -noise fullnoisemap.mif working.mif dwidn.mif') run.function(os.remove,'working.mif') run.command('mrconvert dwidn.mif working.mif')
def execute():
  """Estimate WM / GM / CSF response functions using 5TT-derived tissue masks.

  Expects 'dwi.mif', 'mask.mif' and '5tt.mif' in the script's temporary
  directory; writes per-tissue response text files to the user-specified
  output paths, plus a 4D 'voxels.mif' showing the selected voxels
  (RGB ordering as in the MSMT-CSD paper: CSF, GM, single-fibre WM).
  """
  import math, os, shutil
  from mrtrix3 import app, image, path, run
  # Ideally want to use the oversampling-based regridding of the 5TT image from the SIFT model, not mrtransform
  # May need to commit 5ttregrid...
  # Verify input 5tt image (second argument False: don't abort on failure)
  run.command('5ttcheck 5tt.mif', False)
  # Get shell information
  shells = [ int(round(float(x))) for x in image.headerField('dwi.mif', 'shells').split() ]
  if len(shells) < 3:
    app.warn('Less than three b-value shells; response functions will not be applicable in resolving three tissues using MSMT-CSD algorithm')
  # Get lmax information (if provided); must supply one value per shell
  wm_lmax = [ ]
  if app.args.lmax:
    wm_lmax = [ int(x.strip()) for x in app.args.lmax.split(',') ]
    if not len(wm_lmax) == len(shells):
      app.error('Number of manually-defined lmax\'s (' + str(len(wm_lmax)) + ') does not match number of b-value shells (' + str(len(shells)) + ')')
    for l in wm_lmax:
      if l%2:
        app.error('Values for lmax must be even')
      if l<0:
        app.error('Values for lmax must be non-negative')
  # Tensor fit: FA for mask refinement, principal eigenvector for fibre directions
  run.command('dwi2tensor dwi.mif - -mask mask.mif | tensor2metric - -fa fa.mif -vector vector.mif')
  if not os.path.exists('dirs.mif'):
    run.function(shutil.copy, 'vector.mif', 'dirs.mif')
  # Regrid the 5TT image to DWI space
  run.command('mrtransform 5tt.mif 5tt_regrid.mif -template fa.mif -interp linear')
  # Basic tissue masks: 5TT volume indices are 0=CGM, 2=WM, 3=CSF;
  # GM and CSF masks additionally require FA below the user threshold
  run.command('mrconvert 5tt_regrid.mif - -coord 3 2 -axes 0,1,2 | mrcalc - ' + str(app.args.pvf) + ' -gt mask.mif -mult wm_mask.mif')
  run.command('mrconvert 5tt_regrid.mif - -coord 3 0 -axes 0,1,2 | mrcalc - ' + str(app.args.pvf) + ' -gt fa.mif ' + str(app.args.fa) + ' -lt -mult mask.mif -mult gm_mask.mif')
  run.command('mrconvert 5tt_regrid.mif - -coord 3 3 -axes 0,1,2 | mrcalc - ' + str(app.args.pvf) + ' -gt fa.mif ' + str(app.args.fa) + ' -lt -mult mask.mif -mult csf_mask.mif')
  # Revise WM mask to only include single-fibre voxels
  app.console('Calling dwi2response recursively to select WM single-fibre voxels using \'' + app.args.wm_algo + '\' algorithm')
  recursive_cleanup_option=''
  if not app._cleanup:
    recursive_cleanup_option = ' -nocleanup'
  # Nested dwi2response call shares this script's temporary directory
  run.command('dwi2response ' + app.args.wm_algo + ' dwi.mif wm_ss_response.txt -mask wm_mask.mif -voxels wm_sf_mask.mif -tempdir ' + app._tempDir + recursive_cleanup_option)
  # Check for empty masks (non-zero voxel counts; mask argument == image itself)
  wm_voxels = int(image.statistic('wm_sf_mask.mif', 'count', 'wm_sf_mask.mif'))
  gm_voxels = int(image.statistic('gm_mask.mif', 'count', 'gm_mask.mif'))
  csf_voxels = int(image.statistic('csf_mask.mif', 'count', 'csf_mask.mif'))
  empty_masks = [ ]
  if not wm_voxels:
    empty_masks.append('WM')
  if not gm_voxels:
    empty_masks.append('GM')
  if not csf_voxels:
    empty_masks.append('CSF')
  if empty_masks:
    message = ','.join(empty_masks)
    message += ' tissue mask'
    if len(empty_masks) > 1:
      message += 's'
    message += ' empty; cannot estimate response function'
    # Second pluralisation applies to 'function'
    if len(empty_masks) > 1:
      message += 's'
    app.error(message)
  # For each of the three tissues, generate a multi-shell response
  bvalues_option = ' -shell ' + ','.join(map(str,shells))
  sfwm_lmax_option = ''
  if wm_lmax:
    sfwm_lmax_option = ' -lmax ' + ','.join(map(str,wm_lmax))
  run.command('amp2response dwi.mif wm_sf_mask.mif dirs.mif wm.txt' + bvalues_option + sfwm_lmax_option)
  run.command('amp2response dwi.mif gm_mask.mif dirs.mif gm.txt' + bvalues_option + ' -isotropic')
  run.command('amp2response dwi.mif csf_mask.mif dirs.mif csf.txt' + bvalues_option + ' -isotropic')
  run.function(shutil.copyfile, 'wm.txt', path.fromUser(app.args.out_wm, False))
  run.function(shutil.copyfile, 'gm.txt', path.fromUser(app.args.out_gm, False))
  run.function(shutil.copyfile, 'csf.txt', path.fromUser(app.args.out_csf, False))
  # Generate output 4D binary image with voxel selections; RGB as in MSMT-CSD paper
  run.command('mrcat csf_mask.mif gm_mask.mif wm_sf_mask.mif voxels.mif -axis 3')
def runSubject(bids_dir, label, output_prefix):
  """Run the full participant-level connectome pipeline for one BIDS subject.

  Parameters:
    bids_dir      -- root of the BIDS dataset
    label         -- subject label (e.g. 'sub-01'); used for both input lookup and output naming
    output_prefix -- directory under which '<label>/connectome' and '<label>/dwi' outputs are written

  Stages: DWI import & (optional) denoise / Gibbs unringing / distortion &
  bias field correction; T1 processing & T1->DWI rigid registration; 5TT
  generation; response estimation & (MSMT-)CSD; parcellation (FreeSurfer or
  AAL); tractography + SIFT2; connectome construction; export of results.
  """
  output_dir = os.path.join(output_prefix, label)
  # Start from a clean per-subject output directory
  if os.path.exists(output_dir):
    shutil.rmtree(output_dir)
  os.makedirs(output_dir)
  os.makedirs(os.path.join(output_dir, 'connectome'))
  os.makedirs(os.path.join(output_dir, 'dwi'))
  fsl_path = os.environ.get('FSLDIR', '')
  if not fsl_path:
    app.error('Environment variable FSLDIR is not set; please run appropriate FSL configuration script')
  flirt_cmd = fsl.exeName('flirt')
  fslanat_cmd = fsl.exeName('fsl_anat')
  fsl_suffix = fsl.suffix()
  # Gibbs ringing removal is optional: only performed if the 'unring' binary is on PATH
  unring_cmd = 'unring.a64'
  if not find_executable(unring_cmd):
    app.console('Command \'' + unring_cmd + '\' not found; cannot perform Gibbs ringing removal')
    unring_cmd = ''
  # Bias field correction: prefer ANTs N4, fall back to FSL FAST, else skip
  dwibiascorrect_algo = '-ants'
  if not find_executable('N4BiasFieldCorrection'):
    # Can't use findFSLBinary() here, since we want to proceed even if it's not found
    if find_executable('fast') or find_executable('fsl5.0-fast'):
      dwibiascorrect_algo = '-fsl'
      app.console('Could not find ANTs program N4BiasFieldCorrection; '
                  'using FSL FAST for bias field correction')
    else:
      dwibiascorrect_algo = ''
      app.warn('Could not find ANTs program \'N4BiasFieldCorrection\' or FSL program \'fast\'; '
               'will proceed without performing DWI bias field correction')
  if not app.args.parcellation:
    app.error('For participant-level analysis, desired parcellation must be provided using the -parcellation option')
  # Locate parcellation image (if template-based), source LUT, and MRtrix target LUT
  parc_image_path = ''
  parc_lut_file = ''
  mrtrix_lut_file = os.path.join(os.path.dirname(os.path.abspath(app.__file__)), os.pardir, os.pardir, 'share', 'mrtrix3', 'labelconvert')
  if app.args.parcellation == 'fs_2005' or app.args.parcellation == 'fs_2009':
    if not 'FREESURFER_HOME' in os.environ:
      app.error('Environment variable FREESURFER_HOME not set; please verify FreeSurfer installation')
    if not find_executable('recon-all'):
      app.error('Could not find FreeSurfer script recon-all; please verify FreeSurfer installation')
    parc_lut_file = os.path.join(os.environ['FREESURFER_HOME'], 'FreeSurferColorLUT.txt')
    if app.args.parcellation == 'fs_2005':
      mrtrix_lut_file = os.path.join(mrtrix_lut_file, 'fs_default.txt')
    else:
      mrtrix_lut_file = os.path.join(mrtrix_lut_file, 'fs_a2009s.txt')
  if app.args.parcellation == 'aal' or app.args.parcellation == 'aal2':
    # Can use the MNI152 image provided with FSL for registration to the AAL template
    mni152_path = os.path.join(fsl_path, 'data', 'standard', 'MNI152_T1_1mm.nii.gz')
    if not os.path.isfile(mni152_path):
      app.error('Could not find MNI152 template image within FSL installation (expected location: ' + mni152_path + ')')
    if app.args.parcellation == 'aal':
      parc_image_path = os.path.abspath(os.path.join(os.sep, 'opt', 'aal', 'ROI_MNI_V4.nii'))
      parc_lut_file = os.path.abspath(os.path.join(os.sep, 'opt', 'aal', 'ROI_MNI_V4.txt'))
      mrtrix_lut_file = os.path.join(mrtrix_lut_file, 'aal.txt')
    else:
      parc_image_path = os.path.abspath(os.path.join(os.sep, 'opt', 'aal', 'ROI_MNI_V5.nii'))
      parc_lut_file = os.path.abspath(os.path.join(os.sep, 'opt', 'aal', 'ROI_MNI_V5.txt'))
      mrtrix_lut_file = os.path.join(mrtrix_lut_file, 'aal2.txt')
  # If the default atlas location fails, try alongside the user-provided -atlas_path
  if parc_image_path and not os.path.isfile(parc_image_path):
    if app.args.atlas_path:
      parc_image_path = [ parc_image_path, os.path.join(os.path.dirname(app.args.atlas_path), os.path.basename(parc_image_path)) ]
      if os.path.isfile(parc_image_path[1]):
        parc_image_path = parc_image_path[1]
      else:
        app.error('Could not find parcellation image (tested locations: ' + str(parc_image_path) + ')')
    else:
      app.error('Could not find parcellation image (expected location: ' + parc_image_path + ')')
  if not os.path.isfile(parc_lut_file):
    if app.args.atlas_path:
      parc_lut_file = [ parc_lut_file, os.path.join(os.path.dirname(app.args.atlas_path), os.path.basename(parc_lut_file)) ]
      if os.path.isfile(parc_lut_file[1]):
        parc_lut_file = parc_lut_file[1]
      else:
        app.error('Could not find parcellation lookup table file (tested locations: ' + str(parc_lut_file) + ')')
    else:
      app.error('Could not find parcellation lookup table file (expected location: ' + parc_lut_file + ')')
  if not os.path.exists(mrtrix_lut_file):
    app.error('Could not find MRtrix3 connectome lookup table file (expected location: ' + mrtrix_lut_file + ')')
  app.makeTempDir()
  # Need to perform an initial import of JSON data using mrconvert; so let's grab the
  # diffusion gradient table as well. If no bvec/bval pair sits alongside the image,
  # fall back to a dataset-level table. May need to concatenate more than one input DWI,
  # since if there's more than one phase-encode direction in the acquired DWIs (i.e. not
  # just those used for estimating the inhomogeneity field), they will need to be stored
  # as separate NIfTI files in the 'dwi/' directory.
  dwi_image_list = glob.glob(os.path.join(bids_dir, label, 'dwi', label) + '*_dwi.nii*')
  dwi_index = 1
  for entry in dwi_image_list:
    # os.path.split() falls over with .nii.gz extensions; only removes the .gz
    prefix = entry.split(os.extsep)[0]
    if os.path.isfile(prefix + '.bval') and os.path.isfile(prefix + '.bvec'):
      prefix = prefix + '.'
    else:
      # Fall back to a dataset-level gradient table
      # NOTE(review): this path omits the subject label component — confirm against BIDS layout
      prefix = os.path.join(bids_dir, 'dwi')
      if not (os.path.isfile(prefix + 'bval') and os.path.isfile(prefix + 'bvec')):
        app.error('Unable to locate valid diffusion gradient table for image \'' + entry + '\'')
    grad_import_option = ' -fslgrad ' + prefix + 'bvec ' + prefix + 'bval'
    # Only try to import a JSON file if it's actually present
    json_path = prefix + 'json'
    if os.path.isfile(json_path):
      json_import_option = ' -json_import ' + json_path
    else:
      json_import_option = ''
    run.command('mrconvert ' + entry + grad_import_option + json_import_option + ' ' + path.toTemp('dwi' + str(dwi_index) + '.mif', True))
    dwi_index += 1
  # Go hunting for reversed phase-encode data dedicated to field map estimation
  fmap_image_list = []
  fmap_dir = os.path.join(bids_dir, label, 'fmap')
  fmap_index = 1
  if os.path.isdir(fmap_dir):
    if app.args.preprocessed:
      app.error('fmap/ directory detected for subject \'' + label + '\' despite use of ' + option_prefix + 'preprocessed option')
    fmap_image_list = glob.glob(os.path.join(fmap_dir, label) + '_dir-*_epi.nii*')
    for entry in fmap_image_list:
      prefix = entry.split(os.extsep)[0]
      json_path = prefix + '.json'
      # NOTE(review): JSON is opened before the os.path.isfile() check below, so a
      # missing sidecar raises here rather than reaching the warn branch — confirm intent
      with open(json_path, 'r') as f:
        json_elements = json.load(f)
      # Honour the BIDS 'IntendedFor' field: skip fmaps not targeting our DWIs
      if 'IntendedFor' in json_elements and not any(i.endswith(json_elements['IntendedFor']) for i in dwi_image_list):
        app.console('Image \'' + entry + '\' is not intended for use with DWIs; skipping')
        continue
      if os.path.isfile(json_path):
        json_import_option = ' -json_import ' + json_path
        # fmap files will not come with any gradient encoding in the JSON;
        # therefore we need to add it manually ourselves so that mrcat / mrconvert can
        # appropriately handle the table once these images are concatenated with the DWIs
        fmap_image_size = image.Header(entry).size()
        fmap_image_num_volumes = 1 if len(fmap_image_size) == 3 else fmap_image_size[3]
        run.command('mrconvert ' + entry + json_import_option + ' -set_property dw_scheme \"' + '\\n'.join(['0,0,1,0'] * fmap_image_num_volumes) + '\" ' + path.toTemp('fmap' + str(fmap_index) + '.mif', True))
        fmap_index += 1
      else:
        app.warn('No corresponding .json file found for image \'' + entry + '\'; skipping')
    fmap_image_list = [ 'fmap' + str(index) + '.mif' for index in range(1, fmap_index) ]
  # If there's no data in fmap/ directory, need to check to see if there's any phase-encoding
  # contrast within the input DWI(s)
  elif len(dwi_image_list) < 2 and not app.args.preprocessed:
    app.error('Inadequate data for pre-processing of subject \'' + label + '\': No phase-encoding contrast in input DWIs or fmap/ directory')
  dwi_image_list = [ 'dwi' + str(index) + '.mif' for index in range(1, dwi_index) ]
  # Import anatomical image
  run.command('mrconvert ' + os.path.join(bids_dir, label, 'anat', label + '_T1w.nii.gz') + ' ' + path.toTemp('T1.mif', True))
  cwd = os.getcwd()
  app.gotoTempDir()
  dwipreproc_se_epi = ''
  dwipreproc_se_epi_option = ''
  # For automated testing, down-sampled images are used. However, this invalidates the
  # requirements of both MP-PCA denoising and Gibbs ringing removal. In addition, eddy can
  # still take a long time despite the down-sampling. Therefore, the -preprocessed option
  # accepts images that have been pre-processed to the stage where it is still only DWI,
  # JSON & bvecs/bvals that need to be provided.
  if app.args.preprocessed:
    if len(dwi_image_list) > 1:
      app.error('If DWIs have been pre-processed, then only a single DWI file should need to be provided')
    app.console('Skipping MP-PCA denoising, ' + ('Gibbs ringing removal, ' if unring_cmd else '') + 'distortion correction and bias field correction due to use of ' + option_prefix + 'preprocessed option')
    run.function(os.rename, dwi_image_list[0], 'dwi.mif')
  else:
    # Do initial image pre-processing (denoising, Gibbs ringing removal if available,
    # distortion correction & bias field correction) as normal.
    # Concatenate any SE EPI images with the DWIs before denoising (& unringing), then
    # separate them again after the fact
    dwidenoise_input = 'dwidenoise_input.mif'
    fmap_num_volumes = 0
    if fmap_image_list:
      run.command('mrcat ' + ' '.join(fmap_image_list) + ' fmap_cat.mif -axis 3')
      for i in fmap_image_list:
        file.delTemporary(i)
      fmap_num_volumes = image.Header('fmap_cat.mif').size()[3]
      dwidenoise_input = 'all_cat.mif'
      run.command('mrcat fmap_cat.mif ' + ' '.join(dwi_image_list) + ' ' + dwidenoise_input + ' -axis 3')
      file.delTemporary('fmap_cat.mif')
    else:
      # Even if no explicit fmap images, may still need to concatenate multiple DWI inputs
      if len(dwi_image_list) > 1:
        run.command('mrcat ' + ' '.join(dwi_image_list) + ' ' + dwidenoise_input + ' -axis 3')
      else:
        run.function(shutil.move, dwi_image_list[0], dwidenoise_input)
    # NOTE(review): in the single-input branch above the file has been moved, not copied;
    # presumably file.delTemporary tolerates a missing path — confirm
    for i in dwi_image_list:
      file.delTemporary(i)
    # Step 1: Denoise (output NIfTI if unringing follows, since unring reads NIfTI)
    run.command('dwidenoise ' + dwidenoise_input + ' dwi_denoised.' + ('nii' if unring_cmd else 'mif'))
    if unring_cmd:
      # NIfTI round-trip loses header key-values; stash them for re-import after unringing
      run.command('mrinfo ' + dwidenoise_input + ' -json_keyval input.json')
    file.delTemporary(dwidenoise_input)
    # Step 2: Gibbs ringing removal (if available)
    if unring_cmd:
      run.command(unring_cmd + ' dwi_denoised.nii dwi_unring' + fsl_suffix + ' -n 100')
      file.delTemporary('dwi_denoised.nii')
      unring_output_path = fsl.findImage('dwi_unring')
      run.command('mrconvert ' + unring_output_path + ' dwi_unring.mif -json_import input.json')
      file.delTemporary(unring_output_path)
      file.delTemporary('input.json')
    # If fmap images and DWIs have been concatenated, now is the time to split them back apart
    dwipreproc_input = 'dwi_unring.mif' if unring_cmd else 'dwi_denoised.mif'
    if fmap_num_volumes:
      cat_input = 'dwi_unring.mif' if unring_cmd else 'dwi_denoised.mif'
      dwipreproc_se_epi = 'se_epi.mif'
      # fmap volumes were concatenated first, so they occupy indices [0, fmap_num_volumes)
      run.command('mrconvert ' + cat_input + ' ' + dwipreproc_se_epi + ' -coord 3 0:' + str(fmap_num_volumes - 1))
      cat_num_volumes = image.Header(cat_input).size()[3]
      run.command('mrconvert ' + cat_input + ' dwipreproc_in.mif -coord 3 ' + str(fmap_num_volumes) + ':' + str(cat_num_volumes - 1))
      file.delTemporary(dwipreproc_input)
      dwipreproc_input = 'dwipreproc_in.mif'
      dwipreproc_se_epi_option = ' -se_epi ' + dwipreproc_se_epi
    # Step 3: Distortion correction
    run.command('dwipreproc ' + dwipreproc_input + ' dwi_preprocessed.mif -rpe_header' + dwipreproc_se_epi_option)
    file.delTemporary(dwipreproc_input)
    if dwipreproc_se_epi:
      file.delTemporary(dwipreproc_se_epi)
    # Step 4: Bias field correction
    if dwibiascorrect_algo:
      run.command('dwibiascorrect dwi_preprocessed.mif dwi.mif ' + dwibiascorrect_algo)
      file.delTemporary('dwi_preprocessed.mif')
    else:
      run.function(shutil.move, 'dwi_preprocessed.mif', 'dwi.mif')
  # No longer branching based on whether or not -preprocessed was specified
  # Step 5: Generate a brain mask for DWI
  run.command('dwi2mask dwi.mif dwi_mask.mif')
  # Step 6: Perform brain extraction on the T1 image in its original space
  # (this is necessary for histogram matching prior to registration)
  # Use fsl_anat script
  run.command('mrconvert T1.mif T1.nii -stride -1,+2,+3')
  run.command(fslanat_cmd + ' -i T1.nii --noseg --nosubcortseg')
  run.command('mrconvert ' + fsl.findImage('T1.anat' + os.sep + 'T1_biascorr_brain_mask') + ' T1_mask.mif -datatype bit')
  run.command('mrconvert ' + fsl.findImage('T1.anat' + os.sep + 'T1_biascorr_brain') + ' T1_biascorr_brain.mif')
  file.delTemporary('T1.anat')
  # Step 7: Generate target images for T1->DWI registration
  # (a mean b=0 image, plus contrast-inverted histogram-matched pseudo images in each modality)
  run.command('dwiextract dwi.mif -bzero - | '
              'mrcalc - 0.0 -max - | '
              'mrmath - mean -axis 3 dwi_meanbzero.mif')
  run.command('mrcalc 1 dwi_meanbzero.mif -div dwi_mask.mif -mult - | '
              'mrhistmatch - T1_biascorr_brain.mif dwi_pseudoT1.mif -mask_input dwi_mask.mif -mask_target T1_mask.mif')
  run.command('mrcalc 1 T1_biascorr_brain.mif -div T1_mask.mif -mult - | '
              'mrhistmatch - dwi_meanbzero.mif T1_pseudobzero.mif -mask_input T1_mask.mif -mask_target dwi_mask.mif')
  # Step 8: Perform T1->DWI registration
  # Note that two registrations are performed: Even though we have a symmetric registration,
  # generation of the two histogram-matched images means that you will get slightly different
  # answers depending on which synthesized image & original image you use; so the two rigid
  # transformations are averaged.
  run.command('mrregister T1_biascorr_brain.mif dwi_pseudoT1.mif -type rigid -mask1 T1_mask.mif -mask2 dwi_mask.mif -rigid rigid_T1_to_pseudoT1.txt')
  file.delTemporary('T1_biascorr_brain.mif')
  run.command('mrregister T1_pseudobzero.mif dwi_meanbzero.mif -type rigid -mask1 T1_mask.mif -mask2 dwi_mask.mif -rigid rigid_pseudobzero_to_bzero.txt')
  file.delTemporary('dwi_meanbzero.mif')
  run.command('transformcalc rigid_T1_to_pseudoT1.txt rigid_pseudobzero_to_bzero.txt average rigid_T1_to_dwi.txt')
  file.delTemporary('rigid_T1_to_pseudoT1.txt')
  file.delTemporary('rigid_pseudobzero_to_bzero.txt')
  run.command('mrtransform T1.mif T1_registered.mif -linear rigid_T1_to_dwi.txt')
  file.delTemporary('T1.mif')
  # Note: Since we're using a mask from fsl_anat (which crops the FoV), but using it as input
  # to 5ttgen fsl (which is receiving the raw T1), we need to resample in order to have the
  # same dimensions between these two
  run.command('mrtransform T1_mask.mif T1_mask_registered.mif -linear rigid_T1_to_dwi.txt -template T1_registered.mif -interp nearest')
  file.delTemporary('T1_mask.mif')
  # Step 9: Generate 5TT image for ACT
  run.command('5ttgen fsl T1_registered.mif 5TT.mif -mask T1_mask_registered.mif')
  file.delTemporary('T1_mask_registered.mif')
  # Step 10: Estimate response functions for spherical deconvolution
  run.command('dwi2response dhollander dwi.mif response_wm.txt response_gm.txt response_csf.txt -mask dwi_mask.mif')
  # Step 11: Determine whether we are working with single-shell or multi-shell data
  # (> 2 unique b-values, counting b=0)
  shells = [ int(round(float(value))) for value in image.mrinfo('dwi.mif', 'shellvalues').strip().split() ]
  multishell = (len(shells) > 2)
  # Step 12: Perform spherical deconvolution
  # Use a dilated mask for spherical deconvolution as a 'safety margin' -
  # ACT should be responsible for stopping streamlines before they reach the edge of the DWI mask
  run.command('maskfilter dwi_mask.mif dilate dwi_mask_dilated.mif -npass 3')
  if multishell:
    run.command('dwi2fod msmt_csd dwi.mif response_wm.txt FOD_WM.mif response_gm.txt FOD_GM.mif response_csf.txt FOD_CSF.mif '
                '-mask dwi_mask_dilated.mif -lmax 10,0,0')
    file.delTemporary('FOD_GM.mif')
    file.delTemporary('FOD_CSF.mif')
  else:
    # Still use the msmt_csd algorithm with single-shell data: Use hard non-negativity constraint
    # Also incorporate the CSF response to provide some fluid attenuation
    run.command('dwi2fod msmt_csd dwi.mif response_wm.txt FOD_WM.mif response_csf.txt FOD_CSF.mif '
                '-mask dwi_mask_dilated.mif -lmax 10,0')
    file.delTemporary('FOD_CSF.mif')
  # Step 13: Generate the grey matter parcellation
  # The necessary steps here will vary significantly depending on the parcellation scheme selected
  run.command('mrconvert T1_registered.mif T1_registered.nii -stride +1,+2,+3')
  if app.args.parcellation == 'fs_2005' or app.args.parcellation == 'fs_2009':
    # Run FreeSurfer pipeline on this subject's T1 image
    run.command('recon-all -sd ' + app.tempDir + ' -subjid freesurfer -i T1_registered.nii')
    run.command('recon-all -sd ' + app.tempDir + ' -subjid freesurfer -all')
    # Grab the relevant parcellation image and target lookup table for conversion
    parc_image_path = os.path.join('freesurfer', 'mri')
    if app.args.parcellation == 'fs_2005':
      parc_image_path = os.path.join(parc_image_path, 'aparc+aseg.mgz')
    else:
      parc_image_path = os.path.join(parc_image_path, 'aparc.a2009s+aseg.mgz')
    # Perform the index conversion
    run.command('labelconvert ' + parc_image_path + ' ' + parc_lut_file + ' ' + mrtrix_lut_file + ' parc_init.mif')
    if app.cleanup:
      run.function(shutil.rmtree, 'freesurfer')
    # Fix the sub-cortical grey matter parcellations using FSL FIRST
    run.command('labelsgmfix parc_init.mif T1_registered.mif ' + mrtrix_lut_file + ' parc.mif')
    file.delTemporary('parc_init.mif')
  elif app.args.parcellation == 'aal' or app.args.parcellation == 'aal2':
    # Can use MNI152 image provided with FSL for registration
    run.command(flirt_cmd + ' -ref ' + mni152_path + ' -in T1_registered.nii -omat T1_to_MNI_FLIRT.mat -dof 12')
    run.command('transformconvert T1_to_MNI_FLIRT.mat T1_registered.nii ' + mni152_path + ' flirt_import T1_to_MNI_MRtrix.mat')
    file.delTemporary('T1_to_MNI_FLIRT.mat')
    run.command('transformcalc T1_to_MNI_MRtrix.mat invert MNI_to_T1_MRtrix.mat')
    file.delTemporary('T1_to_MNI_MRtrix.mat')
    run.command('mrtransform ' + parc_image_path + ' AAL.mif -linear MNI_to_T1_MRtrix.mat '
                '-template T1_registered.mif -interp nearest')
    file.delTemporary('MNI_to_T1_MRtrix.mat')
    run.command('labelconvert AAL.mif ' + parc_lut_file + ' ' + mrtrix_lut_file + ' parc.mif')
    file.delTemporary('AAL.mif')
  else:
    app.error('Unknown parcellation scheme requested: ' + app.args.parcellation)
  file.delTemporary('T1_registered.nii')
  # Step 14: Generate the tractogram
  # If not manually specified, determine the appropriate number of streamlines based on the
  # number of nodes in the parcellation: mean edge weight of 1,000 streamlines.
  # A smaller FOD amplitude threshold of 0.06 (default 0.1) is used for tracking due to the
  # use of the msmt_csd algorithm, which imposes a hard rather than soft non-negativity constraint
  num_nodes = int(image.statistic('parc.mif', 'max'))
  num_streamlines = 1000 * num_nodes * num_nodes
  if app.args.streamlines:
    num_streamlines = app.args.streamlines
  run.command('tckgen FOD_WM.mif tractogram.tck -act 5TT.mif -backtrack -crop_at_gmwmi -cutoff 0.06 -maxlength 250 -power 0.33 '
              '-select ' + str(num_streamlines) + ' -seed_dynamic FOD_WM.mif')
  # Step 15: Use SIFT2 to determine streamline weights
  fd_scale_gm_option = ''
  if not multishell:
    fd_scale_gm_option = ' -fd_scale_gm'
  run.command('tcksift2 tractogram.tck FOD_WM.mif weights.csv -act 5TT.mif -out_mu mu.txt' + fd_scale_gm_option)
  # Step 16: Generate a TDI (to verify that SIFT2 has worked correctly)
  with open('mu.txt', 'r') as f:
    mu = float(f.read())
  run.command('tckmap tractogram.tck -tck_weights_in weights.csv -template FOD_WM.mif -precise - | '
              'mrcalc - ' + str(mu) + ' -mult tdi.mif')
  # Step 17: Generate the connectome
  # Only provide the standard density-weighted connectome for now
  run.command('tck2connectome tractogram.tck parc.mif connectome.csv -tck_weights_in weights.csv')
  file.delTemporary('weights.csv')
  # Move necessary files to output directory
  run.function(shutil.copy, 'connectome.csv', os.path.join(output_dir, 'connectome', label + '_connectome.csv'))
  run.command('mrconvert dwi.mif ' + os.path.join(output_dir, 'dwi', label + '_dwi.nii.gz') + ' -export_grad_fsl ' + os.path.join(output_dir, 'dwi', label + '_dwi.bvec') + ' ' + os.path.join(output_dir, 'dwi', label + '_dwi.bval') + ' -json_export ' + os.path.join(output_dir, 'dwi', label + '_dwi.json'))
  run.command('mrconvert tdi.mif ' + os.path.join(output_dir, 'dwi', label + '_tdi.nii.gz'))
  run.function(shutil.copy, 'mu.txt', os.path.join(output_dir, 'connectome', label + '_mu.txt'))
  run.function(shutil.copy, 'response_wm.txt', os.path.join(output_dir, 'dwi', label + '_response.txt'))
  # Manually wipe and zero the temp directory (since we might be processing more than one subject)
  os.chdir(cwd)
  if app.cleanup:
    app.console('Deleting temporary directory ' + app.tempDir)
    # Can't use run.function() here; it'll try to write to the log file that resides
    # in the temp directory just deleted
    shutil.rmtree(app.tempDir)
  else:
    app.console('Contents of temporary directory kept, location: ' + app.tempDir)
  # Reset so a subsequent subject gets a fresh temporary directory
  app.tempDir = ''