Example #1
def execute(): #pylint: disable=unused-variable
  shells = [ int(round(float(x))) for x in image.mrinfo('dwi.mif', 'shell_bvalues').split() ]

  # Get lmax information (if provided)
  lmax = [ ]
  if app.ARGS.lmax:
    lmax = [ int(x.strip()) for x in app.ARGS.lmax.split(',') ]
    if not len(lmax) == len(shells):
      raise MRtrixError('Number of manually-defined lmax\'s (' + str(len(lmax)) + ') does not match number of b-value shells (' + str(len(shells)) + ')')
    for shell_l in lmax:
      if shell_l % 2:
        raise MRtrixError('Values for lmax must be even')
      if shell_l < 0:
        raise MRtrixError('Values for lmax must be non-negative')

  # Do we have directions, or do we need to calculate them?
  if not os.path.exists('dirs.mif'):
    run.command('dwi2tensor dwi.mif - -mask in_voxels.mif | tensor2metric - -vector dirs.mif')

  # Get response function
  bvalues_option = ' -shells ' + ','.join(map(str,shells))
  lmax_option = ''
  if lmax:
    lmax_option = ' -lmax ' + ','.join(map(str,lmax))
  run.command('amp2response dwi.mif in_voxels.mif dirs.mif response.txt' + bvalues_option + lmax_option)

  run.function(shutil.copyfile, 'response.txt', path.from_user(app.ARGS.output, False))
  if app.ARGS.voxels:
    run.command('mrconvert in_voxels.mif ' + path.from_user(app.ARGS.voxels), mrconvert_keyval=path.from_user(app.ARGS.input, False), force=app.FORCE_OVERWRITE)
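
Example #1 uses the newer MRtrix3 scripting interface (app.ARGS, path.from_user, MRtrixError); several of the later examples show the older equivalents (app.args, path.fromUser, app.error). The lmax parsing and validation at the top of execute() is plain Python and can be tried in isolation; a minimal sketch with a hypothetical function name and no MRtrix3 dependencies:

# Minimal sketch of the lmax parsing/validation above
# (parse_lmax is a hypothetical name used only for illustration):
def parse_lmax(lmax_arg, num_shells):
  lmax = [ int(x.strip()) for x in lmax_arg.split(',') ]
  if len(lmax) != num_shells:
    raise ValueError('Number of manually-defined lmax\'s (' + str(len(lmax))
                     + ') does not match number of b-value shells (' + str(num_shells) + ')')
  for shell_l in lmax:
    if shell_l % 2:
      raise ValueError('Values for lmax must be even')
    if shell_l < 0:
      raise ValueError('Values for lmax must be non-negative')
  return lmax

# e.g. parse_lmax('0,8', 2) -> [0, 8]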
Example #2
def execute():  #pylint: disable=unused-variable
    # Generate the images related to each tissue
    run.command('mrconvert input.mif -coord 3 1 CSF.mif')
    run.command('mrconvert input.mif -coord 3 2 cGM.mif')
    run.command('mrconvert input.mif -coord 3 3 cWM.mif')
    run.command('mrconvert input.mif -coord 3 4 sGM.mif')

    # Combine WM and subcortical WM into a unique WM image
    run.command(
        'mrconvert input.mif - -coord 3 3,5 | mrmath - sum WM.mif -axis 3')

    # Create an empty lesion image
    run.command('mrcalc WM.mif 0 -mul lsn.mif')

    # Convert into the 5tt format
    run.command('mrcat cGM.mif sGM.mif WM.mif CSF.mif lsn.mif 5tt.mif -axis 3')

    if app.ARGS.nocrop:
        run.function(os.rename, '5tt.mif', 'result.mif')
    else:
        run.command(
            'mrmath 5tt.mif sum - -axis 3 | mrthreshold - - -abs 0.5 | mrgrid 5tt.mif crop result.mif -mask -'
        )

    run.command('mrconvert result.mif ' + path.from_user(app.ARGS.output),
                mrconvert_keyval=path.from_user(app.ARGS.input, False),
                force=app.FORCE_OVERWRITE)
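
The ordering used by the final mrcat call follows the standard five-tissue-type (5TT) convention (cortical GM, sub-cortical GM, WM, CSF, pathological tissue). As an optional sanity check, the constructed image can be passed through 5ttcheck, the same validator invoked in Example #15; a minimal sketch:

# Optional verification of the constructed 5TT image ('result.mif' as produced above)
run.command('5ttcheck result.mif')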
Example #3
def execute():  #pylint: disable=unused-variable
    import shutil
    from mrtrix3 import app, image, path, run
    bvalues = [
        int(round(float(x)))
        for x in image.mrinfo('dwi.mif', 'shell_bvalues').split()
    ]
    if len(bvalues) < 2:
        app.error('Need at least 2 unique b-values (including b=0).')
    lmax_option = ''
    if app.args.lmax:
        lmax_option = ' -lmax ' + app.args.lmax
    if not app.args.mask:
        run.command('maskfilter mask.mif erode mask_eroded.mif -npass ' +
                    str(app.args.erode))
        mask_path = 'mask_eroded.mif'
    else:
        mask_path = 'mask.mif'
    run.command('dwi2tensor dwi.mif -mask ' + mask_path + ' tensor.mif')
    run.command(
        'tensor2metric tensor.mif -fa fa.mif -vector vector.mif -mask ' +
        mask_path)
    if app.args.threshold:
        run.command('mrthreshold fa.mif voxels.mif -abs ' +
                    str(app.args.threshold))
    else:
        run.command('mrthreshold fa.mif voxels.mif -top ' +
                    str(app.args.number))
    run.command(
        'dwiextract dwi.mif - -singleshell -no_bzero | amp2response - voxels.mif vector.mif response.txt'
        + lmax_option)

    run.function(shutil.copyfile, 'response.txt',
                 path.fromUser(app.args.output, False))
Example #4
File: manual.py Project: MRtrix3/mrtrix3
def execute(): #pylint: disable=unused-variable
  import os, shutil
  from mrtrix3 import app, image, path, run

  shells = [ int(round(float(x))) for x in image.mrinfo('dwi.mif', 'shell_bvalues').split() ]

  # Get lmax information (if provided)
  lmax = [ ]
  if app.args.lmax:
    lmax = [ int(x.strip()) for x in app.args.lmax.split(',') ]
    if not len(lmax) == len(shells):
      app.error('Number of manually-defined lmax\'s (' + str(len(lmax)) + ') does not match number of b-value shells (' + str(len(shells)) + ')')
    for l in lmax:
      if l%2:
        app.error('Values for lmax must be even')
      if l<0:
        app.error('Values for lmax must be non-negative')

  # Do we have directions, or do we need to calculate them?
  if not os.path.exists('dirs.mif'):
    run.command('dwi2tensor dwi.mif - -mask in_voxels.mif | tensor2metric - -vector dirs.mif')

  # Get response function
  bvalues_option = ' -shells ' + ','.join(map(str,shells))
  lmax_option = ''
  if lmax:
    lmax_option = ' -lmax ' + ','.join(map(str,lmax))
  run.command('amp2response dwi.mif in_voxels.mif dirs.mif response.txt' + bvalues_option + lmax_option)

  run.function(shutil.copyfile, 'response.txt', path.fromUser(app.args.output, False))
  run.function(shutil.copyfile, 'in_voxels.mif', 'voxels.mif')
Example #5
def getInputs():
    import os, shutil
    from mrtrix3 import app, path, run
    run.command('mrconvert ' + path.fromUser(app.args.input, True) + ' ' +
                path.toTemp('input.mif', True))
    if app.args.lut:
        run.function(shutil.copyfile, path.fromUser(app.args.lut, False),
                     path.toTemp('LUT.txt', False))
Example #6
File: manual.py Project: ssheybani/mrtrix3
def execute():
    import os, shutil
    from mrtrix3 import app, image, path, run

    shells = [
        int(round(float(x)))
        for x in image.headerField('dwi.mif', 'shells').split()
    ]

    # Get lmax information (if provided)
    lmax = []
    if app.args.lmax:
        lmax = [int(x.strip()) for x in app.args.lmax.split(',')]
        if not len(lmax) == len(shells):
            app.error('Number of manually-defined lmax\'s (' + str(len(lmax)) +
                      ') does not match number of b-value shells (' +
                      str(len(shells)) + ')')
        for l in lmax:
            if l % 2:
                app.error('Values for lmax must be even')
            if l < 0:
                app.error('Values for lmax must be non-negative')

    # Do we have directions, or do we need to calculate them?
    if not os.path.exists('dirs.mif'):
        run.command(
            'dwi2tensor dwi.mif - -mask in_voxels.mif | tensor2metric - -vector dirs.mif'
        )

    # Get response function
    bvalues_option = ' -shell ' + ','.join(map(str, shells))
    lmax_option = ''
    if lmax:
        lmax_option = ' -lmax ' + ','.join(map(str, lmax))
    run.command('amp2response dwi.mif in_voxels.mif dirs.mif response.txt' +
                bvalues_option + lmax_option)

    run.function(shutil.copyfile, 'response.txt',
                 path.fromUser(app.args.output, False))
    run.function(shutil.copyfile, 'in_voxels.mif', 'voxels.mif')
Example #7
File: fa.py Project: bcdarwin/mrtrix3
def execute(): #pylint: disable=unused-variable
  bvalues = [ int(round(float(x))) for x in image.mrinfo('dwi.mif', 'shell_bvalues').split() ]
  if len(bvalues) < 2:
    raise MRtrixError('Need at least 2 unique b-values (including b=0).')
  lmax_option = ''
  if app.ARGS.lmax:
    lmax_option = ' -lmax ' + app.ARGS.lmax
  if not app.ARGS.mask:
    run.command('maskfilter mask.mif erode mask_eroded.mif -npass ' + str(app.ARGS.erode))
    mask_path = 'mask_eroded.mif'
  else:
    mask_path = 'mask.mif'
  run.command('dwi2tensor dwi.mif -mask ' + mask_path + ' tensor.mif')
  run.command('tensor2metric tensor.mif -fa fa.mif -vector vector.mif -mask ' + mask_path)
  if app.ARGS.threshold:
    run.command('mrthreshold fa.mif voxels.mif -abs ' + str(app.ARGS.threshold))
  else:
    run.command('mrthreshold fa.mif voxels.mif -top ' + str(app.ARGS.number))
  run.command('dwiextract dwi.mif - -singleshell -no_bzero | amp2response - voxels.mif vector.mif response.txt' + lmax_option)

  run.function(shutil.copyfile, 'response.txt', path.from_user(app.ARGS.output, False))
  if app.ARGS.voxels:
    run.command('mrconvert voxels.mif ' + path.from_user(app.ARGS.voxels), mrconvert_keyval=path.from_user(app.ARGS.input, False), force=app.FORCE_OVERWRITE)
Example #8
File: fa.py Project: MRtrix3/mrtrix3
def execute(): #pylint: disable=unused-variable
  import shutil
  from mrtrix3 import app, image, path, run
  bvalues = [ int(round(float(x))) for x in image.mrinfo('dwi.mif', 'shell_bvalues').split() ]
  if len(bvalues) < 2:
    app.error('Need at least 2 unique b-values (including b=0).')
  lmax_option = ''
  if app.args.lmax:
    lmax_option = ' -lmax ' + app.args.lmax
  if not app.args.mask:
    run.command('maskfilter mask.mif erode mask_eroded.mif -npass ' + str(app.args.erode))
    mask_path = 'mask_eroded.mif'
  else:
    mask_path = 'mask.mif'
  run.command('dwi2tensor dwi.mif -mask ' + mask_path + ' tensor.mif')
  run.command('tensor2metric tensor.mif -fa fa.mif -vector vector.mif -mask ' + mask_path)
  if app.args.threshold:
    run.command('mrthreshold fa.mif voxels.mif -abs ' + str(app.args.threshold))
  else:
    run.command('mrthreshold fa.mif voxels.mif -top ' + str(app.args.number))
  run.command('dwiextract dwi.mif - -singleshell -no_bzero | amp2response - voxels.mif vector.mif response.txt' + lmax_option)

  run.function(shutil.copyfile, 'response.txt', path.fromUser(app.args.output, False))
Example #9
              ') does not match input image (' + str(num_volumes) +
              ' volumes); check your input data')

if app.args.extent:
    extent = app.args.extent
else:
    extent = '5,5,5'

run.command('mrconvert dwi.mif working.mif')

# denoising
if app.args.denoise:
    print("...Beginning denoising")
    run.command('dwidenoise -extent ' + extent +
                ' -noise fullnoisemap.mif working.mif dwidn.mif')
    run.function(os.remove, 'working.mif')
    run.command('mrconvert dwidn.mif working.mif')

# gibbs artifact correction
if app.args.degibbs:
    print("...Beginning degibbsing")
    run.command('mrdegibbs -nshifts 20 -minW 1 -maxW 3 working.mif dwigc.mif')
    run.function(os.remove, 'working.mif')
    run.command('mrconvert dwigc.mif working.mif')

# pre-eddy alignment for multiple input series
if app.args.prealign:
    if len(DWInlist) != 1:
        miflist = []
        for idx, i in enumerate(DWInlist):
            run.command('mrconvert -coord 3 ' + idxlist[idx] +
Example #10
def runGroup(output_dir):

    # Check presence of all required input files before proceeding
    # Pre-calculate paths of all files since many will be used in more than one location
    class subjectPaths(object):
        def __init__(self, label):
            self.in_dwi = os.path.join(output_dir, label, 'dwi',
                                       label + '_dwi.nii.gz')
            self.in_bvec = os.path.join(output_dir, label, 'dwi',
                                        label + '_dwi.bvec')
            self.in_bval = os.path.join(output_dir, label, 'dwi',
                                        label + '_dwi.bval')
            self.in_json = os.path.join(output_dir, label, 'dwi',
                                        label + '_dwi.json')
            self.in_rf = os.path.join(output_dir, label, 'dwi',
                                      label + '_response.txt')
            self.in_connectome = os.path.join(output_dir, label, 'connectome',
                                              label + '_connectome.csv')
            self.in_mu = os.path.join(output_dir, label, 'connectome',
                                      label + '_mu.txt')

            for entry in vars(self).values():
                if not os.path.exists(entry):
                    app.error(
                        'Unable to find critical subject data (expected location: '
                        + entry + ')')

            with open(self.in_mu, 'r') as f:
                self.mu = float(f.read())

            self.RF = []
            with open(self.in_rf, 'r') as f:
                for line in f:
                    self.RF.append([float(v) for v in line.split()])

            self.temp_mask = os.path.join('masks', label + '.mif')
            self.temp_fa = os.path.join('images', label + '.mif')
            self.temp_bzero = os.path.join('bzeros', label + '.mif')
            self.temp_warp = os.path.join('warps', label + '.mif')
            self.temp_voxels = os.path.join('voxels', label + '.mif')
            self.median_bzero = 0.0
            self.dwiintensitynorm_factor = 1.0
            self.RF_multiplier = 1.0
            self.global_multiplier = 1.0
            self.temp_connectome = os.path.join('connectomes', label + '.csv')
            self.out_scale_bzero = os.path.join(
                output_dir, label, 'connectome',
                label + '_scalefactor_bzero.csv')
            self.out_scale_RF = os.path.join(
                output_dir, label, 'connectome',
                label + '_scalefactor_response.csv')
            self.out_connectome = os.path.join(
                output_dir, label, 'connectome',
                label + '_connectome_scaled.csv')

            self.label = label

    subject_list = [
        'sub-' + sub_dir.split("-")[-1]
        for sub_dir in glob.glob(os.path.join(output_dir, 'sub-*'))
    ]
    if not subject_list:
        app.error(
            'No processed subject data found in output directory for group analysis'
        )
    subjects = []
    for label in subject_list:
        subjects.append(subjectPaths(label))

    app.makeTempDir()
    app.gotoTempDir()

    # First pass through subject data in group analysis:
    #   - Grab DWI data (written back from single-subject analysis back into BIDS format)
    #   - Generate mask and FA images to be used in populate template generation
    #   - Generate mean b=0 image for each subject for later use
    progress = app.progressBar('Importing and preparing subject data',
                               len(subjects))
    run.function(os.makedirs, 'bzeros')
    run.function(os.makedirs, 'images')
    run.function(os.makedirs, 'masks')
    for s in subjects:
        grad_import_option = ' -fslgrad ' + s.in_bvec + ' ' + s.in_bval
        run.command('dwi2mask ' + s.in_dwi + ' ' + s.temp_mask +
                    grad_import_option)
        run.command('dwi2tensor ' + s.in_dwi + ' - -mask ' + s.temp_mask +
                    grad_import_option + ' | tensor2metric - -fa ' + s.temp_fa)
        run.command('dwiextract ' + s.in_dwi + grad_import_option +
                    ' - -bzero | mrmath - mean ' + s.temp_bzero + ' -axis 3')
        progress.increment()
    progress.done()

    # First group-level calculation: Generate the population FA template
    app.console(
        'Generating population template for inter-subject intensity normalisation WM mask derivation'
    )
    run.command(
        'population_template images -mask_dir masks -warp_dir warps template.mif '
        '-type rigid_affine_nonlinear -rigid_scale 0.25,0.5,0.8,1.0 -affine_scale 0.7,0.8,1.0,1.0 '
        '-nl_scale 0.5,0.75,1.0,1.0,1.0 -nl_niter 5,5,5,5,5 -linear_no_pause')
    file.delTemporary('images')
    file.delTemporary('masks')

    # Second pass through subject data in group analysis:
    #   - Warp template FA image back to subject space & threshold to define a WM mask in subject space
    #   - Calculate the median subject b=0 value within this mask
    #   - Store this in a file, and contribute to calculation of the mean of these values across subjects
    #   - Contribute to the group average response function
    progress = app.progressBar(
        'Generating group-average response function and intensity normalisation factors',
        len(subjects) + 1)
    run.function(os.makedirs, 'voxels')
    sum_median_bzero = 0.0
    sum_RF = []
    for s in subjects:
        run.command('mrtransform template.mif -warp_full ' + s.temp_warp +
                    ' - -from 2 -template ' + s.temp_bzero + ' | '
                    'mrthreshold - ' + s.temp_voxels + ' -abs 0.4')
        s.median_bzero = float(
            image.statistic(s.temp_bzero, 'median', '-mask ' + s.temp_voxels))
        file.delTemporary(s.temp_bzero)
        file.delTemporary(s.temp_voxels)
        file.delTemporary(s.temp_warp)
        sum_median_bzero += s.median_bzero
        if sum_RF:
            sum_RF = [[a + b for a, b in zip(one, two)]
                      for one, two in zip(sum_RF, s.RF)]
        else:
            sum_RF = s.RF
        progress.increment()
    file.delTemporary('bzeros')
    file.delTemporary('voxels')
    file.delTemporary('warps')
    progress.done()

    # Second group-level calculation:
    #   - Calculate the mean of median b=0 values
    #   - Calculate the mean response function, and extract the l=0 values from it
    mean_median_bzero = sum_median_bzero / len(subjects)
    mean_RF = [[v / len(subjects) for v in line] for line in sum_RF]
    mean_RF_lzero = [line[0] for line in mean_RF]

    # Third pass through subject data in group analysis:
    #   - Scale the connectome strengths:
    #     - Multiply by SIFT proportionality coefficient mu
    #     - Multiply by (mean median b=0) / (subject median b=0)
    #     - Multiply by (subject RF size) / (mean RF size)
    #         (needs to account for multi-shell data)
    #   - Write the result to file
    progress = app.progressBar(
        'Applying normalisation scaling to subject connectomes', len(subjects))
    run.function(os.makedirs, 'connectomes')
    for s in subjects:
        RF_lzero = [line[0] for line in s.RF]
        s.RF_multiplier = 1.0
        for (mean, subj) in zip(mean_RF_lzero, RF_lzero):
            s.RF_multiplier = s.RF_multiplier * subj / mean
        # Don't want to be scaling connectome independently for differences in RF l=0 terms across all shells;
        #   use the geometric mean of the per-shell scale factors
        s.RF_multiplier = math.pow(s.RF_multiplier, 1.0 / len(mean_RF_lzero))

        s.bzero_multiplier = mean_median_bzero / s.median_bzero

        s.global_multiplier = s.mu * s.bzero_multiplier * s.RF_multiplier

        connectome = []
        with open(s.in_connectome, 'r') as f:
            for line in f:
                connectome.append([float(v) for v in line.split()])
        with open(s.temp_connectome, 'w') as f:
            for line in connectome:
                f.write(' '.join([str(v * s.global_multiplier)
                                  for v in line]) + '\n')
        progress.increment()
    progress.done()

    # Third group-level calculation: Generate the group mean connectome
    # For any higher-level analysis (e.g. NBSE, computing connectome global measures, etc.),
    #   trying to incorporate such analysis into this particular pipeline script is likely to
    #   overly complicate the interface, and not actually provide much in terms of
    #   convenience / reproducibility guarantees. The primary functionality of this group-level
    #   analysis is therefore to achieve inter-subject connection density normalisation; users
    #   then have the flexibility to subsequently analyse the data however they choose (ideally
    #   based on subject classification data provided with the BIDS-compliant dataset).
    progress = app.progressBar('Calculating group mean connectome',
                               len(subjects) + 1)
    mean_connectome = []
    for s in subjects:
        connectome = []
        with open(s.temp_connectome, 'r') as f:
            for line in f:
                connectome.append([float(v) for v in line.split()])
        if mean_connectome:
            mean_connectome = [[c1 + c2 for c1, c2 in zip(r1, r2)]
                               for r1, r2 in zip(mean_connectome, connectome)]
        else:
            mean_connectome = connectome
        progress.increment()

    mean_connectome = [[v / len(subjects) for v in row]
                       for row in mean_connectome]
    progress.done()

    # Write results of interest back to the output directory;
    #   both per-subject and group information
    progress = app.progressBar('Writing results to output directory',
                               len(subjects) + 2)
    for s in subjects:
        run.function(shutil.copyfile, s.temp_connectome, s.out_connectome)
        with open(s.out_scale_bzero, 'w') as f:
            f.write(str(s.bzero_multiplier))
        with open(s.out_scale_RF, 'w') as f:
            f.write(str(s.RF_multiplier))
        progress.increment()

    with open(os.path.join(output_dir, 'mean_response.txt'), 'w') as f:
        for row in mean_RF:
            f.write(' '.join([str(v) for v in row]) + '\n')
    progress.increment()
    with open(os.path.join(output_dir, 'mean_connectome.csv'), 'w') as f:
        for row in mean_connectome:
            f.write(' '.join([str(v) for v in row]) + '\n')
    progress.done()
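
The per-edge scaling applied in the third pass of Example #10 combines the SIFT proportionality coefficient mu, a b=0 intensity ratio, and a response-function ratio reduced across shells by a geometric mean. A plain-Python restatement of that arithmetic (the function and argument names are illustrative only):

import math

# Plain-Python restatement of the connectome scaling factors computed above
def global_multiplier(mu, median_bzero, mean_median_bzero, rf_lzero, mean_rf_lzero):
    # Ratio of subject to group-mean response function l=0 terms, combined
    # across shells via the geometric mean (as in the loop over
    # zip(mean_RF_lzero, RF_lzero) above)
    rf_multiplier = 1.0
    for mean, subj in zip(mean_rf_lzero, rf_lzero):
        rf_multiplier *= subj / mean
    rf_multiplier = math.pow(rf_multiplier, 1.0 / len(mean_rf_lzero))
    # Intensity normalisation from the median b=0 signal within the template-derived WM mask
    bzero_multiplier = mean_median_bzero / median_bzero
    # Factor applied to every connectome edge weight
    return mu * bzero_multiplier * rf_multiplier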
Example #11
File: tournier.py Project: MRtrix3/mrtrix3
def execute(): #pylint: disable=unused-variable
  import os, shutil
  from mrtrix3 import app, file, image, path, run #pylint: disable=redefined-builtin

  lmax_option = ''
  if app.args.lmax:
    lmax_option = ' -lmax ' + app.args.lmax

  if app.args.max_iters < 2:
    app.error('Number of iterations must be at least 2')

  for iteration in range(0, app.args.max_iters):
    prefix = 'iter' + str(iteration) + '_'

    if iteration == 0:
      RF_in_path = 'init_RF.txt'
      mask_in_path = 'mask.mif'
      init_RF = '1 -1 1'
      with open(RF_in_path, 'w') as f:
        f.write(init_RF)
      iter_lmax_option = ' -lmax 4'
    else:
      RF_in_path = 'iter' + str(iteration-1) + '_RF.txt'
      mask_in_path = 'iter' + str(iteration-1) + '_SF_dilated.mif'
      iter_lmax_option = lmax_option

    # Run CSD
    run.command('dwi2fod csd dwi.mif ' + RF_in_path + ' ' + prefix + 'FOD.mif -mask ' + mask_in_path + iter_lmax_option)
    # Get amplitudes of two largest peaks, and direction of largest
    run.command('fod2fixel ' + prefix + 'FOD.mif ' + prefix + 'fixel -peak peaks.mif -mask ' + mask_in_path + ' -fmls_no_thresholds')
    file.delTemporary(prefix + 'FOD.mif')
    if iteration:
      file.delTemporary(mask_in_path)
    run.command('fixel2voxel ' + prefix + 'fixel/peaks.mif split_data ' + prefix + 'amps.mif -number 2')
    run.command('mrconvert ' + prefix + 'amps.mif ' + prefix + 'first_peaks.mif -coord 3 0 -axes 0,1,2')
    run.command('mrconvert ' + prefix + 'amps.mif ' + prefix + 'second_peaks.mif -coord 3 1 -axes 0,1,2')
    file.delTemporary(prefix + 'amps.mif')
    run.command('fixel2voxel ' + prefix + 'fixel/directions.mif split_dir ' + prefix + 'all_dirs.mif -number 1')
    file.delTemporary(prefix + 'fixel')
    run.command('mrconvert ' + prefix + 'all_dirs.mif ' + prefix + 'first_dir.mif -coord 3 0:2')
    file.delTemporary(prefix + 'all_dirs.mif')
    # Calculate the 'cost function' Donald derived for selecting single-fibre voxels
    # https://github.com/MRtrix3/mrtrix3/pull/426
    #  sqrt(|peak1|) * (1 - |peak2| / |peak1|)^2
    run.command('mrcalc ' + prefix + 'first_peaks.mif -sqrt 1 ' + prefix + 'second_peaks.mif ' + prefix + 'first_peaks.mif -div -sub 2 -pow -mult '+ prefix + 'CF.mif')
    file.delTemporary(prefix + 'first_peaks.mif')
    file.delTemporary(prefix + 'second_peaks.mif')
    # Select the top-ranked voxels
    run.command('mrthreshold ' + prefix + 'CF.mif -top ' + str(app.args.sf_voxels) + ' ' + prefix + 'SF.mif')
    # Generate a new response function based on this selection
    run.command('amp2response dwi.mif ' + prefix + 'SF.mif ' + prefix + 'first_dir.mif ' + prefix + 'RF.txt' + iter_lmax_option)
    file.delTemporary(prefix + 'first_dir.mif')
    # Should we terminate?
    if iteration > 0:
      run.command('mrcalc ' + prefix + 'SF.mif iter' + str(iteration-1) + '_SF.mif -sub ' + prefix + 'SF_diff.mif')
      file.delTemporary('iter' + str(iteration-1) + '_SF.mif')
      max_diff = image.statistic(prefix + 'SF_diff.mif', 'max')
      file.delTemporary(prefix + 'SF_diff.mif')
      if int(max_diff) == 0:
        app.console('Convergence of SF voxel selection detected at iteration ' + str(iteration))
        file.delTemporary(prefix + 'CF.mif')
        run.function(shutil.copyfile, prefix + 'RF.txt', 'response.txt')
        run.function(shutil.move, prefix + 'SF.mif', 'voxels.mif')
        break

    # Select a greater number of top single-fibre voxels, and dilate (within bounds of initial mask);
    #   these are the voxels that will be re-tested in the next iteration
    run.command('mrthreshold ' + prefix + 'CF.mif -top ' + str(app.args.iter_voxels) + ' - | maskfilter - dilate - -npass ' + str(app.args.dilate) + ' | mrcalc mask.mif - -mult ' + prefix + 'SF_dilated.mif')
    file.delTemporary(prefix + 'CF.mif')

  # Commence the next iteration

  # If terminating due to running out of iterations, still need to put the results in the appropriate location
  if not os.path.exists('response.txt'):
    app.console('Exiting after maximum ' + str(app.args.max_iters) + ' iterations')
    run.function(shutil.copyfile, 'iter' + str(app.args.max_iters-1) + '_RF.txt', 'response.txt')
    run.function(shutil.move, 'iter' + str(app.args.max_iters-1) + '_SF.mif', 'voxels.mif')

  run.function(shutil.copyfile, 'response.txt', path.fromUser(app.args.output, False))
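
The mrcalc call commented as the single-fibre "cost function" in this example evaluates, per voxel, sqrt(|peak1|) * (1 - |peak2|/|peak1|)^2, written in mrcalc's reverse-Polish notation. An equivalent plain-Python expression (names are illustrative only; assumes peak1 > 0):

import math

# Per-voxel equivalent of:
#   mrcalc first_peaks.mif -sqrt 1 second_peaks.mif first_peaks.mif -div -sub 2 -pow -mult CF.mif
def single_fibre_cost(peak1, peak2):
  return math.sqrt(peak1) * (1.0 - peak2 / peak1) ** 2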
Example #12
def getInputs(): #pylint: disable=unused-variable
  import shutil
  from mrtrix3 import app, path, run
  run.command('mrconvert ' + path.fromUser(app.args.input, True) + ' ' + path.toTemp('input.mif', True))
  if app.args.lut:
    run.function(shutil.copyfile, path.fromUser(app.args.lut, False), path.toTemp('LUT.txt', False))
Example #13
def execute():  #pylint: disable=unused-variable
    import os, shutil
    from mrtrix3 import app, file, image, path, run  #pylint: disable=redefined-builtin

    lmax_option = ''
    if app.args.lmax:
        lmax_option = ' -lmax ' + app.args.lmax

    if app.args.max_iters < 2:
        app.error('Number of iterations must be at least 2')

    for iteration in range(0, app.args.max_iters):
        prefix = 'iter' + str(iteration) + '_'

        if iteration == 0:
            RF_in_path = 'init_RF.txt'
            mask_in_path = 'mask.mif'
            init_RF = '1 -1 1'
            with open(RF_in_path, 'w') as f:
                f.write(init_RF)
            iter_lmax_option = ' -lmax 4'
        else:
            RF_in_path = 'iter' + str(iteration - 1) + '_RF.txt'
            mask_in_path = 'iter' + str(iteration - 1) + '_SF_dilated.mif'
            iter_lmax_option = lmax_option

        # Run CSD
        run.command('dwi2fod csd dwi.mif ' + RF_in_path + ' ' + prefix +
                    'FOD.mif -mask ' + mask_in_path + iter_lmax_option)
        # Get amplitudes of two largest peaks, and direction of largest
        run.command('fod2fixel ' + prefix + 'FOD.mif ' + prefix +
                    'fixel -peak peaks.mif -mask ' + mask_in_path +
                    ' -fmls_no_thresholds')
        file.delTemporary(prefix + 'FOD.mif')
        if iteration:
            file.delTemporary(mask_in_path)
        run.command('fixel2voxel ' + prefix + 'fixel/peaks.mif split_data ' +
                    prefix + 'amps.mif -number 2')
        run.command('mrconvert ' + prefix + 'amps.mif ' + prefix +
                    'first_peaks.mif -coord 3 0 -axes 0,1,2')
        run.command('mrconvert ' + prefix + 'amps.mif ' + prefix +
                    'second_peaks.mif -coord 3 1 -axes 0,1,2')
        file.delTemporary(prefix + 'amps.mif')
        run.command('fixel2voxel ' + prefix +
                    'fixel/directions.mif split_dir ' + prefix +
                    'all_dirs.mif -number 1')
        file.delTemporary(prefix + 'fixel')
        run.command('mrconvert ' + prefix + 'all_dirs.mif ' + prefix +
                    'first_dir.mif -coord 3 0:2')
        file.delTemporary(prefix + 'all_dirs.mif')
        # Calculate the 'cost function' Donald derived for selecting single-fibre voxels
        # https://github.com/MRtrix3/mrtrix3/pull/426
        #  sqrt(|peak1|) * (1 - |peak2| / |peak1|)^2
        run.command('mrcalc ' + prefix + 'first_peaks.mif -sqrt 1 ' + prefix +
                    'second_peaks.mif ' + prefix +
                    'first_peaks.mif -div -sub 2 -pow -mult ' + prefix +
                    'CF.mif')
        file.delTemporary(prefix + 'first_peaks.mif')
        file.delTemporary(prefix + 'second_peaks.mif')
        # Select the top-ranked voxels
        run.command('mrthreshold ' + prefix + 'CF.mif -top ' +
                    str(app.args.sf_voxels) + ' ' + prefix + 'SF.mif')
        # Generate a new response function based on this selection
        run.command('amp2response dwi.mif ' + prefix + 'SF.mif ' + prefix +
                    'first_dir.mif ' + prefix + 'RF.txt' + iter_lmax_option)
        file.delTemporary(prefix + 'first_dir.mif')
        # Should we terminate?
        if iteration > 0:
            run.command('mrcalc ' + prefix + 'SF.mif iter' +
                        str(iteration - 1) + '_SF.mif -sub ' + prefix +
                        'SF_diff.mif')
            file.delTemporary('iter' + str(iteration - 1) + '_SF.mif')
            max_diff = image.statistic(prefix + 'SF_diff.mif', 'max')
            file.delTemporary(prefix + 'SF_diff.mif')
            if int(max_diff) == 0:
                app.console(
                    'Convergence of SF voxel selection detected at iteration '
                    + str(iteration))
                file.delTemporary(prefix + 'CF.mif')
                run.function(shutil.copyfile, prefix + 'RF.txt',
                             'response.txt')
                run.function(shutil.move, prefix + 'SF.mif', 'voxels.mif')
                break

        # Select a greater number of top single-fibre voxels, and dilate (within bounds of initial mask);
        #   these are the voxels that will be re-tested in the next iteration
        run.command('mrthreshold ' + prefix + 'CF.mif -top ' +
                    str(app.args.iter_voxels) +
                    ' - | maskfilter - dilate - -npass ' +
                    str(app.args.dilate) + ' | mrcalc mask.mif - -mult ' +
                    prefix + 'SF_dilated.mif')
        file.delTemporary(prefix + 'CF.mif')

    # Commence the next iteration

    # If terminating due to running out of iterations, still need to put the results in the appropriate location
    if not os.path.exists('response.txt'):
        app.console('Exiting after maximum ' + str(app.args.max_iters) +
                    ' iterations')
        run.function(shutil.copyfile,
                     'iter' + str(app.args.max_iters - 1) + '_RF.txt',
                     'response.txt')
        run.function(shutil.move,
                     'iter' + str(app.args.max_iters - 1) + '_SF.mif',
                     'voxels.mif')

    run.function(shutil.copyfile, 'response.txt',
                 path.fromUser(app.args.output, False))
Example #14
def function(self, func, *args, **kwargs):
    from mrtrix3 import run  #pylint: disable=import-outside-toplevel
    assert self.valid
    run.function(func, *args, **kwargs)
    self._increment()
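
This snippet is a method taken from a larger progress-tracking helper; self.valid and self._increment() are defined elsewhere in that class. A hypothetical minimal context, for illustration only (the class and attribute names are not the actual MRtrix3 implementation):

class ProgressWrapper(object):  # hypothetical container for the method above
    def __init__(self, num_steps):
        self.valid = True
        self.count = 0
        self.num_steps = num_steps
    def _increment(self):
        self.count += 1
        if self.count >= self.num_steps:
            self.valid = False
    def function(self, func, *args, **kwargs):
        from mrtrix3 import run  #pylint: disable=import-outside-toplevel
        assert self.valid
        run.function(func, *args, **kwargs)
        self._increment()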
Example #15
def execute(): #pylint: disable=unused-variable
  # Ideally want to use the oversampling-based regridding of the 5TT image from the SIFT model, not mrtransform
  # May need to commit 5ttregrid...

  # Verify input 5tt image
  verification_text = ''
  try:
    verification_text = run.command('5ttcheck 5tt.mif').stderr
  except run.MRtrixCmdError as except_5ttcheck:
    verification_text = except_5ttcheck.stderr
  if 'WARNING' in verification_text or 'ERROR' in verification_text:
    app.warn('Command 5ttcheck indicates problems with provided input 5TT image \'' + app.ARGS.in_5tt + '\':')
    for line in verification_text.splitlines():
      app.warn(line)
    app.warn('These may or may not interfere with the dwi2response msmt_5tt script')

  # Get shell information
  shells = [ int(round(float(x))) for x in image.mrinfo('dwi.mif', 'shell_bvalues').split() ]
  if len(shells) < 3:
    app.warn('Less than three b-values; response functions will not be applicable in resolving three tissues using MSMT-CSD algorithm')

  # Get lmax information (if provided)
  wm_lmax = [ ]
  if app.ARGS.lmax:
    wm_lmax = [ int(x.strip()) for x in app.ARGS.lmax.split(',') ]
    if not len(wm_lmax) == len(shells):
      raise MRtrixError('Number of manually-defined lmax\'s (' + str(len(wm_lmax)) + ') does not match number of b-values (' + str(len(shells)) + ')')
    for shell_l in wm_lmax:
      if shell_l % 2:
        raise MRtrixError('Values for lmax must be even')
      if shell_l < 0:
        raise MRtrixError('Values for lmax must be non-negative')

  run.command('dwi2tensor dwi.mif - -mask mask.mif | tensor2metric - -fa fa.mif -vector vector.mif')
  if not os.path.exists('dirs.mif'):
    run.function(shutil.copy, 'vector.mif', 'dirs.mif')
  run.command('mrtransform 5tt.mif 5tt_regrid.mif -template fa.mif -interp linear')

  # Basic tissue masks
  run.command('mrconvert 5tt_regrid.mif - -coord 3 2 -axes 0,1,2 | mrcalc - ' + str(app.ARGS.pvf) + ' -gt mask.mif -mult wm_mask.mif')
  run.command('mrconvert 5tt_regrid.mif - -coord 3 0 -axes 0,1,2 | mrcalc - ' + str(app.ARGS.pvf) + ' -gt fa.mif ' + str(app.ARGS.fa) + ' -lt -mult mask.mif -mult gm_mask.mif')
  run.command('mrconvert 5tt_regrid.mif - -coord 3 3 -axes 0,1,2 | mrcalc - ' + str(app.ARGS.pvf) + ' -gt fa.mif ' + str(app.ARGS.fa) + ' -lt -mult mask.mif -mult csf_mask.mif')

  # Revise WM mask to only include single-fibre voxels
  recursive_cleanup_option=''
  if not app.DO_CLEANUP:
    recursive_cleanup_option = ' -nocleanup'
  if not app.ARGS.sfwm_fa_threshold:
    app.console('Selecting WM single-fibre voxels using \'' + app.ARGS.wm_algo + '\' algorithm')
    run.command('dwi2response ' + app.ARGS.wm_algo + ' dwi.mif wm_ss_response.txt -mask wm_mask.mif -voxels wm_sf_mask.mif -scratch ' + path.quote(app.SCRATCH_DIR) + recursive_cleanup_option)
  else:
    app.console('Selecting WM single-fibre voxels using \'fa\' algorithm with a hard FA threshold of ' + str(app.ARGS.sfwm_fa_threshold))
    run.command('dwi2response fa dwi.mif wm_ss_response.txt -mask wm_mask.mif -threshold ' + str(app.ARGS.sfwm_fa_threshold) + ' -voxels wm_sf_mask.mif -scratch ' + path.quote(app.SCRATCH_DIR) + recursive_cleanup_option)

  # Check for empty masks
  wm_voxels  = image.statistics('wm_sf_mask.mif', mask='wm_sf_mask.mif').count
  gm_voxels  = image.statistics('gm_mask.mif',    mask='gm_mask.mif').count
  csf_voxels = image.statistics('csf_mask.mif',   mask='csf_mask.mif').count
  empty_masks = [ ]
  if not wm_voxels:
    empty_masks.append('WM')
  if not gm_voxels:
    empty_masks.append('GM')
  if not csf_voxels:
    empty_masks.append('CSF')
  if empty_masks:
    message = ','.join(empty_masks)
    message += ' tissue mask'
    if len(empty_masks) > 1:
      message += 's'
    message += ' empty; cannot estimate response function'
    if len(empty_masks) > 1:
      message += 's'
    raise MRtrixError(message)

  # For each of the three tissues, generate a multi-shell response
  bvalues_option = ' -shells ' + ','.join(map(str,shells))
  sfwm_lmax_option = ''
  if wm_lmax:
    sfwm_lmax_option = ' -lmax ' + ','.join(map(str,wm_lmax))
  run.command('amp2response dwi.mif wm_sf_mask.mif dirs.mif wm.txt' + bvalues_option + sfwm_lmax_option)
  run.command('amp2response dwi.mif gm_mask.mif dirs.mif gm.txt' + bvalues_option + ' -isotropic')
  run.command('amp2response dwi.mif csf_mask.mif dirs.mif csf.txt' + bvalues_option + ' -isotropic')
  run.function(shutil.copyfile, 'wm.txt',  path.from_user(app.ARGS.out_wm,  False))
  run.function(shutil.copyfile, 'gm.txt',  path.from_user(app.ARGS.out_gm,  False))
  run.function(shutil.copyfile, 'csf.txt', path.from_user(app.ARGS.out_csf, False))

  # Generate output 4D binary image with voxel selections; RGB as in MSMT-CSD paper
  run.command('mrcat csf_mask.mif gm_mask.mif wm_sf_mask.mif voxels.mif -axis 3')
  if app.ARGS.voxels:
    run.command('mrconvert voxels.mif ' + path.from_user(app.ARGS.voxels), mrconvert_keyval=path.from_user(app.ARGS.input, False), force=app.FORCE_OVERWRITE)
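
The three "Basic tissue masks" commands in Example #15 share a single pattern: extract one 5TT volume, threshold its partial volume fraction, optionally gate by FA, and intersect with the brain mask. A sketch expressing that pattern as a loop (the dictionary layout and variable names are illustrative; the commands produced are the same as above):

# Sketch of the per-tissue mask construction above, expressed as a loop.
# Volume indices and thresholds are exactly those used in the example.
tissues = { 'wm': (2, False), 'gm': (0, True), 'csf': (3, True) }
for name, (volume, fa_gate) in tissues.items():
  cmd = ('mrconvert 5tt_regrid.mif - -coord 3 ' + str(volume) + ' -axes 0,1,2 | '
         'mrcalc - ' + str(app.ARGS.pvf) + ' -gt ')
  if fa_gate:
    cmd += 'fa.mif ' + str(app.ARGS.fa) + ' -lt -mult '
  cmd += 'mask.mif -mult ' + name + '_mask.mif'
  run.command(cmd)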
Example #16
def execute(): #pylint: disable=unused-variable

  subject_dir = os.path.abspath(path.from_user(app.ARGS.input, False))
  if not os.path.isdir(subject_dir):
    raise MRtrixError('Input to hsvs algorithm must be a directory')
  surf_dir = os.path.join(subject_dir, 'surf')
  mri_dir = os.path.join(subject_dir, 'mri')
  check_dir(surf_dir)
  check_dir(mri_dir)
  #aparc_image = os.path.join(mri_dir, 'aparc+aseg.mgz')
  aparc_image = 'aparc.mif'
  mask_image = os.path.join(mri_dir, 'brainmask.mgz')
  reg_file = os.path.join(mri_dir, 'transforms', 'talairach.xfm')
  check_file(aparc_image)
  check_file(mask_image)
  check_file(reg_file)
  template_image = 'template.mif' if app.ARGS.template else aparc_image

  have_first = False
  have_fast = False
  fsl_path = os.environ.get('FSLDIR', '')
  if fsl_path:
    # Use brain-extracted, bias-corrected image for FSL tools
    norm_image = os.path.join(mri_dir, 'norm.mgz')
    check_file(norm_image)
    run.command('mrconvert ' + norm_image + ' T1.nii -stride -1,+2,+3')
    # Verify FAST availability
    try:
      fast_cmd = fsl.exe_name('fast')
    except MRtrixError:
      fast_cmd = None
    if fast_cmd:
      have_fast = True
      if fast_cmd == 'fast':
        fast_suffix = fsl.suffix()
      else:
        fast_suffix = '.nii.gz'
    else:
      app.warn('Could not find FSL program fast; script will not use fast for cerebellar tissue segmentation')
    # Verify FIRST availability
    try:
      first_cmd = fsl.exe_name('run_first_all')
    except MRtrixError:
      first_cmd = None
    first_atlas_path = os.path.join(fsl_path, 'data', 'first', 'models_336_bin')
    have_first = first_cmd and os.path.isdir(first_atlas_path)
  else:
    app.warn('Environment variable FSLDIR is not set; script will run without FSL components')

  acpc_string = 'anterior ' + ('& posterior commissures' if ATTEMPT_PC else 'commissure')
  have_acpcdetect = bool(find_executable('acpcdetect')) and 'ARTHOME' in os.environ
  if have_acpcdetect:
    if have_fast:
      app.console('ACPCdetect and FSL FAST will be used for explicit segmentation of ' + acpc_string)
    else:
      app.warn('ACPCdetect is installed, but FSL FAST not found; cannot segment ' + acpc_string)
      have_acpcdetect = False
  else:
    app.warn('ACPCdetect not installed; cannot segment ' + acpc_string)

  # Need to perform a better search for hippocampal subfield output: names & version numbers may change
  have_hipp_subfields = False
  hipp_subfield_has_amyg = False
  # Could result in multiple matches
  hipp_subfield_regex = re.compile(r'^[lr]h\.hippo[a-zA-Z]*Labels-[a-zA-Z0-9]*\.v[0-9]+\.?[a-zA-Z0-9]*\.mg[hz]$')
  hipp_subfield_all_images = sorted(list(filter(hipp_subfield_regex.match, os.listdir(mri_dir))))
  # Remove any images that provide segmentations in FreeSurfer voxel space; we want the high-resolution versions
  hipp_subfield_all_images = [ item for item in hipp_subfield_all_images if 'FSvoxelSpace' not in item ]
  # Arrange the images into lr pairs
  hipp_subfield_paired_images = [ ]
  for lh_filename in [ item for item in hipp_subfield_all_images if item[0] == 'l' ]:
    if 'r' + lh_filename[1:] in hipp_subfield_all_images:
      hipp_subfield_paired_images.append(lh_filename[1:])
  # Choose which of these image pairs we are going to use
  for code in [ '.CA.', '.FS60.' ]:
    if any(code in filename for filename in hipp_subfield_paired_images):
      hipp_subfield_image_suffix = [ filename for filename in hipp_subfield_paired_images if code in filename ][0]
      have_hipp_subfields = True
      break
  # Choose the pair with the shortest filename string if we have no other criteria
  if not have_hipp_subfields and hipp_subfield_paired_images:
    hipp_subfield_paired_images = sorted(hipp_subfield_paired_images, key=len)
    if hipp_subfield_paired_images:
      hipp_subfield_image_suffix = hipp_subfield_paired_images[0]
      have_hipp_subfields = True
  if have_hipp_subfields:
    hipp_subfield_has_amyg = 'Amyg' in hipp_subfield_image_suffix

  # Perform a similar search for thalamic nuclei submodule output
  thal_nuclei_image = None
  thal_nuclei_regex = re.compile(r'^ThalamicNuclei\.v[0-9]+\.?[a-zA-Z0-9]*.mg[hz]$')
  thal_nuclei_all_images = sorted(list(filter(thal_nuclei_regex.match, os.listdir(mri_dir))))
  thal_nuclei_all_images = [ item for item in thal_nuclei_all_images if 'FSvoxelSpace' not in item ]
  if thal_nuclei_all_images:
    if len(thal_nuclei_all_images) == 1:
      thal_nuclei_image = thal_nuclei_all_images[0]
    else:
      # How to choose which version to use?
      # Start with software version
      thal_nuclei_versions = [ int(item.split('.')[1].lstrip('v')) for item in thal_nuclei_all_images ]
      thal_nuclei_all_images = [ filepath for filepath, version_number in zip(thal_nuclei_all_images, thal_nuclei_versions) if version_number == max(thal_nuclei_versions) ]
      if len(thal_nuclei_all_images) == 1:
        thal_nuclei_image = thal_nuclei_all_images[0]
      else:
        # Revert to filename length
        thal_nuclei_all_images = sorted(thal_nuclei_all_images, key=len)
        thal_nuclei_image = thal_nuclei_all_images[0]

  # If particular hippocampal segmentation method is requested, make sure we can perform such;
  #   if not, decide how to segment hippocampus based on what's available
  hippocampi_method = app.ARGS.hippocampi
  if hippocampi_method:
    if hippocampi_method == 'subfields':
      if not have_hipp_subfields:
        raise MRtrixError('Could not isolate hippocampal subfields module output (candidate images: ' + str(hipp_subfield_all_images) + ')')
    elif hippocampi_method == 'first':
      if not have_first:
        raise MRtrixError('Cannot use "first" method for hippocampi segmentation; check FSL installation')
  else:
    if have_hipp_subfields:
      hippocampi_method = 'subfields'
      app.console('Hippocampal subfields module output detected; will utilise for hippocampi '
                  + ('and amygdalae ' if hipp_subfield_has_amyg else '')
                  + 'segmentation')
    elif have_first:
      hippocampi_method = 'first'
      app.console('No hippocampal subfields module output detected, but FSL FIRST is installed; '
                  'will utilise latter for hippocampi segmentation')
    else:
      hippocampi_method = 'aseg'
      app.console('Neither hippocampal subfields module output nor FSL FIRST detected; '
                  'FreeSurfer aseg will be used for hippocampi segmentation')

  if hippocampi_method == 'subfields':
    if 'FREESURFER_HOME' not in os.environ:
      raise MRtrixError('FREESURFER_HOME environment variable not set; required for use of hippocampal subfields module')
    freesurfer_lut_file = os.path.join(os.environ['FREESURFER_HOME'], 'FreeSurferColorLUT.txt')
    check_file(freesurfer_lut_file)
    hipp_lut_file = os.path.join(path.shared_data_path(), path.script_subdir_name(), 'hsvs', 'HippSubfields.txt')
    check_file(hipp_lut_file)
    if hipp_subfield_has_amyg:
      amyg_lut_file = os.path.join(path.shared_data_path(), path.script_subdir_name(), 'hsvs', 'AmygSubfields.txt')
      check_file(amyg_lut_file)

  if app.ARGS.sgm_amyg_hipp:
    app.warn('Option -sgm_amyg_hipp ignored '
             '(hsvs algorithm always assigns hippocampi & amygdalae as sub-cortical grey matter)')


  # Similar logic for thalami
  thalami_method = app.ARGS.thalami
  if thalami_method:
    if thalami_method == 'nuclei':
      if not thal_nuclei_image:
        raise MRtrixError('Could not find thalamic nuclei module output')
    elif thalami_method == 'first':
      if not have_first:
        raise MRtrixError('Cannot use "first" method for thalami segmentation; check FSL installation')
  else:
    # Not happy with outputs of thalamic nuclei submodule; default to FIRST
    if have_first:
      thalami_method = 'first'
      if thal_nuclei_image:
        app.console('Thalamic nuclei submodule output ignored in favour of FSL FIRST '
                    '(can override using -thalami option)')
      else:
        app.console('Will utilise FSL FIRST for thalami segmentation')
    elif thal_nuclei_image:
      thalami_method = 'nuclei'
      app.console('Will utilise detected thalamic nuclei submodule output')
    else:
      thalami_method = 'aseg'
      app.console('Neither thalamic nuclei module output nor FSL FIRST detected; '
                  'FreeSurfer aseg will be used for thalami segmentation')


  ###########################
  # Commencing segmentation #
  ###########################

  tissue_images = [ [ 'lh.pial.mif', 'rh.pial.mif' ],
                    [],
                    [ 'lh.white.mif', 'rh.white.mif' ],
                    [],
                    [] ]

  # Get the main cerebrum segments; these are already smooth
  progress = app.ProgressBar('Mapping FreeSurfer cortical reconstruction to partial volume images', 8)
  for hemi in [ 'lh', 'rh' ]:
    for basename in [ hemi+'.white', hemi+'.pial' ]:
      filepath = os.path.join(surf_dir, basename)
      check_file(filepath)
      transformed_path = basename + '_realspace.obj'
      run.command('meshconvert ' + filepath + ' ' + transformed_path + ' -binary -transform fs2real ' + aparc_image)
      progress.increment()
      run.command('mesh2voxel ' + transformed_path + ' ' + template_image + ' ' + basename + '.mif')
      app.cleanup(transformed_path)
      progress.increment()
  progress.done()



  # Get other structures that need to be converted from the aseg voxel image
  from_aseg = list(ASEG_STRUCTURES)
  if hippocampi_method == 'subfields':
    if not hipp_subfield_has_amyg and not have_first:
      from_aseg.extend(AMYG_ASEG)
  elif hippocampi_method == 'aseg':
    from_aseg.extend(HIPP_ASEG)
    from_aseg.extend(AMYG_ASEG)
  if thalami_method == 'aseg':
    from_aseg.extend(THAL_ASEG)
  if not have_first:
    from_aseg.extend(OTHER_SGM_ASEG)
  progress = app.ProgressBar('Smoothing non-cortical structures segmented by FreeSurfer', len(from_aseg) + 2)
  for (index, tissue, name) in from_aseg:
    init_mesh_path = name + '_init.vtk'
    smoothed_mesh_path = name + '.vtk'
    run.command('mrcalc ' + aparc_image + ' ' + str(index) + ' -eq - | voxel2mesh - -threshold 0.5 ' + init_mesh_path)
    run.command('meshfilter ' + init_mesh_path + ' smooth ' + smoothed_mesh_path)
    app.cleanup(init_mesh_path)
    run.command('mesh2voxel ' + smoothed_mesh_path + ' ' + template_image + ' ' + name + '.mif')
    app.cleanup(smoothed_mesh_path)
    tissue_images[tissue-1].append(name + '.mif')
    progress.increment()
  # Lateral ventricles are separate as we want to combine with choroid plexus prior to mesh conversion
  for hemi_index, hemi_name in enumerate(['Left', 'Right']):
    name = hemi_name + '_LatVent_ChorPlex'
    init_mesh_path = name + '_init.vtk'
    smoothed_mesh_path = name + '.vtk'
    run.command('mrcalc ' + ' '.join(aparc_image + ' ' + str(index) + ' -eq' for index, tissue, name in VENTRICLE_CP_ASEG[hemi_index]) + ' -add - | '
                + 'voxel2mesh - -threshold 0.5 ' + init_mesh_path)
    run.command('meshfilter ' + init_mesh_path + ' smooth ' + smoothed_mesh_path)
    app.cleanup(init_mesh_path)
    run.command('mesh2voxel ' + smoothed_mesh_path + ' ' + template_image + ' ' + name + '.mif')
    app.cleanup(smoothed_mesh_path)
    tissue_images[3].append(name + '.mif')
    progress.increment()
  progress.done()



  # Combine corpus callosum segments before smoothing
  progress = app.ProgressBar('Combining and smoothing corpus callosum segmentation', len(CORPUS_CALLOSUM_ASEG) + 3)
  for (index, name) in CORPUS_CALLOSUM_ASEG:
    run.command('mrcalc ' + aparc_image + ' ' + str(index) + ' -eq ' + name + '.mif -datatype bit')
    progress.increment()
  cc_init_mesh_path = 'combined_corpus_callosum_init.vtk'
  cc_smoothed_mesh_path = 'combined_corpus_callosum.vtk'
  run.command('mrmath ' + ' '.join([ name + '.mif' for (index, name) in CORPUS_CALLOSUM_ASEG ]) + ' sum - | voxel2mesh - -threshold 0.5 ' + cc_init_mesh_path)
  for name in [ n for _, n in CORPUS_CALLOSUM_ASEG ]:
    app.cleanup(name + '.mif')
  progress.increment()
  run.command('meshfilter ' + cc_init_mesh_path + ' smooth ' + cc_smoothed_mesh_path)
  app.cleanup(cc_init_mesh_path)
  progress.increment()
  run.command('mesh2voxel ' + cc_smoothed_mesh_path + ' ' + template_image + ' combined_corpus_callosum.mif')
  app.cleanup(cc_smoothed_mesh_path)
  progress.done()
  tissue_images[2].append('combined_corpus_callosum.mif')



  # Deal with brain stem, including determining those voxels that should
  #   be erased from the 5TT image in order for streamlines traversing down
  #   the spinal column to be terminated & accepted
  bs_fullmask_path = 'brain_stem_init.mif'
  bs_cropmask_path = ''
  progress = app.ProgressBar('Segmenting and cropping brain stem', 5)
  run.command('mrcalc ' + aparc_image + ' ' + str(BRAIN_STEM_ASEG[0][0]) + ' -eq '
              + ' -add '.join([ aparc_image + ' ' + str(index) + ' -eq' for index, name in BRAIN_STEM_ASEG[1:] ]) + ' -add '
              + bs_fullmask_path + ' -datatype bit')
  progress.increment()
  bs_init_mesh_path = 'brain_stem_init.vtk'
  run.command('voxel2mesh ' + bs_fullmask_path + ' ' + bs_init_mesh_path)
  progress.increment()
  bs_smoothed_mesh_path = 'brain_stem.vtk'
  run.command('meshfilter ' + bs_init_mesh_path + ' smooth ' + bs_smoothed_mesh_path)
  app.cleanup(bs_init_mesh_path)
  progress.increment()
  run.command('mesh2voxel ' + bs_smoothed_mesh_path + ' ' + template_image + ' brain_stem.mif')
  app.cleanup(bs_smoothed_mesh_path)
  progress.increment()
  fourthventricle_zmin = min([ int(line.split()[2]) for line in run.command('maskdump 4th-Ventricle.mif')[0].splitlines() ])
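  # If the 4th ventricle reaches slice 0, no crop mask is generated; otherwise,
  # zero the brain-stem partial-volume image in slices 0 to fourthventricle_zmin-1
  # along axis 2 and binarise the (original minus edited) difference, marking the
  # inferior brain-stem voxels to be erased from the 5TT image (see comment above)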
  if fourthventricle_zmin:
    bs_cropmask_path = 'brain_stem_crop.mif'
    run.command('mredit brain_stem.mif - ' + ' '.join([ '-plane 2 ' + str(index) + ' 0' for index in range(0, fourthventricle_zmin) ]) + ' | '
                'mrcalc brain_stem.mif - -sub 1e-6 -gt ' + bs_cropmask_path + ' -datatype bit')
  app.cleanup(bs_fullmask_path)
  progress.done()


  if hippocampi_method == 'subfields':
    progress = app.ProgressBar('Using detected FreeSurfer hippocampal subfields module output',
                               64 if hipp_subfield_has_amyg else 32)

    subfields = [ ( hipp_lut_file, 'hipp' ) ]
    if hipp_subfield_has_amyg:
      subfields.append(( amyg_lut_file, 'amyg' ))

    for subfields_lut_file, structure_name in subfields:
      for hemi, filename in zip([ 'Left', 'Right'], [ prefix + hipp_subfield_image_suffix for prefix in [ 'l', 'r' ] ]):
        # Extract individual components from image and assign to different tissues
        subfields_all_tissues_image = hemi + '_' + structure_name + '_subfields.mif'
        run.command('labelconvert ' + os.path.join(mri_dir, filename) + ' ' + freesurfer_lut_file + ' ' + subfields_lut_file + ' ' + subfields_all_tissues_image)
        progress.increment()
        for tissue in range(0, 5):
          init_mesh_path = hemi + '_' + structure_name + '_subfield_' + str(tissue) + '_init.vtk'
          smooth_mesh_path = hemi + '_' + structure_name + '_subfield_' + str(tissue) + '.vtk'
          subfield_tissue_image = hemi + '_' + structure_name + '_subfield_' + str(tissue) + '.mif'
          run.command('mrcalc ' + subfields_all_tissues_image + ' ' + str(tissue+1) + ' -eq - | ' + \
                      'voxel2mesh - ' + init_mesh_path)
          progress.increment()
          # Since the hippocampal subfields segmentation can include some fine structures, reduce the extent of smoothing
          run.command('meshfilter ' + init_mesh_path + ' smooth ' + smooth_mesh_path + ' -smooth_spatial 2 -smooth_influence 2')
          app.cleanup(init_mesh_path)
          progress.increment()
          run.command('mesh2voxel ' + smooth_mesh_path + ' ' + template_image + ' ' + subfield_tissue_image)
          app.cleanup(smooth_mesh_path)
          progress.increment()
          tissue_images[tissue].append(subfield_tissue_image)
        app.cleanup(subfields_all_tissues_image)
    progress.done()


  if thalami_method == 'nuclei':
    progress = app.ProgressBar('Using detected FreeSurfer thalamic nuclei module output', 6)
    for hemi in ['Left', 'Right']:
      thal_mask_path = hemi + '_Thalamus_mask.mif'
      init_mesh_path = hemi + '_Thalamus_init.vtk'
      smooth_mesh_path = hemi + '_Thalamus.vtk'
      thalamus_image = hemi + '_Thalamus.mif'
      if hemi == 'Right':
        run.command('mrthreshold ' + os.path.join(mri_dir, thal_nuclei_image) + ' -abs 8200 ' + thal_mask_path)
      else:
        run.command('mrcalc ' + os.path.join(mri_dir, thal_nuclei_image) + ' 0 -gt '
                    + os.path.join(mri_dir, thal_nuclei_image) + ' 8200 -lt '
                    + '-mult ' + thal_mask_path)
      run.command('voxel2mesh ' + thal_mask_path + ' ' + init_mesh_path)
      app.cleanup(thal_mask_path)
      progress.increment()
      run.command('meshfilter ' + init_mesh_path + ' smooth ' + smooth_mesh_path + ' -smooth_spatial 2 -smooth_influence 2')
      app.cleanup(init_mesh_path)
      progress.increment()
      run.command('mesh2voxel ' + smooth_mesh_path + ' ' + template_image + ' ' + thalamus_image)
      app.cleanup(smooth_mesh_path)
      progress.increment()
      tissue_images[1].append(thalamus_image)
    progress.done()

  if have_first:
    app.console('Running FSL FIRST to segment sub-cortical grey matter structures')
    from_first = SGM_FIRST_MAP.copy()
    if hippocampi_method == 'subfields':
      from_first = { key: value for key, value in from_first.items() if 'Hippocampus' not in value }
      if hipp_subfield_has_amyg:
        from_first = { key: value for key, value in from_first.items() if 'Amygdala' not in value }
    elif hippocampi_method == 'aseg':
      from_first = { key: value for key, value in from_first.items() if 'Hippocampus' not in value and 'Amygdala' not in value }
    if thalami_method != 'first':
      from_first = { key: value for key, value in from_first.items() if 'Thalamus' not in value }
    run.command(first_cmd + ' -s ' + ','.join(from_first.keys()) + ' -i T1.nii -b -o first')
    fsl.check_first('first', from_first.keys())
    app.cleanup(glob.glob('T1_to_std_sub.*'))
    progress = app.ProgressBar('Mapping FIRST segmentations to image', 2*len(from_first))
    for key, value in from_first.items():
      vtk_in_path = 'first-' + key + '_first.vtk'
      vtk_converted_path = 'first-' + key + '_transformed.vtk'
      run.command('meshconvert ' + vtk_in_path + ' ' + vtk_converted_path + ' -transform first2real T1.nii')
      app.cleanup(vtk_in_path)
      progress.increment()
      run.command('mesh2voxel ' + vtk_converted_path + ' ' + template_image + ' ' + value + '.mif')
      app.cleanup(vtk_converted_path)
      tissue_images[1].append(value + '.mif')
      progress.increment()
    if not have_fast:
      app.cleanup('T1.nii')
    app.cleanup(glob.glob('first*'))
    progress.done()

  # Run ACPCdetect, use results to draw spherical ROIs on T1 that will be fed to FSL FAST,
  #   the WM components of which will then be added to the 5TT
  if have_acpcdetect:
    progress = app.ProgressBar('Using ACPCdetect and FAST to segment ' + acpc_string, 5)
    # ACPCdetect requires input image to be 16-bit
    # We also want to realign to RAS beforehand so that we can interpret the output voxel locations properly
    acpcdetect_input_image = 'T1RAS_16b.nii'
    run.command('mrconvert ' + norm_image + ' -datatype uint16 -stride +1,+2,+3 ' + acpcdetect_input_image)
    progress.increment()
    run.command('acpcdetect -i ' + acpcdetect_input_image)
    progress.increment()
    # We need the header in order to go from voxel coordinates to scanner coordinates
    acpcdetect_input_header = image.Header(acpcdetect_input_image)
    acpcdetect_output_path = os.path.splitext(acpcdetect_input_image)[0] + '_ACPC.txt'
    app.cleanup(acpcdetect_input_image)
    with open(acpcdetect_output_path, 'r') as acpc_file:
      acpcdetect_output_data = acpc_file.read().splitlines()
    app.cleanup(glob.glob(os.path.splitext(acpcdetect_input_image)[0] + "*"))
    # Need to scan through the contents of this file,
    #   isolating the AC and PC locations
    ac_voxel = pc_voxel = None
    for index, line in enumerate(acpcdetect_output_data):
      if 'AC' in line and 'voxel location' in line:
        ac_voxel = [float(item) for item in acpcdetect_output_data[index+1].strip().split()]
      elif 'PC' in line and 'voxel location' in line:
        pc_voxel = [float(item) for item in acpcdetect_output_data[index+1].strip().split()]
    if not ac_voxel or not pc_voxel:
      raise MRtrixError('Error parsing text file from "acpcdetect"')

    def voxel2scanner(voxel, header):
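      # A hedged reading of this helper: the header's transform() is a 4x4 affine whose upper-left
      # 3x3 maps voxel indices (scaled by the voxel spacing) onto the scanner axes, and whose fourth
      # column is the translation, i.e.:
      #   scanner[axis] = sum_k transform[axis][k] * spacing[k] * voxel[k] + transform[axis][3]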
      return [ voxel[0]*header.spacing()[0]*header.transform()[axis][0]
               + voxel[1]*header.spacing()[1]*header.transform()[axis][1]
               + voxel[2]*header.spacing()[2]*header.transform()[axis][2]
               + header.transform()[axis][3]
               for axis in range(0,3) ]

    ac_scanner = voxel2scanner(ac_voxel, acpcdetect_input_header)
    pc_scanner = voxel2scanner(pc_voxel, acpcdetect_input_header)

    # Generate the mask image within which FAST will be run
    acpc_prefix = 'ACPC' if ATTEMPT_PC else 'AC'
    acpc_mask_image = acpc_prefix + '_FAST_mask.mif'
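    # In the command below the template is first zeroed (comparing against NaN is always false),
    # then spheres of value 1 are drawn in scanner space: radius 8 around the AC and, if ATTEMPT_PC
    # is set, radius 5 around the PC; this becomes the mask within which FAST is run.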
    run.command('mrcalc ' + template_image + ' nan -eq - | '
                'mredit - ' + acpc_mask_image + ' -scanner '
                '-sphere ' + ','.join(str(value) for value in ac_scanner) + ' 8 1 '
                + ('-sphere ' + ','.join(str(value) for value in pc_scanner) + ' 5 1' if ATTEMPT_PC else ''))
    progress.increment()

    acpc_t1_masked_image = acpc_prefix + '_T1.nii'
    run.command('mrtransform ' + norm_image + ' -template ' + template_image + ' - | '
                'mrcalc - ' + acpc_mask_image + ' -mult ' + acpc_t1_masked_image)
    app.cleanup(acpc_mask_image)
    progress.increment()

    run.command(fast_cmd + ' -N ' + acpc_t1_masked_image)
    app.cleanup(acpc_t1_masked_image)
    progress.increment()

    # Ideally don't want to have to add these manually; instead add all outputs from FAST
    #   to the 5TT (both cerebellum and AC / PC) in a single go
    # This should involve grabbing just the WM component of these images
    # Actually, in retrospect, it may be preferable to do the AC PC segmentation
    #   earlier on, and simply add them to the list of WM structures
    acpc_wm_image = acpc_prefix + '.mif'
    run.command('mrconvert ' + fsl.find_image(acpc_prefix + '_T1_pve_2') + ' ' + acpc_wm_image)
    tissue_images[2].append(acpc_wm_image)
    app.cleanup(glob.glob(os.path.splitext(acpc_t1_masked_image)[0] + '*'))
    progress.done()


  # If we don't have FAST, do cerebellar segmentation in a comparable way to the cortical GM / WM:
  #   Generate one 'pial-like' surface containing the GM and WM of the cerebellum,
  #   and another with just the WM
  if not have_fast:
    progress = app.ProgressBar('Adding FreeSurfer cerebellar segmentations directly', 6)
    for hemi in [ 'Left-', 'Right-' ]:
      wm_index = [ index for index, tissue, name in CEREBELLUM_ASEG if name.startswith(hemi) and 'White' in name ][0]
      gm_index = [ index for index, tissue, name in CEREBELLUM_ASEG if name.startswith(hemi) and 'Cortex' in name ][0]
      run.command('mrcalc ' + aparc_image + ' ' + str(wm_index) + ' -eq ' + aparc_image + ' ' + str(gm_index) + ' -eq -add - | ' + \
                  'voxel2mesh - ' + hemi + 'cerebellum_all_init.vtk')
      progress.increment()
      run.command('mrcalc ' + aparc_image + ' ' + str(gm_index) + ' -eq - | ' + \
                  'voxel2mesh - ' + hemi + 'cerebellum_grey_init.vtk')
      progress.increment()
      for name, tissue in { 'all':2, 'grey':1 }.items():
        run.command('meshfilter ' + hemi + 'cerebellum_' + name + '_init.vtk smooth ' + hemi + 'cerebellum_' + name + '.vtk')
        app.cleanup(hemi + 'cerebellum_' + name + '_init.vtk')
        progress.increment()
        run.command('mesh2voxel ' + hemi + 'cerebellum_' + name + '.vtk ' + template_image + ' ' + hemi + 'cerebellum_' + name + '.mif')
        app.cleanup(hemi + 'cerebellum_' + name + '.vtk')
        progress.increment()
        tissue_images[tissue].append(hemi + 'cerebellum_' + name + '.mif')
    progress.done()


  # Construct images with the partial volume of each tissue
  progress = app.ProgressBar('Combining segmentations of all structures corresponding to each tissue type', 5)
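  # For each of the five tissue types, the per-structure partial volume images are summed and the
  # result clamped to 1.0; brain_stem.mif is included in the WM (tissue index 2) sum at this stage.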
  for tissue in range(0,5):
    run.command('mrmath ' + ' '.join(tissue_images[tissue]) + (' brain_stem.mif' if tissue == 2 else '') + ' sum - | mrcalc - 1.0 -min tissue' + str(tissue) + '_init.mif')
    app.cleanup(tissue_images[tissue])
    progress.increment()
  progress.done()


  # This can hopefully be done with a connected-component analysis: Take just the WM image, and
  #   fill in any gaps (i.e. select the inverse, select the largest connected component, invert again)
  # Make sure that floating-point values are handled appropriately
  # Combine these images together using the appropriate logic in order to form the 5TT image
  progress = app.ProgressBar('Modulating segmentation images based on other tissues', 9)
  tissue_images = [ 'tissue0.mif', 'tissue1.mif', 'tissue2.mif', 'tissue3.mif', 'tissue4.mif' ]
  run.function(os.rename, 'tissue4_init.mif', 'tissue4.mif')
  progress.increment()
  run.command('mrcalc tissue3_init.mif tissue3_init.mif ' + tissue_images[4] + ' -add 1.0 -sub 0.0 -max -sub 0.0 -max ' + tissue_images[3])
  app.cleanup('tissue3_init.mif')
  progress.increment()
  run.command('mrmath ' + ' '.join(tissue_images[3:5]) + ' sum tissuesum_34.mif')
  progress.increment()
  run.command('mrcalc tissue1_init.mif tissue1_init.mif tissuesum_34.mif -add 1.0 -sub 0.0 -max -sub 0.0 -max ' + tissue_images[1])
  app.cleanup('tissue1_init.mif')
  app.cleanup('tissuesum_34.mif')
  progress.increment()
  run.command('mrmath ' + tissue_images[1] + ' ' + ' '.join(tissue_images[3:5]) + ' sum tissuesum_134.mif')
  progress.increment()
  run.command('mrcalc tissue2_init.mif tissue2_init.mif tissuesum_134.mif -add 1.0 -sub 0.0 -max -sub 0.0 -max ' + tissue_images[2])
  app.cleanup('tissue2_init.mif')
  app.cleanup('tissuesum_134.mif')
  progress.increment()
  run.command('mrmath ' + ' '.join(tissue_images[1:5]) + ' sum tissuesum_1234.mif')
  progress.increment()
  run.command('mrcalc tissue0_init.mif tissue0_init.mif tissuesum_1234.mif -add 1.0 -sub 0.0 -max -sub 0.0 -max ' + tissue_images[0])
  app.cleanup('tissue0_init.mif')
  app.cleanup('tissuesum_1234.mif')
  progress.increment()
  tissue_sum_image = 'tissuesum_01234.mif'
  run.command('mrmath ' + ' '.join(tissue_images) + ' sum ' + tissue_sum_image)
  progress.done()
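  # A hedged reading of the mrcalc expressions above: tissues are finalised in the priority order
  # 4 (pathology) > 3 (CSF) > 1 (sub-cortical GM) > 2 (WM) > 0 (cortical GM), with each update
  #   tissue = max(tissue_init - max(tissue_init + sum_of_already_finalised_tissues - 1.0, 0.0), 0.0)
  # so that the per-voxel total across tissue volumes never exceeds 1.0.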


  if app.ARGS.template:
    run.command('mrtransform ' + mask_image + ' -template template.mif - | mrthreshold - brainmask.mif -abs 0.5')
    mask_image = 'brainmask.mif'


  # Branch depending on whether or not FSL fast will be used to re-segment the cerebellum
  if have_fast:

    # How to support -template option?
    # - Re-grid norm.mgz to template image before running FAST
    # - Re-grid FAST output to template image
    # Consider splitting, including initial mapping of cerebellar regions:
    # - If we're not using a separate template image, just map cerebellar regions to voxels to
    #   produce a mask, and run FAST within that mask
    # - If we have a template, combine cerebellar regions, convert to surfaces (one per hemisphere),
    #   map these to the template image, run FIRST on a binary mask from this, then
    #   re-combine this with the tissue maps from other sources based on the estimated PVF of
    #   cerebellum meshes
    cerebellum_volume_image = 'Cerebellum_volume.mif'
    cerebellum_mask_image = 'Cerebellum_mask.mif'
    t1_cerebellum_masked = 'T1_cerebellum_precrop.mif'
    if app.ARGS.template:

      # If this is the case, then we haven't yet performed any cerebellar segmentation / meshing
      # What we want to do is: for each hemisphere, combine all three "cerebellar" segments from FreeSurfer,
      #   convert to a surface, map that surface to the template image
      progress = app.ProgressBar('Preparing images of cerebellum for intensity-based segmentation', 9)
      cerebellar_hemi_pvf_images = [ ]
      for hemi in [ 'Left', 'Right' ]:
        init_mesh_path = hemi + '-Cerebellum-All-Init.vtk'
        smooth_mesh_path = hemi + '-Cerebellum-All-Smooth.vtk'
        pvf_image_path = hemi + '-Cerebellum-PVF-Template.mif'
        cerebellum_aseg_hemi = [ entry for entry in CEREBELLUM_ASEG if hemi in entry[2] ]
        run.command('mrcalc ' + aparc_image + ' ' + str(cerebellum_aseg_hemi[0][0]) + ' -eq ' + \
                    ' -add '.join([ aparc_image + ' ' + str(index) + ' -eq' for index, tissue, name in cerebellum_aseg_hemi[1:] ]) + ' -add - | ' + \
                    'voxel2mesh - ' + init_mesh_path)
        progress.increment()
        run.command('meshfilter ' + init_mesh_path + ' smooth ' + smooth_mesh_path)
        app.cleanup(init_mesh_path)
        progress.increment()
        run.command('mesh2voxel ' + smooth_mesh_path + ' ' + template_image + ' ' + pvf_image_path)
        app.cleanup(smooth_mesh_path)
        cerebellar_hemi_pvf_images.append(pvf_image_path)
        progress.increment()

      # Combine the two hemispheres together into:
      # - An image in preparation for running FAST
      # - A combined total partial volume fraction image that will be later used for tissue recombination
      run.command('mrcalc ' + ' '.join(cerebellar_hemi_pvf_images) + ' -add 1.0 -min ' + cerebellum_volume_image)
      app.cleanup(cerebellar_hemi_pvf_images)
      progress.increment()

      run.command('mrthreshold ' + cerebellum_volume_image + ' ' + cerebellum_mask_image + ' -abs 1e-6')
      progress.increment()
      run.command('mrtransform ' + norm_image + ' -template ' + template_image + ' - | ' + \
                  'mrcalc - ' + cerebellum_mask_image + ' -mult ' + t1_cerebellum_masked)
      progress.done()

    else:
      app.console('Preparing images of cerebellum for intensity-based segmentation')
      run.command('mrcalc ' + aparc_image + ' ' + str(CEREBELLUM_ASEG[0][0]) + ' -eq ' + \
                  ' -add '.join([ aparc_image + ' ' + str(index) + ' -eq' for index, tissue, name in CEREBELLUM_ASEG[1:] ]) + ' -add ' + \
                  cerebellum_volume_image)
      cerebellum_mask_image = cerebellum_volume_image
      run.command('mrcalc T1.nii ' + cerebellum_mask_image + ' -mult ' + t1_cerebellum_masked)

    app.cleanup('T1.nii')

    # Any code below here should be compatible with cerebellum_volume_image containing partial volume fractions
    #   (in the case of no explicit template image, it's a mask, but the logic still applies)

    app.console('Running FSL fast to segment the cerebellum based on intensity information')

    # Run FSL FAST just within the cerebellum
    # FAST memory usage can also be huge when using a high-resolution template image:
    #   Crop T1 image around the cerebellum before feeding to FAST, then re-sample to full template image FoV
    fast_input_image = 'T1_cerebellum.nii'
    run.command('mrgrid ' + t1_cerebellum_masked + ' crop -mask ' + cerebellum_mask_image + ' ' + fast_input_image)
    app.cleanup(t1_cerebellum_masked)
    # Cleanup of cerebellum_mask_image:
    #   May be same image as cerebellum_volume_image, which is required later
    if cerebellum_mask_image != cerebellum_volume_image:
      app.cleanup(cerebellum_mask_image)
    run.command(fast_cmd + ' -N ' + fast_input_image)
    app.cleanup(fast_input_image)

    # Use glob to clean up unwanted FAST outputs
    fast_output_prefix = os.path.splitext(fast_input_image)[0]
    fast_pve_output_prefix = fast_output_prefix + '_pve_'
    app.cleanup([ entry for entry in glob.glob(fast_output_prefix + '*') if fast_pve_output_prefix not in entry ])

    progress = app.ProgressBar('Introducing intensity-based cerebellar segmentation into the 5TT image', 10)
    fast_outputs_cropped = [ fast_pve_output_prefix + str(n) + fast_suffix for n in range(0,3) ]
    fast_outputs_template = [ 'FAST_' + str(n) + '.mif' for n in range(0,3) ]
    for inpath, outpath in zip(fast_outputs_cropped, fast_outputs_template):
      run.command('mrtransform ' + inpath + ' -interp nearest -template ' + template_image + ' ' + outpath)
      app.cleanup(inpath)
      progress.increment()
    if app.ARGS.template:
      app.cleanup(template_image)

    # Generate the revised tissue images, using output from FAST inside the cerebellum and
    #   output from previous processing everywhere else
    # Note that the middle intensity (grey matter) in the FAST output here gets assigned
    #   to the sub-cortical grey matter component

    # Some of these voxels may have existing non-zero tissue components.
    # In that case, let's find a multiplier to apply to cerebellum tissues such that the
    #   sum does not exceed 1.0
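    # Assumed FAST output convention for a default T1 segmentation: pve_0 = CSF, pve_1 = GM, pve_2 = WM;
    # hence pve_1 is added to the sub-cortical GM volume and pve_0 to the CSF volume below.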
    new_tissue_images = [ 'tissue0_fast.mif', 'tissue1_fast.mif', 'tissue2_fast.mif', 'tissue3_fast.mif', 'tissue4_fast.mif' ]
    new_tissue_sum_image = 'tissuesum_01234_fast.mif'
    cerebellum_multiplier_image = 'Cerebellar_multiplier.mif'
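    # A hedged reading of the command below: where the combined cerebellum PVF plus the existing
    # tissue sum exceeds 0.5, the multiplier is (1.0 - existing tissue sum), otherwise 0.0; scaling
    # the FAST PVFs by this keeps the per-voxel tissue total at or below 1.0.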
    run.command('mrcalc ' + cerebellum_volume_image + ' ' + tissue_sum_image + ' -add 0.5 -gt 1.0 ' + tissue_sum_image + ' -sub 0.0 -if  ' + cerebellum_multiplier_image)
    app.cleanup(cerebellum_volume_image)
    progress.increment()
    run.command('mrconvert ' + tissue_images[0] + ' ' + new_tissue_images[0])
    app.cleanup(tissue_images[0])
    progress.increment()
    run.command('mrcalc ' + tissue_images[1] + ' ' + cerebellum_multiplier_image + ' ' + fast_outputs_template[1] + ' -mult -add ' + new_tissue_images[1])
    app.cleanup(tissue_images[1])
    app.cleanup(fast_outputs_template[1])
    progress.increment()
    run.command('mrcalc ' + tissue_images[2] + ' ' + cerebellum_multiplier_image + ' ' + fast_outputs_template[2] + ' -mult -add ' + new_tissue_images[2])
    app.cleanup(tissue_images[2])
    app.cleanup(fast_outputs_template[2])
    progress.increment()
    run.command('mrcalc ' + tissue_images[3] + ' ' + cerebellum_multiplier_image + ' ' + fast_outputs_template[0] + ' -mult -add ' + new_tissue_images[3])
    app.cleanup(tissue_images[3])
    app.cleanup(fast_outputs_template[0])
    app.cleanup(cerebellum_multiplier_image)
    progress.increment()
    run.command('mrconvert ' + tissue_images[4] + ' ' + new_tissue_images[4])
    app.cleanup(tissue_images[4])
    progress.increment()
    run.command('mrmath ' + ' '.join(new_tissue_images) + ' sum ' + new_tissue_sum_image)
    app.cleanup(tissue_sum_image)
    progress.done()
    tissue_images = new_tissue_images
    tissue_sum_image = new_tissue_sum_image



  # For all voxels within FreeSurfer's brain mask, add to the CSF image in order to make the sum 1.0
  progress = app.ProgressBar('Performing fill operations to preserve unity tissue volume', 2)

  # Some voxels may get a non-zero cortical GM fraction due to native use of the surface representation, yet
  #   these voxels are actually outside FreeSurfer's own provided brain mask. So what we need to do here is
  #   get the union of the tissue sum nonzero image and the mask image, and use that at the -mult step of the
  #   mrcalc call.
  # Required image: (tissue_sum_image > 0.0) || mask_image
  # tissue_sum_image 0.0 -gt mask_image -add 1.0 -min

  new_tissue_images = [ tissue_images[0],
                        tissue_images[1],
                        tissue_images[2],
                        os.path.splitext(tissue_images[3])[0] + '_filled.mif',
                        tissue_images[4] ]
  csf_fill_image = 'csf_fill.mif'
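  # A hedged reading of the command below:
  #   csf_fill = max((1.0 - tissue_sum) * min((tissue_sum > 0.0) + mask, 1.0), 0.0)
  # i.e. the deficit from unity, restricted to voxels that already contain tissue or lie within the brain mask.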
  run.command('mrcalc 1.0 ' + tissue_sum_image + ' -sub ' + tissue_sum_image + ' 0.0 -gt ' + mask_image + ' -add 1.0 -min -mult 0.0 -max ' + csf_fill_image)
  app.cleanup(tissue_sum_image)
  # If no template is specified, this file is part of the FreeSurfer output; hence don't modify
  if app.ARGS.template:
    app.cleanup(mask_image)
  progress.increment()
  run.command('mrcalc ' + tissue_images[3] + ' ' + csf_fill_image + ' -add ' + new_tissue_images[3])
  app.cleanup(csf_fill_image)
  app.cleanup(tissue_images[3])
  progress.done()
  tissue_images = new_tissue_images



  # Move brain stem from white matter to pathology at the final step:
  #   writing it to the pathology volume prevents the brain stem segmentation
  #   from overwriting other structures, as could happen if it were written directly to WM
  if not app.ARGS.white_stem:
    progress = app.ProgressBar('Moving brain stem to volume index 4', 3)
    new_tissue_images = [ tissue_images[0],
                          tissue_images[1],
                          os.path.splitext(tissue_images[2])[0] + '_no_brainstem.mif',
                          tissue_images[3],
                          os.path.splitext(tissue_images[4])[0] + '_with_brainstem.mif' ]
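    # The overlap between the WM volume and the brain stem segmentation is computed as a voxel-wise
    # minimum, subtracted from WM and added to the pathology volume, so the overall tissue sum is preserved.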
    run.command('mrcalc ' + tissue_images[2] + ' brain_stem.mif -min brain_stem_white_overlap.mif')
    app.cleanup('brain_stem.mif')
    progress.increment()
    run.command('mrcalc ' + tissue_images[2] + ' brain_stem_white_overlap.mif -sub ' + new_tissue_images[2])
    app.cleanup(tissue_images[2])
    progress.increment()
    run.command('mrcalc ' + tissue_images[4] + ' brain_stem_white_overlap.mif -add ' + new_tissue_images[4])
    app.cleanup(tissue_images[4])
    app.cleanup('brain_stem_white_overlap.mif')
    progress.done()
    tissue_images = new_tissue_images



  # Finally, concatenate the volumes to produce the 5TT image
  app.console('Concatenating tissue volumes into 5TT format')
  precrop_result_image = '5TT.mif'
  if bs_cropmask_path:
    run.command('mrcat ' + ' '.join(tissue_images) + ' - -axis 3 | ' + \
                '5ttedit - ' + precrop_result_image + ' -none ' + bs_cropmask_path)
    app.cleanup(bs_cropmask_path)
  else:
    run.command('mrcat ' + ' '.join(tissue_images) + ' ' + precrop_result_image + ' -axis 3')
  app.cleanup(tissue_images)


  # Maybe don't go off all tissues here, since FreeSurfer's mask can be fairly liberal;
  #   instead get just a voxel clearance from all other tissue types (maybe two)
  if app.ARGS.nocrop:
    run.function(os.rename, precrop_result_image, 'result.mif')
  else:
    app.console('Cropping final 5TT image')
    crop_mask_image = 'crop_mask.mif'
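    # The crop mask is built from volumes 0,1,2,4 (all tissues except CSF), thresholded at a small
    # absolute value and dilated with maskfilter to leave a margin of clearance.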
    run.command('mrconvert ' + precrop_result_image + ' -coord 3 0,1,2,4 - | mrmath - sum - -axis 3 | mrthreshold - - -abs 0.001 | maskfilter - dilate ' + crop_mask_image)
    run.command('mrgrid ' + precrop_result_image + ' crop result.mif -mask ' + crop_mask_image)
    app.cleanup(crop_mask_image)
    app.cleanup(precrop_result_image)

  run.command('mrconvert result.mif ' + path.from_user(app.ARGS.output),
              mrconvert_keyval=path.from_user(os.path.join(app.ARGS.input, 'mri', 'aparc+aseg.mgz'), True),
              force=app.FORCE_OVERWRITE)
Example #17
def execute():
    import math, os, shutil
    from mrtrix3 import app, image, path, run

    # Get b-values and number of volumes per b-value.
    bvalues = [
        int(round(float(x)))
        for x in image.headerField('dwi.mif', 'shells').split()
    ]
    bvolumes = [
        int(x) for x in image.headerField('dwi.mif', 'shellcounts').split()
    ]
    app.console(
        str(len(bvalues)) + ' unique b-value(s) detected: ' +
        ','.join(map(str, bvalues)) + ' with ' + ','.join(map(str, bvolumes)) +
        ' volumes.')
    if len(bvalues) < 2:
        app.error('Need at least 2 unique b-values (including b=0).')

    # Get lmax information (if provided).
    sfwm_lmax = []
    if app.args.lmax:
        sfwm_lmax = [int(x.strip()) for x in app.args.lmax.split(',')]
        if not len(sfwm_lmax) == len(bvalues):
            app.error('Number of lmax\'s (' + str(len(sfwm_lmax)) +
                      ', as supplied to the -lmax option: ' +
                      ','.join(map(str, sfwm_lmax)) +
                      ') does not match number of unique b-values.')
        for l in sfwm_lmax:
            if l % 2:
                app.error('Values supplied to the -lmax option must be even.')
            if l < 0:
                app.error(
                    'Values supplied to the -lmax option must be non-negative.'
                )

    # Erode (brain) mask.
    if app.args.erode > 0:
        run.command('maskfilter mask.mif erode eroded_mask.mif -npass ' +
                    str(app.args.erode))
    else:
        run.command('mrconvert mask.mif eroded_mask.mif -datatype bit')

    # Get volumes, compute mean signal and SDM per b-value; compute overall SDM; get rid of erroneous values.
    totvolumes = 0
    fullsdmcmd = 'mrcalc'
    errcmd = 'mrcalc'
    zeropath = 'mean_b' + str(bvalues[0]) + '.mif'
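    # Signal decay metric (SDM) per b-value: log(mean_b0 / mean_b); full_sdm.mif is the average of
    # the per-shell SDMs weighted by the number of volumes in each shell (a hedged reading of the
    # mrcalc commands assembled below).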
    for i, b in enumerate(bvalues):
        meanpath = 'mean_b' + str(b) + '.mif'
        run.command('dwiextract dwi.mif -shell ' + str(b) +
                    ' - | mrmath - mean ' + meanpath + ' -axis 3')
        errpath = 'err_b' + str(b) + '.mif'
        run.command('mrcalc ' + meanpath + ' -finite ' + meanpath +
                    ' 0 -if 0 -le ' + errpath + ' -datatype bit')
        errcmd += ' ' + errpath
        if i > 0:
            errcmd += ' -add'
            sdmpath = 'sdm_b' + str(b) + '.mif'
            run.command('mrcalc ' + zeropath + ' ' + meanpath +
                        ' -divide -log ' + sdmpath)
            totvolumes += bvolumes[i]
            fullsdmcmd += ' ' + sdmpath + ' ' + str(bvolumes[i]) + ' -mult'
            if i > 1:
                fullsdmcmd += ' -add'
    fullsdmcmd += ' ' + str(totvolumes) + ' -divide full_sdm.mif'
    run.command(fullsdmcmd)
    run.command(
        'mrcalc full_sdm.mif -finite full_sdm.mif 0 -if 0 -le err_sdm.mif -datatype bit'
    )
    errcmd += ' err_sdm.mif -add 0 eroded_mask.mif -if safe_mask.mif -datatype bit'
    run.command(errcmd)
    run.command('mrcalc safe_mask.mif full_sdm.mif 0 -if 10 -min safe_sdm.mif')

    # Compute FA and principal eigenvectors; crude WM versus GM-CSF separation based on FA.
    run.command(
        'dwi2tensor dwi.mif - -mask safe_mask.mif | tensor2metric - -fa safe_fa.mif -vector safe_vecs.mif -modulate none -mask safe_mask.mif'
    )
    run.command('mrcalc safe_mask.mif safe_fa.mif 0 -if ' + str(app.args.fa) +
                ' -gt crude_wm.mif -datatype bit')
    run.command(
        'mrcalc crude_wm.mif 0 safe_mask.mif -if _crudenonwm.mif -datatype bit'
    )

    # Crude GM versus CSF separation based on SDM.
    crudenonwmmedian = image.statistic('safe_sdm.mif', 'median',
                                       '_crudenonwm.mif')
    run.command(
        'mrcalc _crudenonwm.mif safe_sdm.mif ' + str(crudenonwmmedian) +
        ' -subtract 0 -if - | mrthreshold - - -mask _crudenonwm.mif | mrcalc _crudenonwm.mif - 0 -if crude_csf.mif -datatype bit'
    )
    run.command(
        'mrcalc crude_csf.mif 0 _crudenonwm.mif -if crude_gm.mif -datatype bit'
    )

    # Refine WM: remove high SDM outliers.
    crudewmmedian = image.statistic('safe_sdm.mif', 'median', 'crude_wm.mif')
    run.command('mrcalc crude_wm.mif safe_sdm.mif 0 -if ' +
                str(crudewmmedian) + ' -gt _crudewmhigh.mif -datatype bit')
    run.command(
        'mrcalc _crudewmhigh.mif 0 crude_wm.mif -if _crudewmlow.mif -datatype bit'
    )
    crudewmQ1 = float(
        image.statistic('safe_sdm.mif', 'median', '_crudewmlow.mif'))
    crudewmQ3 = float(
        image.statistic('safe_sdm.mif', 'median', '_crudewmhigh.mif'))
    crudewmoutlthresh = crudewmQ3 + (crudewmQ3 - crudewmQ1)
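    # Q1 and Q3 are approximated here as the medians of the below-median and above-median halves of
    # the crude WM SDM distribution, so this threshold is roughly the upper quartile plus one
    # inter-quartile range.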
    run.command('mrcalc crude_wm.mif safe_sdm.mif 0 -if ' +
                str(crudewmoutlthresh) +
                ' -gt _crudewmoutliers.mif -datatype bit')
    run.command(
        'mrcalc _crudewmoutliers.mif 0 crude_wm.mif -if refined_wm.mif -datatype bit'
    )

    # Refine GM: separate safer GM from partial volumed voxels.
    crudegmmedian = image.statistic('safe_sdm.mif', 'median', 'crude_gm.mif')
    run.command('mrcalc crude_gm.mif safe_sdm.mif 0 -if ' +
                str(crudegmmedian) + ' -gt _crudegmhigh.mif -datatype bit')
    run.command(
        'mrcalc _crudegmhigh.mif 0 crude_gm.mif -if _crudegmlow.mif -datatype bit'
    )
    run.command(
        'mrcalc _crudegmhigh.mif safe_sdm.mif ' + str(crudegmmedian) +
        ' -subtract 0 -if - | mrthreshold - - -mask _crudegmhigh.mif -invert | mrcalc _crudegmhigh.mif - 0 -if _crudegmhighselect.mif -datatype bit'
    )
    run.command(
        'mrcalc _crudegmlow.mif safe_sdm.mif ' + str(crudegmmedian) +
        ' -subtract -neg 0 -if - | mrthreshold - - -mask _crudegmlow.mif -invert | mrcalc _crudegmlow.mif - 0 -if _crudegmlowselect.mif -datatype bit'
    )
    run.command(
        'mrcalc _crudegmhighselect.mif 1 _crudegmlowselect.mif -if refined_gm.mif -datatype bit'
    )

    # Refine CSF: recover lost CSF from crude WM SDM outliers, separate safer CSF from partial volumed voxels.
    crudecsfmin = image.statistic('safe_sdm.mif', 'min', 'crude_csf.mif')
    run.command('mrcalc _crudewmoutliers.mif safe_sdm.mif 0 -if ' +
                str(crudecsfmin) +
                ' -gt 1 crude_csf.mif -if _crudecsfextra.mif -datatype bit')
    run.command(
        'mrcalc _crudecsfextra.mif safe_sdm.mif ' + str(crudecsfmin) +
        ' -subtract 0 -if - | mrthreshold - - -mask _crudecsfextra.mif | mrcalc _crudecsfextra.mif - 0 -if refined_csf.mif -datatype bit'
    )

    # Get final voxels for single-fibre WM response function estimation from WM using 'tournier' algorithm.
    refwmcount = float(
        image.statistic('refined_wm.mif', 'count', 'refined_wm.mif'))
    voxsfwmcount = int(round(refwmcount * app.args.sfwm / 100.0))
    app.console('Running \'tournier\' algorithm to select ' +
                str(voxsfwmcount) + ' single-fibre WM voxels.')
    cleanopt = ''
    if not app._cleanup:
        cleanopt = ' -nocleanup'
    run.command('dwi2response tournier dwi.mif _respsfwmss.txt -sf_voxels ' +
                str(voxsfwmcount) + ' -iter_voxels ' + str(voxsfwmcount * 10) +
                ' -mask refined_wm.mif -voxels voxels_sfwm.mif -tempdir ' +
                app._tempDir + cleanopt)

    # Get final voxels for GM response function estimation from GM.
    refgmmedian = image.statistic('safe_sdm.mif', 'median', 'refined_gm.mif')
    run.command('mrcalc refined_gm.mif safe_sdm.mif 0 -if ' +
                str(refgmmedian) + ' -gt _refinedgmhigh.mif -datatype bit')
    run.command(
        'mrcalc _refinedgmhigh.mif 0 refined_gm.mif -if _refinedgmlow.mif -datatype bit'
    )
    refgmhighcount = float(
        image.statistic('_refinedgmhigh.mif', 'count', '_refinedgmhigh.mif'))
    refgmlowcount = float(
        image.statistic('_refinedgmlow.mif', 'count', '_refinedgmlow.mif'))
    voxgmhighcount = int(round(refgmhighcount * app.args.gm / 100.0))
    voxgmlowcount = int(round(refgmlowcount * app.args.gm / 100.0))
    run.command(
        'mrcalc _refinedgmhigh.mif safe_sdm.mif 0 -if - | mrthreshold - - -bottom '
        + str(voxgmhighcount) +
        ' -ignorezero | mrcalc _refinedgmhigh.mif - 0 -if _refinedgmhighselect.mif -datatype bit'
    )
    run.command(
        'mrcalc _refinedgmlow.mif safe_sdm.mif 0 -if - | mrthreshold - - -top '
        + str(voxgmlowcount) +
        ' -ignorezero | mrcalc _refinedgmlow.mif - 0 -if _refinedgmlowselect.mif -datatype bit'
    )
    run.command(
        'mrcalc _refinedgmhighselect.mif 1 _refinedgmlowselect.mif -if voxels_gm.mif -datatype bit'
    )

    # Get final voxels for CSF response function estimation from CSF.
    refcsfcount = float(
        image.statistic('refined_csf.mif', 'count', 'refined_csf.mif'))
    voxcsfcount = int(round(refcsfcount * app.args.csf / 100.0))
    run.command(
        'mrcalc refined_csf.mif safe_sdm.mif 0 -if - | mrthreshold - - -top ' +
        str(voxcsfcount) +
        ' -ignorezero | mrcalc refined_csf.mif - 0 -if voxels_csf.mif -datatype bit'
    )

    # Show summary of voxels counts.
    textarrow = ' --> '
    app.console('Summary of voxel counts:')
    app.console(
        'Mask: ' + str(int(image.statistic('mask.mif', 'count', 'mask.mif'))) +
        textarrow +
        str(int(image.statistic('eroded_mask.mif', 'count',
                                'eroded_mask.mif'))) + textarrow +
        str(int(image.statistic('safe_mask.mif', 'count', 'safe_mask.mif'))))
    app.console(
        'WM: ' +
        str(int(image.statistic('crude_wm.mif', 'count', 'crude_wm.mif'))) +
        textarrow +
        str(int(image.statistic('refined_wm.mif', 'count',
                                'refined_wm.mif'))) + textarrow +
        str(int(image.statistic('voxels_sfwm.mif', 'count',
                                'voxels_sfwm.mif'))) + ' (SF)')
    app.console(
        'GM: ' +
        str(int(image.statistic('crude_gm.mif', 'count', 'crude_gm.mif'))) +
        textarrow +
        str(int(image.statistic('refined_gm.mif', 'count',
                                'refined_gm.mif'))) + textarrow +
        str(int(image.statistic('voxels_gm.mif', 'count', 'voxels_gm.mif'))))
    app.console(
        'CSF: ' +
        str(int(image.statistic('crude_csf.mif', 'count', 'crude_csf.mif'))) +
        textarrow +
        str(int(image.statistic('refined_csf.mif', 'count',
                                'refined_csf.mif'))) + textarrow +
        str(int(image.statistic('voxels_csf.mif', 'count', 'voxels_csf.mif'))))

    # Generate single-fibre WM, GM and CSF responses
    bvalues_option = ' -shell ' + ','.join(map(str, bvalues))
    sfwm_lmax_option = ''
    if sfwm_lmax:
        sfwm_lmax_option = ' -lmax ' + ','.join(map(str, sfwm_lmax))
    run.command(
        'amp2response dwi.mif voxels_sfwm.mif safe_vecs.mif response_sfwm.txt'
        + bvalues_option + sfwm_lmax_option)
    run.command(
        'amp2response dwi.mif voxels_gm.mif safe_vecs.mif response_gm.txt' +
        bvalues_option + ' -isotropic')
    run.command(
        'amp2response dwi.mif voxels_csf.mif safe_vecs.mif response_csf.txt' +
        bvalues_option + ' -isotropic')
    run.function(shutil.copyfile, 'response_sfwm.txt',
                 path.fromUser(app.args.out_sfwm, False))
    run.function(shutil.copyfile, 'response_gm.txt',
                 path.fromUser(app.args.out_gm, False))
    run.function(shutil.copyfile, 'response_csf.txt',
                 path.fromUser(app.args.out_csf, False))

    # Generate 4D binary images with voxel selections at major stages in algorithm (RGB as in MSMT-CSD paper).
    run.command(
        'mrcat crude_csf.mif crude_gm.mif crude_wm.mif crude.mif -axis 3')
    run.command(
        'mrcat refined_csf.mif refined_gm.mif refined_wm.mif refined.mif -axis 3'
    )
    run.command(
        'mrcat voxels_csf.mif voxels_gm.mif voxels_sfwm.mif voxels.mif -axis 3'
    )
Example #18
def getInputs():
    import os, shutil
    from mrtrix3 import app, path, run
    if app.args.lut:
        run.function(shutil.copyfile, path.fromUser(app.args.lut, False),
                     path.toTemp('LUT.txt', False))
Example #19
def execute():
    import math, os, shutil
    from mrtrix3 import app, image, path, run

    # Ideally want to use the oversampling-based regridding of the 5TT image from the SIFT model, not mrtransform
    # May need to commit 5ttregrid...

    # Verify input 5tt image
    run.command('5ttcheck 5tt.mif', False)

    # Get shell information
    shells = [
        int(round(float(x)))
        for x in image.headerField('dwi.mif', 'shells').split()
    ]
    if len(shells) < 3:
        app.warn(
            'Less than three b-value shells; response functions will not be applicable in resolving three tissues using MSMT-CSD algorithm'
        )

    # Get lmax information (if provided)
    wm_lmax = []
    if app.args.lmax:
        wm_lmax = [int(x.strip()) for x in app.args.lmax.split(',')]
        if not len(wm_lmax) == len(shells):
            app.error('Number of manually-defined lmax\'s (' +
                      str(len(wm_lmax)) +
                      ') does not match number of b-value shells (' +
                      str(len(shells)) + ')')
        for l in wm_lmax:
            if l % 2:
                app.error('Values for lmax must be even')
            if l < 0:
                app.error('Values for lmax must be non-negative')

    run.command(
        'dwi2tensor dwi.mif - -mask mask.mif | tensor2metric - -fa fa.mif -vector vector.mif'
    )
    if not os.path.exists('dirs.mif'):
        run.function(shutil.copy, 'vector.mif', 'dirs.mif')
    run.command(
        'mrtransform 5tt.mif 5tt_regrid.mif -template fa.mif -interp linear')

    # Basic tissue masks
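    # Assumed 5TT volume ordering: 0 = cortical GM, 1 = sub-cortical GM, 2 = WM, 3 = CSF, 4 = pathology.
    # Each mask keeps voxels whose partial volume exceeds the -pvf threshold (and, for GM and CSF,
    # whose FA lies below the -fa threshold), restricted to the brain mask.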
    run.command(
        'mrconvert 5tt_regrid.mif - -coord 3 2 -axes 0,1,2 | mrcalc - ' +
        str(app.args.pvf) + ' -gt mask.mif -mult wm_mask.mif')
    run.command(
        'mrconvert 5tt_regrid.mif - -coord 3 0 -axes 0,1,2 | mrcalc - ' +
        str(app.args.pvf) + ' -gt fa.mif ' + str(app.args.fa) +
        ' -lt -mult mask.mif -mult gm_mask.mif')
    run.command(
        'mrconvert 5tt_regrid.mif - -coord 3 3 -axes 0,1,2 | mrcalc - ' +
        str(app.args.pvf) + ' -gt fa.mif ' + str(app.args.fa) +
        ' -lt -mult mask.mif -mult csf_mask.mif')

    # Revise WM mask to only include single-fibre voxels
    app.console(
        'Calling dwi2response recursively to select WM single-fibre voxels using \''
        + app.args.wm_algo + '\' algorithm')
    recursive_cleanup_option = ''
    if not app._cleanup:
        recursive_cleanup_option = ' -nocleanup'
    run.command(
        'dwi2response ' + app.args.wm_algo +
        ' dwi.mif wm_ss_response.txt -mask wm_mask.mif -voxels wm_sf_mask.mif -tempdir '
        + app._tempDir + recursive_cleanup_option)

    # Check for empty masks
    wm_voxels = int(
        image.statistic('wm_sf_mask.mif', 'count', 'wm_sf_mask.mif'))
    gm_voxels = int(image.statistic('gm_mask.mif', 'count', 'gm_mask.mif'))
    csf_voxels = int(image.statistic('csf_mask.mif', 'count', 'csf_mask.mif'))
    empty_masks = []
    if not wm_voxels:
        empty_masks.append('WM')
    if not gm_voxels:
        empty_masks.append('GM')
    if not csf_voxels:
        empty_masks.append('CSF')
    if empty_masks:
        message = ','.join(empty_masks)
        message += ' tissue mask'
        if len(empty_masks) > 1:
            message += 's'
        message += ' empty; cannot estimate response function'
        if len(empty_masks) > 1:
            message += 's'
        app.error(message)

    # For each of the three tissues, generate a multi-shell response
    bvalues_option = ' -shell ' + ','.join(map(str, shells))
    sfwm_lmax_option = ''
    if wm_lmax:
        sfwm_lmax_option = ' -lmax ' + ','.join(map(str, wm_lmax))
    run.command('amp2response dwi.mif wm_sf_mask.mif dirs.mif wm.txt' +
                bvalues_option + sfwm_lmax_option)
    run.command('amp2response dwi.mif gm_mask.mif dirs.mif gm.txt' +
                bvalues_option + ' -isotropic')
    run.command('amp2response dwi.mif csf_mask.mif dirs.mif csf.txt' +
                bvalues_option + ' -isotropic')
    run.function(shutil.copyfile, 'wm.txt',
                 path.fromUser(app.args.out_wm, False))
    run.function(shutil.copyfile, 'gm.txt',
                 path.fromUser(app.args.out_gm, False))
    run.function(shutil.copyfile, 'csf.txt',
                 path.fromUser(app.args.out_csf, False))

    # Generate output 4D binary image with voxel selections; RGB as in MSMT-CSD paper
    run.command(
        'mrcat csf_mask.mif gm_mask.mif wm_sf_mask.mif voxels.mif -axis 3')
Example #20
if not grad:
  app.error('No diffusion gradient table found')
if not len(grad) == num_volumes:
  app.error('Number of lines in gradient table (' + str(len(grad)) + ') does not match input image (' + str(num_volumes) + ' volumes); check your input data')

if app.args.extent:
    extent = app.args.extent
else:
    extent = '5,5,5'
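# The -extent value passed to dwidenoise sets the size (in voxels) of the sliding window used for
# MP-PCA denoising; '5,5,5' is the fallback used here when no extent is supplied.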

run.command('mrconvert dwi.mif working.mif')

# denoising
if app.args.denoise:
    print("...Beginning denoising")
    run.command('dwidenoise -extent ' + extent + ' -noise fullnoisemap.mif working.mif dwidn.mif')
    run.function(os.remove,'working.mif')
    run.command('mrconvert dwidn.mif working.mif')

# gibbs artifact correction
if app.args.degibbs:
    print("...Beginning degibbsing")
    run.command('mrdegibbs -nshifts 20 -minW 1 -maxW 3 working.mif dwigc.mif')
    run.function(os.remove,'working.mif')
    run.command('mrconvert dwigc.mif working.mif')

# pre-eddy alignment for multiple input series
if app.args.prealign:
    if len(DWInlist) != 1:
        miflist = []
        for idx,i in enumerate(DWInlist):
            run.command('mrconvert -coord 3 ' + idxlist[idx] + ' working.mif dwipretf' + str(idx) + '.mif')
Example #21
def execute():
  import math, os, shutil
  from mrtrix3 import app, image, path, run



  # Get b-values and number of volumes per b-value.
  bvalues = [ int(round(float(x))) for x in image.headerField('dwi.mif', 'shells').split() ]
  bvolumes = [ int(x) for x in image.headerField('dwi.mif', 'shellcounts').split() ]
  app.console(str(len(bvalues)) + ' unique b-value(s) detected: ' + ','.join(map(str,bvalues)) + ' with ' + ','.join(map(str,bvolumes)) + ' volumes.')
  if len(bvalues) < 2:
    app.error('Need at least 2 unique b-values (including b=0).')


  # Get lmax information (if provided).
  sfwm_lmax = [ ]
  if app.args.lmax:
    sfwm_lmax = [ int(x.strip()) for x in app.args.lmax.split(',') ]
    if not len(sfwm_lmax) == len(bvalues):
      app.error('Number of lmax\'s (' + str(len(sfwm_lmax)) + ', as supplied to the -lmax option: ' + ','.join(map(str,sfwm_lmax)) + ') does not match number of unique b-values.')
    for l in sfwm_lmax:
      if l%2:
        app.error('Values supplied to the -lmax option must be even.')
      if l<0:
        app.error('Values supplied to the -lmax option must be non-negative.')


  # Erode (brain) mask.
  if app.args.erode > 0:
    run.command('maskfilter mask.mif erode eroded_mask.mif -npass ' + str(app.args.erode))
  else:
    run.command('mrconvert mask.mif eroded_mask.mif -datatype bit')


  # Get volumes, compute mean signal and SDM per b-value; compute overall SDM; get rid of erroneous values.
  totvolumes = 0
  fullsdmcmd = 'mrcalc'
  errcmd = 'mrcalc'
  zeropath = 'mean_b' + str(bvalues[0]) + '.mif'
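  # Signal decay metric (SDM) per b-value: log(mean_b0 / mean_b); full_sdm.mif is the average of
  # the per-shell SDMs weighted by the number of volumes in each shell (a hedged reading of the
  # mrcalc commands assembled below).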
  for i, b in enumerate(bvalues):
    meanpath = 'mean_b' + str(b) + '.mif'
    run.command('dwiextract dwi.mif -shell ' + str(b) + ' - | mrmath - mean ' + meanpath + ' -axis 3')
    errpath = 'err_b' + str(b) + '.mif'
    run.command('mrcalc ' + meanpath + ' -finite ' + meanpath + ' 0 -if 0 -le ' + errpath + ' -datatype bit')
    errcmd += ' ' + errpath
    if i>0:
      errcmd += ' -add'
      sdmpath = 'sdm_b' + str(b) + '.mif'
      run.command('mrcalc ' + zeropath + ' ' + meanpath +  ' -divide -log ' + sdmpath)
      totvolumes += bvolumes[i]
      fullsdmcmd += ' ' + sdmpath + ' ' + str(bvolumes[i]) + ' -mult'
      if i>1:
        fullsdmcmd += ' -add'
  fullsdmcmd += ' ' + str(totvolumes) + ' -divide full_sdm.mif'
  run.command(fullsdmcmd)
  run.command('mrcalc full_sdm.mif -finite full_sdm.mif 0 -if 0 -le err_sdm.mif -datatype bit')
  errcmd += ' err_sdm.mif -add 0 eroded_mask.mif -if safe_mask.mif -datatype bit'
  run.command(errcmd)
  run.command('mrcalc safe_mask.mif full_sdm.mif 0 -if 10 -min safe_sdm.mif')


  # Compute FA and principal eigenvectors; crude WM versus GM-CSF separation based on FA.
  run.command('dwi2tensor dwi.mif - -mask safe_mask.mif | tensor2metric - -fa safe_fa.mif -vector safe_vecs.mif -modulate none -mask safe_mask.mif')
  run.command('mrcalc safe_mask.mif safe_fa.mif 0 -if ' + str(app.args.fa) + ' -gt crude_wm.mif -datatype bit')
  run.command('mrcalc crude_wm.mif 0 safe_mask.mif -if _crudenonwm.mif -datatype bit')

  # Crude GM versus CSF separation based on SDM.
  crudenonwmmedian = image.statistic('safe_sdm.mif', 'median', '_crudenonwm.mif')
  run.command('mrcalc _crudenonwm.mif safe_sdm.mif ' + str(crudenonwmmedian) + ' -subtract 0 -if - | mrthreshold - - -mask _crudenonwm.mif | mrcalc _crudenonwm.mif - 0 -if crude_csf.mif -datatype bit')
  run.command('mrcalc crude_csf.mif 0 _crudenonwm.mif -if crude_gm.mif -datatype bit')


  # Refine WM: remove high SDM outliers.
  crudewmmedian = image.statistic('safe_sdm.mif', 'median', 'crude_wm.mif')
  run.command('mrcalc crude_wm.mif safe_sdm.mif 0 -if ' + str(crudewmmedian) + ' -gt _crudewmhigh.mif -datatype bit')
  run.command('mrcalc _crudewmhigh.mif 0 crude_wm.mif -if _crudewmlow.mif -datatype bit')
  crudewmQ1 = float(image.statistic('safe_sdm.mif', 'median', '_crudewmlow.mif'))
  crudewmQ3 = float(image.statistic('safe_sdm.mif', 'median', '_crudewmhigh.mif'))
  crudewmoutlthresh = crudewmQ3 + (crudewmQ3 - crudewmQ1)
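  # Q1 and Q3 are approximated here as the medians of the below-median and above-median halves of
  # the crude WM SDM distribution, so this threshold is roughly the upper quartile plus one
  # inter-quartile range.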
  run.command('mrcalc crude_wm.mif safe_sdm.mif 0 -if ' + str(crudewmoutlthresh) + ' -gt _crudewmoutliers.mif -datatype bit')
  run.command('mrcalc _crudewmoutliers.mif 0 crude_wm.mif -if refined_wm.mif -datatype bit')

  # Refine GM: separate safer GM from partial volumed voxels.
  crudegmmedian = image.statistic('safe_sdm.mif', 'median', 'crude_gm.mif')
  run.command('mrcalc crude_gm.mif safe_sdm.mif 0 -if ' + str(crudegmmedian) + ' -gt _crudegmhigh.mif -datatype bit')
  run.command('mrcalc _crudegmhigh.mif 0 crude_gm.mif -if _crudegmlow.mif -datatype bit')
  run.command('mrcalc _crudegmhigh.mif safe_sdm.mif ' + str(crudegmmedian) + ' -subtract 0 -if - | mrthreshold - - -mask _crudegmhigh.mif -invert | mrcalc _crudegmhigh.mif - 0 -if _crudegmhighselect.mif -datatype bit')
  run.command('mrcalc _crudegmlow.mif safe_sdm.mif ' + str(crudegmmedian) + ' -subtract -neg 0 -if - | mrthreshold - - -mask _crudegmlow.mif -invert | mrcalc _crudegmlow.mif - 0 -if _crudegmlowselect.mif -datatype bit')
  run.command('mrcalc _crudegmhighselect.mif 1 _crudegmlowselect.mif -if refined_gm.mif -datatype bit')

  # Refine CSF: recover lost CSF from crude WM SDM outliers, separate safer CSF from partial volumed voxels.
  crudecsfmin = image.statistic('safe_sdm.mif', 'min', 'crude_csf.mif')
  run.command('mrcalc _crudewmoutliers.mif safe_sdm.mif 0 -if ' + str(crudecsfmin) + ' -gt 1 crude_csf.mif -if _crudecsfextra.mif -datatype bit')
  run.command('mrcalc _crudecsfextra.mif safe_sdm.mif ' + str(crudecsfmin) + ' -subtract 0 -if - | mrthreshold - - -mask _crudecsfextra.mif | mrcalc _crudecsfextra.mif - 0 -if refined_csf.mif -datatype bit')


  # Get final voxels for single-fibre WM response function estimation from WM using 'tournier' algorithm.
  refwmcount = float(image.statistic('refined_wm.mif', 'count', 'refined_wm.mif'))
  voxsfwmcount = int(round(refwmcount * app.args.sfwm / 100.0))
  app.console('Running \'tournier\' algorithm to select ' + str(voxsfwmcount) + ' single-fibre WM voxels.')
  cleanopt = ''
  if not app._cleanup:
    cleanopt = ' -nocleanup'
  run.command('dwi2response tournier dwi.mif _respsfwmss.txt -sf_voxels ' + str(voxsfwmcount) + ' -iter_voxels ' + str(voxsfwmcount * 10) + ' -mask refined_wm.mif -voxels voxels_sfwm.mif -tempdir ' + app._tempDir + cleanopt)

  # Get final voxels for GM response function estimation from GM.
  refgmmedian = image.statistic('safe_sdm.mif', 'median', 'refined_gm.mif')
  run.command('mrcalc refined_gm.mif safe_sdm.mif 0 -if ' + str(refgmmedian) + ' -gt _refinedgmhigh.mif -datatype bit')
  run.command('mrcalc _refinedgmhigh.mif 0 refined_gm.mif -if _refinedgmlow.mif -datatype bit')
  refgmhighcount = float(image.statistic('_refinedgmhigh.mif', 'count', '_refinedgmhigh.mif'))
  refgmlowcount = float(image.statistic('_refinedgmlow.mif', 'count', '_refinedgmlow.mif'))
  voxgmhighcount = int(round(refgmhighcount * app.args.gm / 100.0))
  voxgmlowcount = int(round(refgmlowcount * app.args.gm / 100.0))
  run.command('mrcalc _refinedgmhigh.mif safe_sdm.mif 0 -if - | mrthreshold - - -bottom ' + str(voxgmhighcount) + ' -ignorezero | mrcalc _refinedgmhigh.mif - 0 -if _refinedgmhighselect.mif -datatype bit')
  run.command('mrcalc _refinedgmlow.mif safe_sdm.mif 0 -if - | mrthreshold - - -top ' + str(voxgmlowcount) + ' -ignorezero | mrcalc _refinedgmlow.mif - 0 -if _refinedgmlowselect.mif -datatype bit')
  run.command('mrcalc _refinedgmhighselect.mif 1 _refinedgmlowselect.mif -if voxels_gm.mif -datatype bit')

  # Get final voxels for CSF response function estimation from CSF.
  refcsfcount = float(image.statistic('refined_csf.mif', 'count', 'refined_csf.mif'))
  voxcsfcount = int(round(refcsfcount * app.args.csf / 100.0))
  run.command('mrcalc refined_csf.mif safe_sdm.mif 0 -if - | mrthreshold - - -top ' + str(voxcsfcount) + ' -ignorezero | mrcalc refined_csf.mif - 0 -if voxels_csf.mif -datatype bit')


  # Show summary of voxels counts.
  textarrow = ' --> '
  app.console('Summary of voxel counts:')
  app.console('Mask: ' + str(int(image.statistic('mask.mif', 'count', 'mask.mif'))) + textarrow + str(int(image.statistic('eroded_mask.mif', 'count', 'eroded_mask.mif'))) + textarrow + str(int(image.statistic('safe_mask.mif', 'count', 'safe_mask.mif'))))
  app.console('WM: ' + str(int(image.statistic('crude_wm.mif', 'count', 'crude_wm.mif'))) + textarrow + str(int(image.statistic('refined_wm.mif', 'count', 'refined_wm.mif'))) + textarrow + str(int(image.statistic('voxels_sfwm.mif', 'count', 'voxels_sfwm.mif'))) + ' (SF)')
  app.console('GM: ' + str(int(image.statistic('crude_gm.mif', 'count', 'crude_gm.mif'))) + textarrow + str(int(image.statistic('refined_gm.mif', 'count', 'refined_gm.mif'))) + textarrow + str(int(image.statistic('voxels_gm.mif', 'count', 'voxels_gm.mif'))))
  app.console('CSF: ' + str(int(image.statistic('crude_csf.mif', 'count', 'crude_csf.mif'))) + textarrow + str(int(image.statistic('refined_csf.mif', 'count', 'refined_csf.mif'))) + textarrow + str(int(image.statistic('voxels_csf.mif', 'count', 'voxels_csf.mif'))))


  # Generate single-fibre WM, GM and CSF responses
  bvalues_option = ' -shell ' + ','.join(map(str,bvalues))
  sfwm_lmax_option = ''
  if sfwm_lmax:
    sfwm_lmax_option = ' -lmax ' + ','.join(map(str,sfwm_lmax))
  run.command('amp2response dwi.mif voxels_sfwm.mif safe_vecs.mif response_sfwm.txt' + bvalues_option + sfwm_lmax_option)
  run.command('amp2response dwi.mif voxels_gm.mif safe_vecs.mif response_gm.txt' + bvalues_option + ' -isotropic')
  run.command('amp2response dwi.mif voxels_csf.mif safe_vecs.mif response_csf.txt' + bvalues_option + ' -isotropic')
  run.function(shutil.copyfile, 'response_sfwm.txt', path.fromUser(app.args.out_sfwm, False))
  run.function(shutil.copyfile, 'response_gm.txt', path.fromUser(app.args.out_gm, False))
  run.function(shutil.copyfile, 'response_csf.txt', path.fromUser(app.args.out_csf, False))


  # Generate 4D binary images with voxel selections at major stages in algorithm (RGB as in MSMT-CSD paper).
  run.command('mrcat crude_csf.mif crude_gm.mif crude_wm.mif crude.mif -axis 3')
  run.command('mrcat refined_csf.mif refined_gm.mif refined_wm.mif refined.mif -axis 3')
  run.command('mrcat voxels_csf.mif voxels_gm.mif voxels_sfwm.mif voxels.mif -axis 3')
Example #22
def execute():  #pylint: disable=unused-variable
    bzero_threshold = float(
        CONFIG['BZeroThreshold']) if 'BZeroThreshold' in CONFIG else 10.0
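    # BZeroThreshold is the MRtrix3 configuration entry giving the highest b-value still treated as
    # b=0; if the entry is absent, 10.0 is used as the fallback here.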

    # CHECK INPUTS AND OPTIONS
    app.console('-------')

    # Get b-values and number of volumes per b-value.
    bvalues = [
        int(round(float(x)))
        for x in image.mrinfo('dwi.mif', 'shell_bvalues').split()
    ]
    bvolumes = [int(x) for x in image.mrinfo('dwi.mif', 'shell_sizes').split()]
    app.console(
        str(len(bvalues)) + ' unique b-value(s) detected: ' +
        ','.join(map(str, bvalues)) + ' with ' + ','.join(map(str, bvolumes)) +
        ' volumes')
    if len(bvalues) < 2:
        raise MRtrixError('Need at least 2 unique b-values (including b=0).')
    bvalues_option = ' -shells ' + ','.join(map(str, bvalues))

    # Get lmax information (if provided).
    sfwm_lmax = []
    if app.ARGS.lmax:
        sfwm_lmax = [int(x.strip()) for x in app.ARGS.lmax.split(',')]
        if not len(sfwm_lmax) == len(bvalues):
            raise MRtrixError('Number of lmax\'s (' + str(len(sfwm_lmax)) +
                              ', as supplied to the -lmax option: ' +
                              ','.join(map(str, sfwm_lmax)) +
                              ') does not match number of unique b-values.')
        for sfl in sfwm_lmax:
            if sfl % 2:
                raise MRtrixError(
                    'Values supplied to the -lmax option must be even.')
            if sfl < 0:
                raise MRtrixError(
                    'Values supplied to the -lmax option must be non-negative.'
                )
    sfwm_lmax_option = ''
    if sfwm_lmax:
        sfwm_lmax_option = ' -lmax ' + ','.join(map(str, sfwm_lmax))

    # PREPARATION
    app.console('-------')
    app.console('Preparation:')

    # Erode (brain) mask.
    if app.ARGS.erode > 0:
        app.console('* Eroding brain mask by ' + str(app.ARGS.erode) +
                    ' pass(es)...')
        run.command('maskfilter mask.mif erode eroded_mask.mif -npass ' +
                    str(app.ARGS.erode),
                    show=False)
    else:
        app.console('Not eroding brain mask.')
        run.command('mrconvert mask.mif eroded_mask.mif -datatype bit',
                    show=False)
    statmaskcount = image.statistics('mask.mif', mask='mask.mif').count
    statemaskcount = image.statistics('eroded_mask.mif',
                                      mask='eroded_mask.mif').count
    app.console('  [ mask: ' + str(statmaskcount) + ' -> ' +
                str(statemaskcount) + ' ]')

    # Get volumes, compute mean signal and SDM per b-value; compute overall SDM; get rid of erroneous values.
    app.console('* Computing signal decay metric (SDM):')
    totvolumes = 0
    fullsdmcmd = 'mrcalc'
    errcmd = 'mrcalc'
    zeropath = 'mean_b' + str(bvalues[0]) + '.mif'
    for ibv, bval in enumerate(bvalues):
        app.console(' * b=' + str(bval) + '...')
        meanpath = 'mean_b' + str(bval) + '.mif'
        run.command('dwiextract dwi.mif -shells ' + str(bval) +
                    ' - | mrcalc - 0 -max - | mrmath - mean ' + meanpath +
                    ' -axis 3',
                    show=False)
        errpath = 'err_b' + str(bval) + '.mif'
        run.command('mrcalc ' + meanpath + ' -finite ' + meanpath +
                    ' 0 -if 0 -le ' + errpath + ' -datatype bit',
                    show=False)
        errcmd += ' ' + errpath
        if ibv > 0:
            errcmd += ' -add'
            sdmpath = 'sdm_b' + str(bval) + '.mif'
            run.command('mrcalc ' + zeropath + ' ' + meanpath +
                        ' -divide -log ' + sdmpath,
                        show=False)
            totvolumes += bvolumes[ibv]
            fullsdmcmd += ' ' + sdmpath + ' ' + str(bvolumes[ibv]) + ' -mult'
            if ibv > 1:
                fullsdmcmd += ' -add'
    fullsdmcmd += ' ' + str(totvolumes) + ' -divide full_sdm.mif'
    run.command(fullsdmcmd, show=False)
    app.console('* Removing erroneous voxels from mask and correcting SDM...')
    run.command(
        'mrcalc full_sdm.mif -finite full_sdm.mif 0 -if 0 -le err_sdm.mif -datatype bit',
        show=False)
    errcmd += ' err_sdm.mif -add 0 eroded_mask.mif -if safe_mask.mif -datatype bit'
    run.command(errcmd, show=False)
    run.command('mrcalc safe_mask.mif full_sdm.mif 0 -if 10 -min safe_sdm.mif',
                show=False)
    statsmaskcount = image.statistics('safe_mask.mif',
                                      mask='safe_mask.mif').count
    app.console('  [ mask: ' + str(statemaskcount) + ' -> ' +
                str(statsmaskcount) + ' ]')

    # CRUDE SEGMENTATION
    app.console('-------')
    app.console('Crude segmentation:')

    # Compute FA and principal eigenvectors; crude WM versus GM-CSF separation based on FA.
    app.console('* Crude WM versus GM-CSF separation (at FA=' +
                str(app.ARGS.fa) + ')...')
    run.command(
        'dwi2tensor dwi.mif - -mask safe_mask.mif | tensor2metric - -fa safe_fa.mif -vector safe_vecs.mif -modulate none -mask safe_mask.mif',
        show=False)
    run.command('mrcalc safe_mask.mif safe_fa.mif 0 -if ' + str(app.ARGS.fa) +
                ' -gt crude_wm.mif -datatype bit',
                show=False)
    run.command(
        'mrcalc crude_wm.mif 0 safe_mask.mif -if _crudenonwm.mif -datatype bit',
        show=False)
    statcrudewmcount = image.statistics('crude_wm.mif',
                                        mask='crude_wm.mif').count
    statcrudenonwmcount = image.statistics('_crudenonwm.mif',
                                           mask='_crudenonwm.mif').count
    app.console('  [ ' + str(statsmaskcount) + ' -> ' + str(statcrudewmcount) +
                ' (WM) & ' + str(statcrudenonwmcount) + ' (GM-CSF) ]')

    # Crude GM versus CSF separation based on SDM.
    app.console('* Crude GM versus CSF separation...')
    crudenonwmmedian = image.statistics('safe_sdm.mif',
                                        mask='_crudenonwm.mif').median
    run.command(
        'mrcalc _crudenonwm.mif safe_sdm.mif ' + str(crudenonwmmedian) +
        ' -subtract 0 -if - | mrthreshold - - -mask _crudenonwm.mif | mrcalc _crudenonwm.mif - 0 -if crude_csf.mif -datatype bit',
        show=False)
    run.command(
        'mrcalc crude_csf.mif 0 _crudenonwm.mif -if crude_gm.mif -datatype bit',
        show=False)
    statcrudegmcount = image.statistics('crude_gm.mif',
                                        mask='crude_gm.mif').count
    statcrudecsfcount = image.statistics('crude_csf.mif',
                                         mask='crude_csf.mif').count
    app.console('  [ ' + str(statcrudenonwmcount) + ' -> ' +
                str(statcrudegmcount) + ' (GM) & ' + str(statcrudecsfcount) +
                ' (CSF) ]')

    # REFINED SEGMENTATION
    app.console('-------')
    app.console('Refined segmentation:')

    # Refine WM: remove high SDM outliers.
    app.console('* Refining WM...')
    crudewmmedian = image.statistics('safe_sdm.mif',
                                     mask='crude_wm.mif').median
    run.command('mrcalc crude_wm.mif safe_sdm.mif ' + str(crudewmmedian) +
                ' -subtract -abs 0 -if _crudewm_sdmad.mif',
                show=False)
    crudewmmad = image.statistics('_crudewm_sdmad.mif',
                                  mask='crude_wm.mif').median
    crudewmoutlthresh = crudewmmedian + (1.4826 * crudewmmad * 2.0)
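    # The factor 1.4826 rescales the median absolute deviation (MAD) to an estimate of the standard
    # deviation under a normality assumption, so this cutoff sits approximately two robust standard
    # deviations above the median SDM of the crude WM voxels.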
    run.command('mrcalc crude_wm.mif safe_sdm.mif 0 -if ' +
                str(crudewmoutlthresh) +
                ' -gt _crudewmoutliers.mif -datatype bit',
                show=False)
    run.command(
        'mrcalc _crudewmoutliers.mif 0 crude_wm.mif -if refined_wm.mif -datatype bit',
        show=False)
    statrefwmcount = image.statistics('refined_wm.mif',
                                      mask='refined_wm.mif').count
    app.console('  [ WM: ' + str(statcrudewmcount) + ' -> ' +
                str(statrefwmcount) + ' ]')

    # Refine GM: separate safer GM from partial volumed voxels.
    app.console('* Refining GM...')
    crudegmmedian = image.statistics('safe_sdm.mif',
                                     mask='crude_gm.mif').median
    run.command('mrcalc crude_gm.mif safe_sdm.mif 0 -if ' +
                str(crudegmmedian) + ' -gt _crudegmhigh.mif -datatype bit',
                show=False)
    run.command(
        'mrcalc _crudegmhigh.mif 0 crude_gm.mif -if _crudegmlow.mif -datatype bit',
        show=False)
    run.command(
        'mrcalc _crudegmhigh.mif safe_sdm.mif ' + str(crudegmmedian) +
        ' -subtract 0 -if - | mrthreshold - - -mask _crudegmhigh.mif -invert | mrcalc _crudegmhigh.mif - 0 -if _crudegmhighselect.mif -datatype bit',
        show=False)
    run.command(
        'mrcalc _crudegmlow.mif safe_sdm.mif ' + str(crudegmmedian) +
        ' -subtract -neg 0 -if - | mrthreshold - - -mask _crudegmlow.mif -invert | mrcalc _crudegmlow.mif - 0 -if _crudegmlowselect.mif -datatype bit',
        show=False)
    run.command(
        'mrcalc _crudegmhighselect.mif 1 _crudegmlowselect.mif -if refined_gm.mif -datatype bit',
        show=False)
    statrefgmcount = image.statistics('refined_gm.mif',
                                      mask='refined_gm.mif').count
    app.console('  [ GM: ' + str(statcrudegmcount) + ' -> ' +
                str(statrefgmcount) + ' ]')

    # Refine CSF: recover lost CSF from crude WM SDM outliers, separate safer CSF from partial volumed voxels.
    app.console('* Refining CSF...')
    crudecsfmin = image.statistics('safe_sdm.mif', mask='crude_csf.mif').min
    run.command('mrcalc _crudewmoutliers.mif safe_sdm.mif 0 -if ' +
                str(crudecsfmin) +
                ' -gt 1 crude_csf.mif -if _crudecsfextra.mif -datatype bit',
                show=False)
    run.command(
        'mrcalc _crudecsfextra.mif safe_sdm.mif ' + str(crudecsfmin) +
        ' -subtract 0 -if - | mrthreshold - - -mask _crudecsfextra.mif | mrcalc _crudecsfextra.mif - 0 -if refined_csf.mif -datatype bit',
        show=False)
    statrefcsfcount = image.statistics('refined_csf.mif',
                                       mask='refined_csf.mif').count
    app.console('  [ CSF: ' + str(statcrudecsfcount) + ' -> ' +
                str(statrefcsfcount) + ' ]')

    # FINAL VOXEL SELECTION AND RESPONSE FUNCTION ESTIMATION
    app.console('-------')
    app.console('Final voxel selection and response function estimation:')

    # Get final voxels for CSF response function estimation from refined CSF.
    app.console('* CSF:')
    app.console(' * Selecting final voxels (' + str(app.ARGS.csf) +
                '% of refined CSF)...')
    voxcsfcount = int(round(statrefcsfcount * app.ARGS.csf / 100.0))
    run.command(
        'mrcalc refined_csf.mif safe_sdm.mif 0 -if - | mrthreshold - - -top ' +
        str(voxcsfcount) +
        ' -ignorezero | mrcalc refined_csf.mif - 0 -if - -datatype bit | mrconvert - voxels_csf.mif -axes 0,1,2',
        show=False)
    statvoxcsfcount = image.statistics('voxels_csf.mif',
                                       mask='voxels_csf.mif').count
    app.console('   [ CSF: ' + str(statrefcsfcount) + ' -> ' +
                str(statvoxcsfcount) + ' ]')
    # Estimate CSF response function
    app.console(' * Estimating response function...')
    run.command(
        'amp2response dwi.mif voxels_csf.mif safe_vecs.mif response_csf.txt' +
        bvalues_option + ' -isotropic',
        show=False)

    # Get final voxels for GM response function estimation from refined GM.
    app.console('* GM:')
    app.console(' * Selecting final voxels (' + str(app.ARGS.gm) +
                '% of refined GM)...')
    voxgmcount = int(round(statrefgmcount * app.ARGS.gm / 100.0))
    refgmmedian = image.statistics('safe_sdm.mif',
                                   mask='refined_gm.mif').median
    run.command(
        'mrcalc refined_gm.mif safe_sdm.mif ' + str(refgmmedian) +
        ' -subtract -abs 1 -add 0 -if - | mrthreshold - - -bottom ' +
        str(voxgmcount) +
        ' -ignorezero | mrcalc refined_gm.mif - 0 -if - -datatype bit | mrconvert - voxels_gm.mif -axes 0,1,2',
        show=False)
    statvoxgmcount = image.statistics('voxels_gm.mif',
                                      mask='voxels_gm.mif').count
    app.console('   [ GM: ' + str(statrefgmcount) + ' -> ' +
                str(statvoxgmcount) + ' ]')
    # Estimate GM response function
    app.console(' * Estimating response function...')
    run.command(
        'amp2response dwi.mif voxels_gm.mif safe_vecs.mif response_gm.txt' +
        bvalues_option + ' -isotropic',
        show=False)

    # Get final voxels for single-fibre WM response function estimation from refined WM.
    app.console('* Single-fibre WM:')
    app.console(' * Selecting final voxels' +
                ('' if app.ARGS.wm_algo == 'tax' else
                 (' (' + str(app.ARGS.sfwm) + '% of refined WM)')) + '...')
    voxsfwmcount = int(round(statrefwmcount * app.ARGS.sfwm / 100.0))

    if app.ARGS.wm_algo:
        recursive_cleanup_option = ''
        if not app.DO_CLEANUP:
            recursive_cleanup_option = ' -nocleanup'
        app.console('   Selecting WM single-fibre voxels using \'' +
                    app.ARGS.wm_algo + '\' algorithm')
        if app.ARGS.wm_algo == 'tax' and app.ARGS.sfwm != 0.5:
            app.warn(
                'Single-fibre WM response function selection algorithm "tax" will not honour requested WM voxel percentage'
            )
        run.command(
            'dwi2response ' + app.ARGS.wm_algo +
            ' dwi.mif _respsfwmss.txt -mask refined_wm.mif -voxels voxels_sfwm.mif'
            + ('' if app.ARGS.wm_algo == 'tax' else
               (' -number ' + str(voxsfwmcount))) + ' -scratch ' +
            path.quote(app.SCRATCH_DIR) + recursive_cleanup_option,
            show=False)
    else:
        app.console(
            '   Selecting WM single-fibre voxels using built-in (Dhollander et al., 2019) algorithm'
        )
        run.command('mrmath dwi.mif mean mean_sig.mif -axis 3', show=False)
        refwmcoef = image.statistics('mean_sig.mif',
                                     mask='refined_wm.mif').median * math.sqrt(
                                         4.0 * math.pi)
        if sfwm_lmax:
            isiso = [lm == 0 for lm in sfwm_lmax]
        else:
            isiso = [bv < bzero_threshold for bv in bvalues]
        with open('ewmrf.txt', 'w') as ewr:
            for iis in isiso:
                if iis:
                    ewr.write("%s 0 0 0\n" % refwmcoef)
                else:
                    ewr.write("%s -%s %s -%s\n" %
                              (refwmcoef, refwmcoef, refwmcoef, refwmcoef))
        run.command(
            'dwi2fod msmt_csd dwi.mif ewmrf.txt abs_ewm2.mif response_csf.txt abs_csf2.mif -mask refined_wm.mif -lmax 2,0'
            + bvalues_option,
            show=False)
        run.command(
            'mrconvert abs_ewm2.mif - -coord 3 0 | mrcalc - abs_csf2.mif -add abs_sum2.mif',
            show=False)
        run.command(
            'sh2peaks abs_ewm2.mif - -num 1 -mask refined_wm.mif | peaks2amp - - | mrcalc - abs_sum2.mif -divide - | mrconvert - metric_sfwm2.mif -coord 3 0 -axes 0,1,2',
            show=False)
        run.command(
            'mrcalc refined_wm.mif metric_sfwm2.mif 0 -if - | mrthreshold - - -top '
            + str(voxsfwmcount * 2) +
            ' -ignorezero | mrcalc refined_wm.mif - 0 -if - -datatype bit | mrconvert - refined_sfwm.mif -axes 0,1,2',
            show=False)
        run.command(
            'dwi2fod msmt_csd dwi.mif ewmrf.txt abs_ewm6.mif response_csf.txt abs_csf6.mif -mask refined_sfwm.mif -lmax 6,0'
            + bvalues_option,
            show=False)
        run.command(
            'mrconvert abs_ewm6.mif - -coord 3 0 | mrcalc - abs_csf6.mif -add abs_sum6.mif',
            show=False)
        run.command(
            'sh2peaks abs_ewm6.mif - -num 1 -mask refined_sfwm.mif | peaks2amp - - | mrcalc - abs_sum6.mif -divide - | mrconvert - metric_sfwm6.mif -coord 3 0 -axes 0,1,2',
            show=False)
        run.command(
            'mrcalc refined_sfwm.mif metric_sfwm6.mif 0 -if - | mrthreshold - - -top '
            + str(voxsfwmcount) +
            ' -ignorezero | mrcalc refined_sfwm.mif - 0 -if - -datatype bit | mrconvert - voxels_sfwm.mif -axes 0,1,2',
            show=False)

    statvoxsfwmcount = image.statistics('voxels_sfwm.mif',
                                        mask='voxels_sfwm.mif').count
    app.console('   [ WM: ' + str(statrefwmcount) + ' -> ' +
                str(statvoxsfwmcount) + ' (single-fibre) ]')
    # Estimate SF WM response function
    app.console(' * Estimating response function...')
    run.command(
        'amp2response dwi.mif voxels_sfwm.mif safe_vecs.mif response_sfwm.txt'
        + bvalues_option + sfwm_lmax_option,
        show=False)

    # OUTPUT AND SUMMARY
    app.console('-------')
    app.console('Generating outputs...')

    # Generate 4D binary images with voxel selections at major stages in algorithm (RGB: WM=blue, GM=green, CSF=red).
    run.command(
        'mrcat crude_csf.mif crude_gm.mif crude_wm.mif check_crude.mif -axis 3',
        show=False)
    run.command(
        'mrcat refined_csf.mif refined_gm.mif refined_wm.mif check_refined.mif -axis 3',
        show=False)
    run.command(
        'mrcat voxels_csf.mif voxels_gm.mif voxels_sfwm.mif check_voxels.mif -axis 3',
        show=False)

    # Copy results to output files
    run.function(shutil.copyfile,
                 'response_sfwm.txt',
                 path.from_user(app.ARGS.out_sfwm, False),
                 show=False)
    run.function(shutil.copyfile,
                 'response_gm.txt',
                 path.from_user(app.ARGS.out_gm, False),
                 show=False)
    run.function(shutil.copyfile,
                 'response_csf.txt',
                 path.from_user(app.ARGS.out_csf, False),
                 show=False)
    if app.ARGS.voxels:
        run.command('mrconvert check_voxels.mif ' +
                    path.from_user(app.ARGS.voxels),
                    mrconvert_keyval=path.from_user(app.ARGS.input, False),
                    force=app.FORCE_OVERWRITE,
                    show=False)
    app.console('-------')
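Note: the mrthreshold '-top' / '-bottom' calls above implement a simple percentage-based selection: the requested fraction of refined voxels is kept, ranked either by the highest signal-decay metric (CSF and single-fibre WM) or by proximity to the in-mask median (GM). A minimal NumPy sketch of that selection rule follows; the function and array names are illustrative and not part of the script above.

import numpy as np

def select_final_voxels(metric, refined_mask, percentage, mode='top'):
    # Keep `percentage` percent of the voxels in `refined_mask`, ranked by
    # `metric`: mode='top' keeps the largest values, mode='nearest' keeps the
    # values closest to the in-mask median (mirroring the GM selection above).
    values = metric[refined_mask]
    count = int(round(values.size * percentage / 100.0))
    if mode == 'top':
        order = np.argsort(values)[::-1]
    else:
        order = np.argsort(np.abs(values - np.median(values)))
    selected = np.zeros(refined_mask.shape, dtype=bool)
    selected.flat[np.flatnonzero(refined_mask)[order[:count]]] = True
    return selected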
Example #23
0
def execute():  #pylint: disable=unused-variable
    import math, os
    from distutils.spawn import find_executable
    from mrtrix3 import app, fsl, image, MRtrixError, path, run, utils

    if utils.is_windows():
        raise MRtrixError(
            '\'fsl\' algorithm of 5ttgen script cannot be run on Windows: FSL not available on Windows'
        )

    fsl_path = os.environ.get('FSLDIR', '')
    if not fsl_path:
        raise MRtrixError(
            'Environment variable FSLDIR is not set; please run appropriate FSL configuration script'
        )

    bet_cmd = fsl.exe_name('bet')
    fast_cmd = fsl.exe_name('fast')
    first_cmd = fsl.exe_name('run_first_all')
    ssroi_cmd = fsl.exe_name('standard_space_roi')

    first_atlas_path = os.path.join(fsl_path, 'data', 'first',
                                    'models_336_bin')
    if not os.path.isdir(first_atlas_path):
        raise MRtrixError(
            'Atlases required for FSL\'s FIRST program not installed; please install fsl-first-data using your relevant package manager'
        )

    fsl_suffix = fsl.suffix()

    sgm_structures = [
        'L_Accu', 'R_Accu', 'L_Caud', 'R_Caud', 'L_Pall', 'R_Pall', 'L_Puta',
        'R_Puta', 'L_Thal', 'R_Thal'
    ]
    if app.ARGS.sgm_amyg_hipp:
        sgm_structures.extend(['L_Amyg', 'R_Amyg', 'L_Hipp', 'R_Hipp'])

    t1_spacing = image.Header('input.mif').spacing()
    upsample_for_first = False
    # If voxel size is 1.25mm or larger, make a guess that the user has erroneously re-gridded their data
    if math.pow(t1_spacing[0] * t1_spacing[1] * t1_spacing[2],
                1.0 / 3.0) > 1.225:
        app.warn(
            'Voxel size larger than expected for T1-weighted images (' +
            str(t1_spacing) + '); '
            'note that ACT does not require re-gridding of T1 image to DWI space, and indeed '
            'retaining the original higher resolution of the T1 image is preferable'
        )
        upsample_for_first = True

    run.command('mrconvert input.mif T1.nii -strides -1,+2,+3')

    fast_t1_input = 'T1.nii'
    fast_t2_input = ''

    # Decide whether or not we're going to do any brain masking
    if os.path.exists('mask.mif'):

        fast_t1_input = 'T1_masked' + fsl_suffix

        # Check to see if the mask matches the T1 image
        if image.match('T1.nii', 'mask.mif'):
            run.command('mrcalc T1.nii mask.mif -mult ' + fast_t1_input)
            mask_path = 'mask.mif'
        else:
            app.warn('Mask image does not match input image - re-gridding')
            run.command(
                'mrtransform mask.mif mask_regrid.mif -template T1.nii -datatype bit'
            )
            run.command('mrcalc T1.nii mask_regrid.mif -mult ' + fast_t1_input)
            mask_path = 'mask_regrid.mif'

        if os.path.exists('T2.nii'):
            fast_t2_input = 'T2_masked' + fsl_suffix
            run.command('mrcalc T2.nii ' + mask_path + ' -mult ' +
                        fast_t2_input)

    elif app.ARGS.premasked:

        fast_t1_input = 'T1.nii'
        if os.path.exists('T2.nii'):
            fast_t2_input = 'T2.nii'

    else:

        # Use FSL command standard_space_roi to do an initial masking of the image before BET
        # Also reduce the FoV of the image
        # Using MNI 1mm dilated brain mask rather than the -b option in standard_space_roi (which uses the 2mm mask); the latter looks 'buggy' to me... Unfortunately even with the 1mm 'dilated' mask, it can still cut into some brain areas, hence the explicit dilation
        mni_mask_path = os.path.join(fsl_path, 'data', 'standard',
                                     'MNI152_T1_1mm_brain_mask_dil.nii.gz')
        mni_mask_dilation = 0
        if os.path.exists(mni_mask_path):
            mni_mask_dilation = 4
        else:
            mni_mask_path = os.path.join(
                fsl_path, 'data', 'standard',
                'MNI152_T1_2mm_brain_mask_dil.nii.gz')
            if os.path.exists(mni_mask_path):
                mni_mask_dilation = 2
        try:
            if mni_mask_dilation:
                run.command('maskfilter ' + mni_mask_path +
                            ' dilate mni_mask.nii -npass ' +
                            str(mni_mask_dilation))
                if app.ARGS.nocrop:
                    ssroi_roi_option = ' -roiNONE'
                else:
                    ssroi_roi_option = ' -roiFOV'
                run.command(ssroi_cmd + ' T1.nii T1_preBET' + fsl_suffix +
                            ' -maskMASK mni_mask.nii' + ssroi_roi_option)
            else:
                run.command(ssroi_cmd + ' T1.nii T1_preBET' + fsl_suffix +
                            ' -b')
        except run.MRtrixCmdError:
            pass
        try:
            pre_bet_image = fsl.find_image('T1_preBET')
        except MRtrixError:
            app.warn('FSL script \'standard_space_roi\' did not complete successfully' + \
                     ('' if find_executable('dc') else ' (possibly due to program \'dc\' not being installed)') + '; ' + \
                     'attempting to continue by providing un-cropped image to BET')
            pre_bet_image = 'T1.nii'

        # BET
        run.command(bet_cmd + ' ' + pre_bet_image + ' T1_BET' + fsl_suffix +
                    ' -f 0.15 -R')
        fast_t1_input = fsl.find_image('T1_BET' + fsl_suffix)

        if os.path.exists('T2.nii'):
            if app.ARGS.nocrop:
                fast_t2_input = 'T2.nii'
            else:
                # Just a reduction of FoV, no sub-voxel interpolation going on
                run.command('mrtransform T2.nii T2_cropped.nii -template ' +
                            fast_t1_input + ' -interp nearest')
                fast_t2_input = 'T2_cropped.nii'

    # Finish branching based on brain masking

    # FAST
    if fast_t2_input:
        run.command(fast_cmd + ' -S 2 ' + fast_t2_input + ' ' + fast_t1_input)
    else:
        run.command(fast_cmd + ' ' + fast_t1_input)

    # FIRST
    first_input = 'T1.nii'
    if upsample_for_first:
        app.warn(
            'Generating 1mm isotropic T1 image for FIRST in hope of preventing failure, since input image is of lower resolution'
        )
        run.command('mrgrid T1.nii regrid T1_1mm.nii -voxel 1.0 -interp sinc')
        first_input = 'T1_1mm.nii'
    first_brain_extracted_option = ''
    if app.ARGS.premasked:
        first_brain_extracted_option = ' -b'
    first_debug_option = ''
    if not app.DO_CLEANUP:
        first_debug_option = ' -d'
    first_verbosity_option = ''
    if app.VERBOSITY == 3:
        first_verbosity_option = ' -v'
    run.command(first_cmd + ' -m none -s ' + ','.join(sgm_structures) +
                ' -i ' + first_input + ' -o first' +
                first_brain_extracted_option + first_debug_option +
                first_verbosity_option)
    fsl.check_first('first', sgm_structures)

    # Convert FIRST meshes to partial volume images
    pve_image_list = []
    progress = app.ProgressBar(
        'Generating partial volume images for SGM structures',
        len(sgm_structures))
    for struct in sgm_structures:
        pve_image_path = 'mesh2voxel_' + struct + '.mif'
        vtk_in_path = 'first-' + struct + '_first.vtk'
        vtk_temp_path = struct + '.vtk'
        run.command('meshconvert ' + vtk_in_path + ' ' + vtk_temp_path +
                    ' -transform first2real ' + first_input)
        run.command('mesh2voxel ' + vtk_temp_path + ' ' + fast_t1_input + ' ' +
                    pve_image_path)
        pve_image_list.append(pve_image_path)
        progress.increment()
    progress.done()
    run.command(['mrmath', pve_image_list, 'sum', '-', '|', \
                 'mrcalc', '-', '1.0', '-min', 'all_sgms.mif'])

    # Combine the tissue images into the 5TT format within the script itself
    fast_output_prefix = fast_t1_input.split('.')[0]
    fast_csf_output = fsl.find_image(fast_output_prefix + '_pve_0')
    fast_gm_output = fsl.find_image(fast_output_prefix + '_pve_1')
    fast_wm_output = fsl.find_image(fast_output_prefix + '_pve_2')
    # Step 1: Run LCC on the WM image
    run.command(
        'mrthreshold ' + fast_wm_output +
        ' - -abs 0.001 | maskfilter - connect - -connectivity | mrcalc 1 - 1 -gt -sub remove_unconnected_wm_mask.mif -datatype bit'
    )
    # Step 2: Generate the images in the same fashion as the old 5ttgen binary used to:
    #   - Preserve CSF as-is
    #   - Preserve SGM, unless it results in a sum of volume fractions greater than 1, in which case clamp
    #   - Multiply the FAST volume fractions of GM and CSF, so that the sum of CSF, SGM, CGM and WM is 1.0
    run.command('mrcalc ' + fast_csf_output +
                ' remove_unconnected_wm_mask.mif -mult csf.mif')
    run.command('mrcalc 1.0 csf.mif -sub all_sgms.mif -min sgm.mif')
    run.command('mrcalc 1.0 csf.mif sgm.mif -add -sub ' + fast_gm_output +
                ' ' + fast_wm_output + ' -add -div multiplier.mif')
    run.command(
        'mrcalc multiplier.mif -finite multiplier.mif 0.0 -if multiplier_noNAN.mif'
    )
    run.command(
        'mrcalc ' + fast_gm_output +
        ' multiplier_noNAN.mif -mult remove_unconnected_wm_mask.mif -mult cgm.mif'
    )
    run.command(
        'mrcalc ' + fast_wm_output +
        ' multiplier_noNAN.mif -mult remove_unconnected_wm_mask.mif -mult wm.mif'
    )
    run.command('mrcalc 0 wm.mif -min path.mif')
    run.command(
        'mrcat cgm.mif sgm.mif wm.mif csf.mif path.mif - -axis 3 | mrconvert - combined_precrop.mif -strides +2,+3,+4,+1'
    )

    # Crop to reduce file size (improves caching of image data during tracking)
    if app.ARGS.nocrop:
        run.function(os.rename, 'combined_precrop.mif', 'result.mif')
    else:
        run.command(
            'mrmath combined_precrop.mif sum - -axis 3 | mrthreshold - - -abs 0.5 | mrgrid combined_precrop.mif crop result.mif -mask -'
        )

    run.command('mrconvert result.mif ' + path.from_user(app.ARGS.output),
                mrconvert_keyval=path.from_user(app.ARGS.input, False),
                force=app.FORCE_OVERWRITE)
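Note: the chain of mrcalc calls above that assembles the 5TT volume fractions follows the rule stated in the comments: CSF is kept as-is (within the connected-WM mask), SGM is clamped so that CSF + SGM never exceeds 1, and the FAST GM and WM fractions are rescaled so that all tissues sum to 1. A rough per-voxel NumPy rendering of the same arithmetic, with illustrative names (not part of the script):

import numpy as np

def combine_5tt(fast_csf, fast_gm, fast_wm, sgm, wm_connected):
    # wm_connected: binary mask of the largest connected WM component
    csf = fast_csf * wm_connected
    sgm = np.minimum(1.0 - csf, sgm)                      # clamp SGM against CSF
    with np.errstate(divide='ignore', invalid='ignore'):
        multiplier = (1.0 - csf - sgm) / (fast_gm + fast_wm)
    multiplier = np.where(np.isfinite(multiplier), multiplier, 0.0)
    cgm = fast_gm * multiplier * wm_connected
    wm = fast_wm * multiplier * wm_connected
    path = np.zeros_like(wm)                              # empty pathology volume
    return np.stack([cgm, sgm, wm, csf, path], axis=-1)   # 5TT volume order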
Example #24
0
def get_inputs():  #pylint: disable=unused-variable
    run.command('mrconvert ' + path.from_user(app.ARGS.input) + ' ' +
                path.to_scratch('input.mif'))
    if app.ARGS.lut:
        run.function(shutil.copyfile, path.from_user(app.ARGS.lut, False),
                     path.to_scratch('LUT.txt', False))
Example #25
0
def execute():  #pylint: disable=unused-variable
    lmax_option = ''
    if app.ARGS.lmax:
        lmax_option = ' -lmax ' + app.ARGS.lmax

    if app.ARGS.max_iters < 2:
        raise MRtrixError('Number of iterations must be at least 2')

    progress = app.ProgressBar('Optimising')

    iter_voxels = app.ARGS.iter_voxels
    if iter_voxels == 0:
        iter_voxels = 10 * app.ARGS.number
    elif iter_voxels < app.ARGS.number:
        raise MRtrixError(
            'Number of selected voxels (-iter_voxels) must be greater than number of voxels desired (-number)'
        )

    iteration = 0
    while iteration < app.ARGS.max_iters:
        prefix = 'iter' + str(iteration) + '_'

        if iteration == 0:
            rf_in_path = 'init_RF.txt'
            mask_in_path = 'mask.mif'
            init_rf = '1 -1 1'
            with open(rf_in_path, 'w') as init_rf_file:
                init_rf_file.write(init_rf)
            iter_lmax_option = ' -lmax 4'
        else:
            rf_in_path = 'iter' + str(iteration - 1) + '_RF.txt'
            mask_in_path = 'iter' + str(iteration - 1) + '_SF_dilated.mif'
            iter_lmax_option = lmax_option

        # Run CSD
        run.command('dwi2fod csd dwi.mif ' + rf_in_path + ' ' + prefix +
                    'FOD.mif -mask ' + mask_in_path)
        # Get amplitudes of two largest peaks, and direction of largest
        run.command('fod2fixel ' + prefix + 'FOD.mif ' + prefix +
                    'fixel -peak peaks.mif -mask ' + mask_in_path +
                    ' -fmls_no_thresholds')
        app.cleanup(prefix + 'FOD.mif')
        if iteration:
            app.cleanup(mask_in_path)
        run.command('fixel2voxel ' + prefix + 'fixel/peaks.mif none ' +
                    prefix + 'amps.mif -number 2')
        run.command('mrconvert ' + prefix + 'amps.mif ' + prefix +
                    'first_peaks.mif -coord 3 0 -axes 0,1,2')
        run.command('mrconvert ' + prefix + 'amps.mif ' + prefix +
                    'second_peaks.mif -coord 3 1 -axes 0,1,2')
        app.cleanup(prefix + 'amps.mif')
        run.command('fixel2peaks ' + prefix + 'fixel/directions.mif ' +
                    prefix + 'first_dir.mif -number 1')
        app.cleanup(prefix + 'fixel')
        # Calculate the 'cost function' Donald derived for selecting single-fibre voxels
        # https://github.com/MRtrix3/mrtrix3/pull/426
        #  sqrt(|peak1|) * (1 - |peak2| / |peak1|)^2
        run.command('mrcalc ' + prefix + 'first_peaks.mif -sqrt 1 ' + prefix +
                    'second_peaks.mif ' + prefix +
                    'first_peaks.mif -div -sub 2 -pow -mult ' + prefix +
                    'CF.mif')
        app.cleanup(prefix + 'first_peaks.mif')
        app.cleanup(prefix + 'second_peaks.mif')
        voxel_count = image.statistics(prefix + 'CF.mif').count
        # Select the top-ranked voxels
        run.command('mrthreshold ' + prefix + 'CF.mif -top ' +
                    str(min([app.ARGS.number, voxel_count])) + ' ' + prefix +
                    'SF.mif')
        # Generate a new response function based on this selection
        run.command('amp2response dwi.mif ' + prefix + 'SF.mif ' + prefix +
                    'first_dir.mif ' + prefix + 'RF.txt' + iter_lmax_option)
        app.cleanup(prefix + 'first_dir.mif')

        new_rf = matrix.load_vector(prefix + 'RF.txt')
        progress.increment('Optimising (' + str(iteration + 1) +
                           ' iterations, RF: [ ' + ', '.join('{:.3f}'.format(n)
                                                             for n in new_rf) +
                           '] )')

        # Should we terminate?
        if iteration > 0:
            run.command('mrcalc ' + prefix + 'SF.mif iter' +
                        str(iteration - 1) + '_SF.mif -sub ' + prefix +
                        'SF_diff.mif')
            app.cleanup('iter' + str(iteration - 1) + '_SF.mif')
            max_diff = image.statistics(prefix + 'SF_diff.mif').max
            app.cleanup(prefix + 'SF_diff.mif')
            if not max_diff:
                app.cleanup(prefix + 'CF.mif')
                run.function(shutil.copyfile, prefix + 'RF.txt',
                             'response.txt')
                run.function(shutil.move, prefix + 'SF.mif', 'voxels.mif')
                break

        # Select a greater number of top single-fibre voxels, and dilate (within bounds of initial mask);
        #   these are the voxels that will be re-tested in the next iteration
        run.command('mrthreshold ' + prefix + 'CF.mif -top ' +
                    str(min([iter_voxels, voxel_count])) +
                    ' - | maskfilter - dilate - -npass ' +
                    str(app.ARGS.dilate) + ' | mrcalc mask.mif - -mult ' +
                    prefix + 'SF_dilated.mif')
        app.cleanup(prefix + 'CF.mif')

        iteration += 1

    progress.done()

    # If terminating due to running out of iterations, still need to put the results in the appropriate location
    if os.path.exists('response.txt'):
        app.console(
            'Convergence of SF voxel selection detected at iteration ' +
            str(iteration + 1))
    else:
        app.console('Exiting after maximum ' + str(app.ARGS.max_iters) +
                    ' iterations')
        run.function(shutil.copyfile,
                     'iter' + str(app.ARGS.max_iters - 1) + '_RF.txt',
                     'response.txt')
        run.function(shutil.move,
                     'iter' + str(app.ARGS.max_iters - 1) + '_SF.mif',
                     'voxels.mif')

    run.function(shutil.copyfile, 'response.txt',
                 path.from_user(app.ARGS.output, False))
    if app.ARGS.voxels:
        run.command('mrconvert voxels.mif ' + path.from_user(app.ARGS.voxels),
                    mrconvert_keyval=path.from_user(app.ARGS.input, False),
                    force=app.FORCE_OVERWRITE)
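Note: the 'cost function' used above to rank single-fibre candidates (see the comment referencing MRtrix3 pull request #426) is sqrt(|peak1|) * (1 - |peak2| / |peak1|)^2, which rewards voxels with one strong, dominant FOD peak. A minimal NumPy sketch of that ranking and of the subsequent top-N selection, assuming the two peak-amplitude maps are available as arrays (names are illustrative):

import numpy as np

def single_fibre_cost(peak1, peak2):
    # sqrt(|peak1|) * (1 - |peak2| / |peak1|)^2
    with np.errstate(divide='ignore', invalid='ignore'):
        ratio = np.abs(peak2) / np.abs(peak1)
    cost = np.sqrt(np.abs(peak1)) * (1.0 - ratio) ** 2
    return np.where(np.isfinite(cost), cost, 0.0)

def top_voxels(cost, number):
    # Keep the `number` highest-cost voxels (the mrthreshold -top step);
    # ties at the cutoff value may admit a few extra voxels.
    cutoff = np.partition(cost.ravel(), -number)[-number]
    return cost >= cutoff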
Example #26
0
def execute():
  import math, os, shutil
  from mrtrix3 import app, file, image, path, run

  lmax_option = ''
  if app.args.lmax:
    lmax_option = ' -lmax ' + app.args.lmax

  convergence_change = 0.01 * app.args.convergence

  for iteration in range(0, app.args.max_iters):
    prefix = 'iter' + str(iteration) + '_'

    # How to initialise response function?
    # old dwi2response command used mean & standard deviation of DWI data; however
    #   this may force the output FODs to lmax=2 at the first iteration
    # Chantal used a tensor with low FA, but it'd be preferable to get the scaling right
    # Other option is to do as before, but get the ratio between l=0 and l=2, and
    #   generate l=4,6,... using that amplitude ratio
    if iteration == 0:
      RF_in_path = 'init_RF.txt'
      mask_in_path = 'mask.mif'
      # TODO This can be changed once #71 is implemented (mrstats statistics across volumes)
      volume_means = [float(x) for x in image.statistic('dwi.mif', 'mean', 'mask.mif').split()]
      mean = sum(volume_means) / float(len(volume_means))
      volume_stds = [float(x) for x in image.statistic('dwi.mif', 'std', 'mask.mif').split()]
      std = sum(volume_stds) / float(len(volume_stds))
      # Scale these to reflect the fact that we're moving to the SH basis
      mean *= math.sqrt(4.0 * math.pi)
      std  *= math.sqrt(4.0 * math.pi)
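      # (The l=0 SH basis function is 1/sqrt(4*pi), so a constant amplitude c
      #  corresponds to an l=0 coefficient of c*sqrt(4*pi); hence this scaling.)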
      # Now produce the initial response function
      # Let's only do it to lmax 4
      init_RF = [ str(mean), str(-0.5*std), str(0.25*std*std/mean) ]
      with open('init_RF.txt', 'w') as f:
        f.write(' '.join(init_RF))
    else:
      RF_in_path = 'iter' + str(iteration-1) + '_RF.txt'
      mask_in_path = 'iter' + str(iteration-1) + '_SF.mif'

    # Run CSD
    run.command('dwi2fod csd dwi.mif ' + RF_in_path + ' ' + prefix + 'FOD.mif -mask ' + mask_in_path)
    # Get amplitudes of two largest peaks, and directions of largest
    run.command('fod2fixel ' + prefix + 'FOD.mif ' + prefix + 'fixel -peak peaks.mif -mask ' + mask_in_path + ' -fmls_no_thresholds')
    file.delTempFile(prefix + 'FOD.mif')
    run.command('fixel2voxel ' + prefix + 'fixel/peaks.mif split_data ' + prefix + 'amps.mif')
    run.command('mrconvert ' + prefix + 'amps.mif ' + prefix + 'first_peaks.mif -coord 3 0 -axes 0,1,2')
    run.command('mrconvert ' + prefix + 'amps.mif ' + prefix + 'second_peaks.mif -coord 3 1 -axes 0,1,2')
    file.delTempFile(prefix + 'amps.mif')
    run.command('fixel2voxel ' + prefix + 'fixel/directions.mif split_dir ' + prefix + 'all_dirs.mif')
    file.delTempFolder(prefix + 'fixel')
    run.command('mrconvert ' + prefix + 'all_dirs.mif ' + prefix + 'first_dir.mif -coord 3 0:2')
    file.delTempFile(prefix + 'all_dirs.mif')
    # Revise single-fibre voxel selection based on ratio of tallest to second-tallest peak
    run.command('mrcalc ' + prefix + 'second_peaks.mif ' + prefix + 'first_peaks.mif -div ' + prefix + 'peak_ratio.mif')
    file.delTempFile(prefix + 'first_peaks.mif')
    file.delTempFile(prefix + 'second_peaks.mif')
    run.command('mrcalc ' + prefix + 'peak_ratio.mif ' + str(app.args.peak_ratio) + ' -lt ' + mask_in_path + ' -mult ' + prefix + 'SF.mif -datatype bit')
    file.delTempFile(prefix + 'peak_ratio.mif')
    # Make sure image isn't empty
    SF_voxel_count = int(image.statistic(prefix + 'SF.mif', 'count', prefix + 'SF.mif'))
    if not SF_voxel_count:
      app.error('Aborting: All voxels have been excluded from single-fibre selection')
    # Generate a new response function
    run.command('amp2response dwi.mif ' + prefix + 'SF.mif ' + prefix + 'first_dir.mif ' + prefix + 'RF.txt' + lmax_option)
    file.delTempFile(prefix + 'first_dir.mif')

    # Detect convergence
    # Look for a change > some percentage - don't bother looking at the masks
    if iteration > 0:
      with open(RF_in_path, 'r') as old_RF_file:
        old_RF = [ float(x) for x in old_RF_file.read().split() ]
      with open(prefix + 'RF.txt', 'r') as new_RF_file:
        new_RF = [ float(x) for x in new_RF_file.read().split() ]
      reiterate = False
      for index in range(0, len(old_RF)):
        mean = 0.5 * (old_RF[index] + new_RF[index])
        diff = math.fabs(0.5 * (old_RF[index] - new_RF[index]))
        ratio = diff / mean
        if ratio > convergence_change:
          reiterate = True
      if not reiterate:
        app.console('Exiting at iteration ' + str(iteration) + ' with ' + str(SF_voxel_count) + ' SF voxels due to unchanged response function coefficients')
        run.function(shutil.copyfile, prefix + 'RF.txt', 'response.txt')
        run.function(shutil.copyfile, prefix + 'SF.mif', 'voxels.mif')
        break

    file.delTempFile(RF_in_path)
    file.delTempFile(mask_in_path)
  # Go to the next iteration

  # If we've terminated due to hitting the iteration limiter, we still need to copy the output file(s) to the correct location
  if not os.path.exists('response.txt'):
    app.console('Exiting after maximum ' + str(app.args.max_iters-1) + ' iterations with ' + str(SF_voxel_count) + ' SF voxels')
    run.function(shutil.copyfile, 'iter' + str(app.args.max_iters-1) + '_RF.txt', 'response.txt')
    run.function(shutil.copyfile, 'iter' + str(app.args.max_iters-1) + '_SF.mif', 'voxels.mif')

  run.function(shutil.copyfile, 'response.txt', path.fromUser(app.args.output, False))
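Note: the convergence test above declares the response function stable when no coefficient changes by more than the requested percentage between iterations. A small standalone sketch of that per-coefficient check (pure Python, illustrative names, with an added guard against a zero mean):

import math

def rf_converged(old_rf, new_rf, convergence_percent):
    # True when every coefficient changed by less than the requested percentage.
    tol = 0.01 * convergence_percent
    for old_value, new_value in zip(old_rf, new_rf):
        mean = 0.5 * (old_value + new_value)
        diff = math.fabs(0.5 * (old_value - new_value))
        if mean != 0.0 and diff / mean > tol:
            return False
    return True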
Example #27
0
def runSubject(bids_dir, label, output_prefix):

    output_dir = os.path.join(output_prefix, label)
    if os.path.exists(output_dir):
        shutil.rmtree(output_dir)
    os.makedirs(output_dir)
    os.makedirs(os.path.join(output_dir, 'connectome'))
    os.makedirs(os.path.join(output_dir, 'dwi'))

    fsl_path = os.environ.get('FSLDIR', '')
    if not fsl_path:
        app.error(
            'Environment variable FSLDIR is not set; please run appropriate FSL configuration script'
        )

    flirt_cmd = fsl.exeName('flirt')
    fslanat_cmd = fsl.exeName('fsl_anat')
    fsl_suffix = fsl.suffix()

    unring_cmd = 'unring.a64'
    if not find_executable(unring_cmd):
        app.console('Command \'' + unring_cmd +
                    '\' not found; cannot perform Gibbs ringing removal')
        unring_cmd = ''

    dwibiascorrect_algo = '-ants'
    if not find_executable('N4BiasFieldCorrection'):
        # Can't use findFSLBinary() here, since we want to proceed even if it's not found
        if find_executable('fast') or find_executable('fsl5.0-fast'):
            dwibiascorrect_algo = '-fsl'
            app.console('Could not find ANTs program N4BiasFieldCorrection; '
                        'using FSL FAST for bias field correction')
        else:
            dwibiascorrect_algo = ''
            app.warn(
                'Could not find ANTs program \'N4BiasFieldCorrection\' or FSL program \'fast\'; '
                'will proceed without performing DWI bias field correction')

    if not app.args.parcellation:
        app.error(
            'For participant-level analysis, desired parcellation must be provided using the -parcellation option'
        )

    parc_image_path = ''
    parc_lut_file = ''
    mrtrix_lut_file = os.path.join(
        os.path.dirname(os.path.abspath(app.__file__)), os.pardir, os.pardir,
        'share', 'mrtrix3', 'labelconvert')

    if app.args.parcellation == 'fs_2005' or app.args.parcellation == 'fs_2009':
        if not 'FREESURFER_HOME' in os.environ:
            app.error(
                'Environment variable FREESURFER_HOME not set; please verify FreeSurfer installation'
            )
        if not find_executable('recon-all'):
            app.error(
                'Could not find FreeSurfer script recon-all; please verify FreeSurfer installation'
            )
        parc_lut_file = os.path.join(os.environ['FREESURFER_HOME'],
                                     'FreeSurferColorLUT.txt')
        if app.args.parcellation == 'fs_2005':
            mrtrix_lut_file = os.path.join(mrtrix_lut_file, 'fs_default.txt')
        else:
            mrtrix_lut_file = os.path.join(mrtrix_lut_file, 'fs_a2009s.txt')

    if app.args.parcellation == 'aal' or app.args.parcellation == 'aal2':
        mni152_path = os.path.join(fsl_path, 'data', 'standard',
                                   'MNI152_T1_1mm.nii.gz')
        if not os.path.isfile(mni152_path):
            app.error(
                'Could not find MNI152 template image within FSL installation (expected location: '
                + mni152_path + ')')
        if app.args.parcellation == 'aal':
            parc_image_path = os.path.abspath(
                os.path.join(os.sep, 'opt', 'aal', 'ROI_MNI_V4.nii'))
            parc_lut_file = os.path.abspath(
                os.path.join(os.sep, 'opt', 'aal', 'ROI_MNI_V4.txt'))
            mrtrix_lut_file = os.path.join(mrtrix_lut_file, 'aal.txt')
        else:
            parc_image_path = os.path.abspath(
                os.path.join(os.sep, 'opt', 'aal', 'ROI_MNI_V5.nii'))
            parc_lut_file = os.path.abspath(
                os.path.join(os.sep, 'opt', 'aal', 'ROI_MNI_V5.txt'))
            mrtrix_lut_file = os.path.join(mrtrix_lut_file, 'aal2.txt')

    if parc_image_path and not os.path.isfile(parc_image_path):
        if app.args.atlas_path:
            parc_image_path = [
                parc_image_path,
                os.path.join(os.path.dirname(app.args.atlas_path),
                             os.path.basename(parc_image_path))
            ]
            if os.path.isfile(parc_image_path[1]):
                parc_image_path = parc_image_path[1]
            else:
                app.error(
                    'Could not find parcellation image (tested locations: ' +
                    str(parc_image_path) + ')')
        else:
            app.error(
                'Could not find parcellation image (expected location: ' +
                parc_image_path + ')')
    if not os.path.isfile(parc_lut_file):
        if app.args.atlas_path:
            parc_lut_file = [
                parc_lut_file,
                os.path.join(os.path.dirname(app.args.atlas_path),
                             os.path.basename(parc_lut_file))
            ]
            if os.path.isfile(parc_lut_file[1]):
                parc_lut_file = parc_lut_file[1]
            else:
                app.error(
                    'Could not find parcellation lookup table file (tested locations: '
                    + str(parc_lut_file) + ')')
        else:
            app.error(
                'Could not find parcellation lookup table file (expected location: '
                + parc_lut_file + ')')
    if not os.path.exists(mrtrix_lut_file):
        app.error(
            'Could not find MRtrix3 connectome lookup table file (expected location: '
            + mrtrix_lut_file + ')')

    app.makeTempDir()

    # Need to perform an initial import of JSON data using mrconvert; so let's grab the diffusion gradient table as well
    # If no bvec/bval present, need to go down the directory listing
    # Only try to import JSON file if it's actually present
    # May need to concatenate more than one input DWI, since if there's more than one phase-encode direction
    #   in the acquired DWIs (i.e. not just those used for estimating the inhomogeneity field), they will
    #   need to be stored as separate NIfTI files in the 'dwi/' directory.
    dwi_image_list = glob.glob(
        os.path.join(bids_dir, label, 'dwi', label) + '*_dwi.nii*')
    dwi_index = 1
    for entry in dwi_image_list:
        # os.path.splitext() falls over with .nii.gz extensions; only removes the .gz
        prefix = entry.split(os.extsep)[0]
        if os.path.isfile(prefix + '.bval') and os.path.isfile(prefix +
                                                               '.bvec'):
            prefix = prefix + '.'
        else:
            prefix = os.path.join(bids_dir, 'dwi')
            if not (os.path.isfile(prefix + 'bval')
                    and os.path.isfile(prefix + 'bvec')):
                app.error(
                    'Unable to locate valid diffusion gradient table for image \''
                    + entry + '\'')
        grad_import_option = ' -fslgrad ' + prefix + 'bvec ' + prefix + 'bval'
        json_path = prefix + 'json'
        if os.path.isfile(json_path):
            json_import_option = ' -json_import ' + json_path
        else:
            json_import_option = ''
        run.command('mrconvert ' + entry + grad_import_option +
                    json_import_option + ' ' +
                    path.toTemp('dwi' + str(dwi_index) + '.mif', True))
        dwi_index += 1

    # Go hunting for reversed phase-encode data dedicated to field map estimation
    fmap_image_list = []
    fmap_dir = os.path.join(bids_dir, label, 'fmap')
    fmap_index = 1
    if os.path.isdir(fmap_dir):
        if app.args.preprocessed:
            app.error('fmap/ directory detected for subject \'' + label +
                      '\' despite use of ' + option_prefix +
                      'preprocessed option')
        fmap_image_list = glob.glob(
            os.path.join(fmap_dir, label) + '_dir-*_epi.nii*')
        for entry in fmap_image_list:
            prefix = entry.split(os.extsep)[0]
            json_path = prefix + '.json'
            with open(json_path, 'r') as f:
                json_elements = json.load(f)
            if 'IntendedFor' in json_elements and not any(
                    i.endswith(json_elements['IntendedFor'])
                    for i in dwi_image_list):
                app.console('Image \'' + entry +
                            '\' is not intended for use with DWIs; skipping')
                continue
            if os.path.isfile(json_path):
                json_import_option = ' -json_import ' + json_path
                # fmap files will not come with any gradient encoding in the JSON;
                #   therefore we need to add it manually ourselves so that mrcat / mrconvert can
                #   appropriately handle the table once these images are concatenated with the DWIs
                fmap_image_size = image.Header(entry).size()
                fmap_image_num_volumes = 1 if len(
                    fmap_image_size) == 3 else fmap_image_size[3]
                run.command('mrconvert ' + entry + json_import_option +
                            ' -set_property dw_scheme \"' +
                            '\\n'.join(['0,0,1,0'] * fmap_image_num_volumes) +
                            '\" ' +
                            path.toTemp('fmap' + str(fmap_index) +
                                        '.mif', True))
                fmap_index += 1
            else:
                app.warn('No corresponding .json file found for image \'' +
                         entry + '\'; skipping')

        fmap_image_list = [
            'fmap' + str(index) + '.mif' for index in range(1, fmap_index)
        ]
    # If there's no data in fmap/ directory, need to check to see if there's any phase-encoding
    #   contrast within the input DWI(s)
    elif len(dwi_image_list) < 2 and not app.args.preprocessed:
        app.error(
            'Inadequate data for pre-processing of subject \'' + label +
            '\': No phase-encoding contrast in input DWIs or fmap/ directory')

    dwi_image_list = [
        'dwi' + str(index) + '.mif' for index in range(1, dwi_index)
    ]

    # Import anatomical image
    run.command('mrconvert ' +
                os.path.join(bids_dir, label, 'anat', label + '_T1w.nii.gz') +
                ' ' + path.toTemp('T1.mif', True))

    cwd = os.getcwd()
    app.gotoTempDir()

    dwipreproc_se_epi = ''
    dwipreproc_se_epi_option = ''

    # For automated testing, down-sampled images are used. However, this invalidates the requirements of
    #   both MP-PCA denoising and Gibbs ringing removal. In addition, eddy can still take a long time
    #   despite the down-sampling. Therefore, provide images that have been pre-processed to the stage
    #   where it is still only DWI, JSON & bvecs/bvals that need to be provided.
    if app.args.preprocessed:

        if len(dwi_image_list) > 1:
            app.error(
                'If DWIs have been pre-processed, then only a single DWI file should need to be provided'
            )
        app.console(
            'Skipping MP-PCA denoising, ' +
            ('Gibbs ringing removal, ' if unring_cmd else '') +
            'distortion correction and bias field correction due to use of ' +
            option_prefix + 'preprocessed option')
        run.function(os.rename, dwi_image_list[0], 'dwi.mif')

    else:  # Do initial image pre-processing (denoising, Gibbs ringing removal if available, distortion correction & bias field correction) as normal

        # Concatenate any SE EPI images with the DWIs before denoising (& unringing), then
        #   separate them again after the fact
        dwidenoise_input = 'dwidenoise_input.mif'
        fmap_num_volumes = 0
        if fmap_image_list:
            run.command('mrcat ' + ' '.join(fmap_image_list) +
                        ' fmap_cat.mif -axis 3')
            for i in fmap_image_list:
                file.delTemporary(i)
            fmap_num_volumes = image.Header('fmap_cat.mif').size()[3]
            dwidenoise_input = 'all_cat.mif'
            run.command('mrcat fmap_cat.mif ' + ' '.join(dwi_image_list) +
                        ' ' + dwidenoise_input + ' -axis 3')
            file.delTemporary('fmap_cat.mif')
        else:
            # Even if no explicit fmap images, may still need to concatenate multiple DWI inputs
            if len(dwi_image_list) > 1:
                run.command('mrcat ' + ' '.join(dwi_image_list) + ' ' +
                            dwidenoise_input + ' -axis 3')
            else:
                run.function(shutil.move, dwi_image_list[0], dwidenoise_input)

        for i in dwi_image_list:
            file.delTemporary(i)

        # Step 1: Denoise
        run.command('dwidenoise ' + dwidenoise_input + ' dwi_denoised.' +
                    ('nii' if unring_cmd else 'mif'))
        if unring_cmd:
            run.command('mrinfo ' + dwidenoise_input +
                        ' -json_keyval input.json')
        file.delTemporary(dwidenoise_input)

        # Step 2: Gibbs ringing removal (if available)
        if unring_cmd:
            run.command(unring_cmd + ' dwi_denoised.nii dwi_unring' +
                        fsl_suffix + ' -n 100')
            file.delTemporary('dwi_denoised.nii')
            unring_output_path = fsl.findImage('dwi_unring')
            run.command('mrconvert ' + unring_output_path +
                        ' dwi_unring.mif -json_import input.json')
            file.delTemporary(unring_output_path)
            file.delTemporary('input.json')

        # If fmap images and DWIs have been concatenated, now is the time to split them back apart
        dwipreproc_input = 'dwi_unring.mif' if unring_cmd else 'dwi_denoised.mif'

        if fmap_num_volumes:
            cat_input = 'dwi_unring.mif' if unring_cmd else 'dwi_denoised.mif'
            dwipreproc_se_epi = 'se_epi.mif'
            run.command('mrconvert ' + cat_input + ' ' + dwipreproc_se_epi +
                        ' -coord 3 0:' + str(fmap_num_volumes - 1))
            cat_num_volumes = image.Header(cat_input).size()[3]
            run.command('mrconvert ' + cat_input +
                        ' dwipreproc_in.mif -coord 3 ' +
                        str(fmap_num_volumes) + ':' + str(cat_num_volumes - 1))
            file.delTemporary(dwipreproc_input)
            dwipreproc_input = 'dwipreproc_in.mif'
            dwipreproc_se_epi_option = ' -se_epi ' + dwipreproc_se_epi

        # Step 3: Distortion correction
        run.command('dwipreproc ' + dwipreproc_input +
                    ' dwi_preprocessed.mif -rpe_header' +
                    dwipreproc_se_epi_option)
        file.delTemporary(dwipreproc_input)
        if dwipreproc_se_epi:
            file.delTemporary(dwipreproc_se_epi)

        # Step 4: Bias field correction
        if dwibiascorrect_algo:
            run.command('dwibiascorrect dwi_preprocessed.mif dwi.mif ' +
                        dwibiascorrect_algo)
            file.delTemporary('dwi_preprocessed.mif')
        else:
            run.function(shutil.move, 'dwi_preprocessed.mif', 'dwi.mif')

    # No longer branching based on whether or not -preprocessed was specified

    # Step 5: Generate a brain mask for DWI
    run.command('dwi2mask dwi.mif dwi_mask.mif')

    # Step 6: Perform brain extraction on the T1 image in its original space
    #         (this is necessary for histogram matching prior to registration)
    #         Use fsl_anat script
    run.command('mrconvert T1.mif T1.nii -stride -1,+2,+3')
    run.command(fslanat_cmd + ' -i T1.nii --noseg --nosubcortseg')
    run.command('mrconvert ' +
                fsl.findImage('T1.anat' + os.sep + 'T1_biascorr_brain_mask') +
                ' T1_mask.mif -datatype bit')
    run.command('mrconvert ' +
                fsl.findImage('T1.anat' + os.sep + 'T1_biascorr_brain') +
                ' T1_biascorr_brain.mif')
    file.delTemporary('T1.anat')

    # Step 7: Generate target images for T1->DWI registration
    run.command('dwiextract dwi.mif -bzero - | '
                'mrcalc - 0.0 -max - | '
                'mrmath - mean -axis 3 dwi_meanbzero.mif')
    run.command(
        'mrcalc 1 dwi_meanbzero.mif -div dwi_mask.mif -mult - | '
        'mrhistmatch - T1_biascorr_brain.mif dwi_pseudoT1.mif -mask_input dwi_mask.mif -mask_target T1_mask.mif'
    )
    run.command(
        'mrcalc 1 T1_biascorr_brain.mif -div T1_mask.mif -mult - | '
        'mrhistmatch - dwi_meanbzero.mif T1_pseudobzero.mif -mask_input T1_mask.mif -mask_target dwi_mask.mif'
    )

    # Step 8: Perform T1->DWI registration
    #         Note that two registrations are performed: Even though we have a symmetric registration,
    #         generation of the two histogram-matched images means that you will get slightly different
    #         answers depending on which synthesized image & original image you use.
    run.command(
        'mrregister T1_biascorr_brain.mif dwi_pseudoT1.mif -type rigid -mask1 T1_mask.mif -mask2 dwi_mask.mif -rigid rigid_T1_to_pseudoT1.txt'
    )
    file.delTemporary('T1_biascorr_brain.mif')
    run.command(
        'mrregister T1_pseudobzero.mif dwi_meanbzero.mif -type rigid -mask1 T1_mask.mif -mask2 dwi_mask.mif -rigid rigid_pseudobzero_to_bzero.txt'
    )
    file.delTemporary('dwi_meanbzero.mif')
    run.command(
        'transformcalc rigid_T1_to_pseudoT1.txt rigid_pseudobzero_to_bzero.txt average rigid_T1_to_dwi.txt'
    )
    file.delTemporary('rigid_T1_to_pseudoT1.txt')
    file.delTemporary('rigid_pseudobzero_to_bzero.txt')
    run.command(
        'mrtransform T1.mif T1_registered.mif -linear rigid_T1_to_dwi.txt')
    file.delTemporary('T1.mif')
    # Note: Since we're using a mask from fsl_anat (which crops the FoV), but using it as input to 5ttgen fsl
    #   (which is receiving the raw T1), we need to resample in order to have the same dimensions between these two
    run.command(
        'mrtransform T1_mask.mif T1_mask_registered.mif -linear rigid_T1_to_dwi.txt -template T1_registered.mif -interp nearest'
    )
    file.delTemporary('T1_mask.mif')

    # Step 9: Generate 5TT image for ACT
    run.command(
        '5ttgen fsl T1_registered.mif 5TT.mif -mask T1_mask_registered.mif')
    file.delTemporary('T1_mask_registered.mif')

    # Step 10: Estimate response functions for spherical deconvolution
    run.command(
        'dwi2response dhollander dwi.mif response_wm.txt response_gm.txt response_csf.txt -mask dwi_mask.mif'
    )

    # Step 11: Determine whether we are working with single-shell or multi-shell data
    shells = [
        int(round(float(value)))
        for value in image.mrinfo('dwi.mif', 'shellvalues').strip().split()
    ]
    multishell = (len(shells) > 2)

    # Step 12: Perform spherical deconvolution
    #          Use a dilated mask for spherical deconvolution as a 'safety margin' -
    #          ACT should be responsible for stopping streamlines before they reach the edge of the DWI mask
    run.command('maskfilter dwi_mask.mif dilate dwi_mask_dilated.mif -npass 3')
    if multishell:
        run.command(
            'dwi2fod msmt_csd dwi.mif response_wm.txt FOD_WM.mif response_gm.txt FOD_GM.mif response_csf.txt FOD_CSF.mif '
            '-mask dwi_mask_dilated.mif -lmax 10,0,0')
        file.delTemporary('FOD_GM.mif')
        file.delTemporary('FOD_CSF.mif')
    else:
        # Still use the msmt_csd algorithm with single-shell data: Use hard non-negativity constraint
        # Also incorporate the CSF response to provide some fluid attenuation
        run.command(
            'dwi2fod msmt_csd dwi.mif response_wm.txt FOD_WM.mif response_csf.txt FOD_CSF.mif '
            '-mask dwi_mask_dilated.mif -lmax 10,0')
        file.delTemporary('FOD_CSF.mif')

    # Step 13: Generate the grey matter parcellation
    #          The necessary steps here will vary significantly depending on the parcellation scheme selected
    run.command(
        'mrconvert T1_registered.mif T1_registered.nii -stride +1,+2,+3')
    if app.args.parcellation == 'fs_2005' or app.args.parcellation == 'fs_2009':

        # Run FreeSurfer pipeline on this subject's T1 image
        run.command('recon-all -sd ' + app.tempDir +
                    ' -subjid freesurfer -i T1_registered.nii')
        run.command('recon-all -sd ' + app.tempDir +
                    ' -subjid freesurfer -all')

        # Grab the relevant parcellation image and target lookup table for conversion
        parc_image_path = os.path.join('freesurfer', 'mri')
        if app.args.parcellation == 'fs_2005':
            parc_image_path = os.path.join(parc_image_path, 'aparc+aseg.mgz')
        else:
            parc_image_path = os.path.join(parc_image_path,
                                           'aparc.a2009s+aseg.mgz')

        # Perform the index conversion
        run.command('labelconvert ' + parc_image_path + ' ' + parc_lut_file +
                    ' ' + mrtrix_lut_file + ' parc_init.mif')
        if app.cleanup:
            run.function(shutil.rmtree, 'freesurfer')

        # Fix the sub-cortical grey matter parcellations using FSL FIRST
        run.command('labelsgmfix parc_init.mif T1_registered.mif ' +
                    mrtrix_lut_file + ' parc.mif')
        file.delTemporary('parc_init.mif')

    elif app.args.parcellation == 'aal' or app.args.parcellation == 'aal2':

        # Can use MNI152 image provided with FSL for registration
        run.command(flirt_cmd + ' -ref ' + mni152_path +
                    ' -in T1_registered.nii -omat T1_to_MNI_FLIRT.mat -dof 12')
        run.command('transformconvert T1_to_MNI_FLIRT.mat T1_registered.nii ' +
                    mni152_path + ' flirt_import T1_to_MNI_MRtrix.mat')
        file.delTemporary('T1_to_MNI_FLIRT.mat')
        run.command(
            'transformcalc T1_to_MNI_MRtrix.mat invert MNI_to_T1_MRtrix.mat')
        file.delTemporary('T1_to_MNI_MRtrix.mat')
        run.command('mrtransform ' + parc_image_path +
                    ' AAL.mif -linear MNI_to_T1_MRtrix.mat '
                    '-template T1_registered.mif -interp nearest')
        file.delTemporary('MNI_to_T1_MRtrix.mat')
        run.command('labelconvert AAL.mif ' + parc_lut_file + ' ' +
                    mrtrix_lut_file + ' parc.mif')
        file.delTemporary('AAL.mif')

    else:
        app.error('Unknown parcellation scheme requested: ' +
                  app.args.parcellation)
    file.delTemporary('T1_registered.nii')

    # Step 14: Generate the tractogram
    # If not manually specified, determine the appropriate number of streamlines based on the number of nodes in the parcellation:
    #   mean edge weight of 1,000 streamlines
    # A smaller FOD amplitude threshold of 0.06 (default 0.1) is used for tracking due to the use of the msmt_csd
    #   algorithm, which imposes a hard rather than soft non-negativity constraint
    num_nodes = int(image.statistic('parc.mif', 'max'))
    num_streamlines = 1000 * num_nodes * num_nodes
    if app.args.streamlines:
        num_streamlines = app.args.streamlines
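    # With the default rule, an 84-node parcellation, for example, would yield
    # 1000 * 84 * 84 = 7,056,000 streamlines.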
    run.command(
        'tckgen FOD_WM.mif tractogram.tck -act 5TT.mif -backtrack -crop_at_gmwmi -cutoff 0.06 -maxlength 250 -power 0.33 '
        '-select ' + str(num_streamlines) + ' -seed_dynamic FOD_WM.mif')

    # Step 15: Use SIFT2 to determine streamline weights
    fd_scale_gm_option = ''
    if not multishell:
        fd_scale_gm_option = ' -fd_scale_gm'
    run.command(
        'tcksift2 tractogram.tck FOD_WM.mif weights.csv -act 5TT.mif -out_mu mu.txt'
        + fd_scale_gm_option)

    # Step 16: Generate a TDI (to verify that SIFT2 has worked correctly)
    with open('mu.txt', 'r') as f:
        mu = float(f.read())
    run.command(
        'tckmap tractogram.tck -tck_weights_in weights.csv -template FOD_WM.mif -precise - | '
        'mrcalc - ' + str(mu) + ' -mult tdi.mif')

    # Step 17: Generate the connectome
    #          Only provide the standard density-weighted connectome for now
    run.command(
        'tck2connectome tractogram.tck parc.mif connectome.csv -tck_weights_in weights.csv'
    )
    file.delTemporary('weights.csv')

    # Move necessary files to output directory
    run.function(
        shutil.copy, 'connectome.csv',
        os.path.join(output_dir, 'connectome', label + '_connectome.csv'))
    run.command('mrconvert dwi.mif ' +
                os.path.join(output_dir, 'dwi', label + '_dwi.nii.gz') +
                ' -export_grad_fsl ' +
                os.path.join(output_dir, 'dwi', label + '_dwi.bvec') + ' ' +
                os.path.join(output_dir, 'dwi', label + '_dwi.bval') +
                ' -json_export ' +
                os.path.join(output_dir, 'dwi', label + '_dwi.json'))
    run.command('mrconvert tdi.mif ' +
                os.path.join(output_dir, 'dwi', label + '_tdi.nii.gz'))
    run.function(shutil.copy, 'mu.txt',
                 os.path.join(output_dir, 'connectome', label + '_mu.txt'))
    run.function(shutil.copy, 'response_wm.txt',
                 os.path.join(output_dir, 'dwi', label + '_response.txt'))

    # Manually wipe and zero the temp directory (since we might be processing more than one subject)
    os.chdir(cwd)
    if app.cleanup:
        app.console('Deleting temporary directory ' + app.tempDir)
        # Can't use run.function() here; it'll try to write to the log file that resides in the temp directory just deleted
        shutil.rmtree(app.tempDir)
    else:
        app.console('Contents of temporary directory kept, location: ' +
                    app.tempDir)
    app.tempDir = ''
Example #28
0
def execute():  #pylint: disable=unused-variable
    import math, os, shutil
    from mrtrix3 import app, image, matrix, MRtrixError, path, run

    lmax_option = ''
    if app.ARGS.lmax:
        lmax_option = ' -lmax ' + app.ARGS.lmax

    convergence_change = 0.01 * app.ARGS.convergence

    progress = app.ProgressBar('Optimising')

    iteration = 0
    while iteration < app.ARGS.max_iters:
        prefix = 'iter' + str(iteration) + '_'

        # How to initialise response function?
        # old dwi2response command used mean & standard deviation of DWI data; however
        #   this may force the output FODs to lmax=2 at the first iteration
        # Chantal used a tensor with low FA, but it'd be preferable to get the scaling right
        # Other option is to do as before, but get the ratio between l=0 and l=2, and
        #   generate l=4,6,... using that amplitude ratio
        if iteration == 0:
            rf_in_path = 'init_RF.txt'
            mask_in_path = 'mask.mif'

            # Grab the mean and standard deviation across all volumes in a single mrstats call
            # Also scale them to reflect the fact that we're moving to the SH basis
            mean = image.statistic('dwi.mif', 'mean',
                                   '-mask mask.mif -allvolumes') * math.sqrt(
                                       4.0 * math.pi)
            std = image.statistic('dwi.mif', 'std',
                                  '-mask mask.mif -allvolumes') * math.sqrt(
                                      4.0 * math.pi)

            # Now produce the initial response function
            # Let's only do it to lmax 4
            init_rf = [
                str(mean),
                str(-0.5 * std),
                str(0.25 * std * std / mean)
            ]
            with open('init_RF.txt', 'w') as init_rf_file:
                init_rf_file.write(' '.join(init_rf))
        else:
            rf_in_path = 'iter' + str(iteration - 1) + '_RF.txt'
            mask_in_path = 'iter' + str(iteration - 1) + '_SF.mif'

        # Run CSD
        run.command('dwi2fod csd dwi.mif ' + rf_in_path + ' ' + prefix +
                    'FOD.mif -mask ' + mask_in_path)
        # Get amplitudes of two largest peaks, and directions of largest
        run.command('fod2fixel ' + prefix + 'FOD.mif ' + prefix +
                    'fixel -peak peaks.mif -mask ' + mask_in_path +
                    ' -fmls_no_thresholds')
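        # -fmls_no_thresholds disables the default FOD lobe segmentation thresholds, so
        #   the amplitude of the second-largest peak remains available for the ratio test below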
        app.cleanup(prefix + 'FOD.mif')
        run.command('fixel2voxel ' + prefix + 'fixel/peaks.mif split_data ' +
                    prefix + 'amps.mif')
        run.command('mrconvert ' + prefix + 'amps.mif ' + prefix +
                    'first_peaks.mif -coord 3 0 -axes 0,1,2')
        run.command('mrconvert ' + prefix + 'amps.mif ' + prefix +
                    'second_peaks.mif -coord 3 1 -axes 0,1,2')
        app.cleanup(prefix + 'amps.mif')
        run.command('fixel2voxel ' + prefix +
                    'fixel/directions.mif split_dir ' + prefix +
                    'all_dirs.mif')
        app.cleanup(prefix + 'fixel')
        run.command('mrconvert ' + prefix + 'all_dirs.mif ' + prefix +
                    'first_dir.mif -coord 3 0:2')
        app.cleanup(prefix + 'all_dirs.mif')
        # Revise single-fibre voxel selection based on ratio of tallest to second-tallest peak
        run.command('mrcalc ' + prefix + 'second_peaks.mif ' + prefix +
                    'first_peaks.mif -div ' + prefix + 'peak_ratio.mif')
        app.cleanup(prefix + 'first_peaks.mif')
        app.cleanup(prefix + 'second_peaks.mif')
        run.command('mrcalc ' + prefix + 'peak_ratio.mif ' +
                    str(app.ARGS.peak_ratio) + ' -lt ' + mask_in_path +
                    ' -mult ' + prefix + 'SF.mif -datatype bit')
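        # The mrcalc expression above marks as single-fibre those voxels (within the current
        #   mask) where the second-largest peak amplitude is less than peak_ratio times the largest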
        app.cleanup(prefix + 'peak_ratio.mif')
        # Make sure image isn't empty
        sf_voxel_count = image.statistic(prefix + 'SF.mif', 'count',
                                         '-mask ' + prefix + 'SF.mif')
        if not sf_voxel_count:
            raise MRtrixError(
                'Aborting: All voxels have been excluded from single-fibre selection'
            )
        # Generate a new response function
        run.command('amp2response dwi.mif ' + prefix + 'SF.mif ' + prefix +
                    'first_dir.mif ' + prefix + 'RF.txt' + lmax_option)
        app.cleanup(prefix + 'first_dir.mif')

        new_rf = matrix.load_vector(prefix + 'RF.txt')
        progress.increment('Optimising (' + str(iteration + 1) +
                           ' iterations, ' + str(sf_voxel_count) +
                           ' voxels, RF: [ ' + ', '.join('{:.3f}'.format(n)
                                                         for n in new_rf) +
                           ' ])')

        # Detect convergence
        # Look for a change > some percentage - don't bother looking at the masks
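        # For each coefficient the change is measured as |old - new| / (old + new), i.e. half
        #   the absolute difference divided by the mean of the two values; convergence requires
        #   this ratio to stay below the threshold for every coefficient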
        if iteration > 0:
            old_rf = matrix.load_vector(rf_in_path)
            reiterate = False
            for old_value, new_value in zip(old_rf, new_rf):
                mean = 0.5 * (old_value + new_value)
                diff = math.fabs(0.5 * (old_value - new_value))
                ratio = diff / mean
                if ratio > convergence_change:
                    reiterate = True
            if not reiterate:
                run.function(shutil.copyfile, prefix + 'RF.txt',
                             'response.txt')
                run.function(shutil.copyfile, prefix + 'SF.mif', 'voxels.mif')
                break

        app.cleanup(rf_in_path)
        app.cleanup(mask_in_path)

        iteration += 1

    progress.done()

    # If we've terminated due to hitting the iteration limit, we still need to copy the output file(s) to the correct location
    if os.path.exists('response.txt'):
        app.console('Exited at iteration ' + str(iteration + 1) + ' with ' +
                    str(sf_voxel_count) +
                    ' SF voxels due to unchanged RF coefficients')
    else:
        app.console('Exited after maximum ' + str(app.ARGS.max_iters) +
                    ' iterations with ' + str(sf_voxel_count) + ' SF voxels')
        run.function(shutil.copyfile,
                     'iter' + str(app.ARGS.max_iters - 1) + '_RF.txt',
                     'response.txt')
        run.function(shutil.copyfile,
                     'iter' + str(app.ARGS.max_iters - 1) + '_SF.mif',
                     'voxels.mif')

    run.function(shutil.copyfile, 'response.txt',
                 path.from_user(app.ARGS.output, False))
    if app.ARGS.voxels:
        run.command('mrconvert voxels.mif ' + path.from_user(app.ARGS.voxels),
                    mrconvert_keyval=path.from_user(app.ARGS.input, False),
                    force=app.FORCE_OVERWRITE)
Example #29
def execute():
  import math, os, shutil
  from mrtrix3 import app, image, path, run

  # Ideally want to use the oversampling-based regridding of the 5TT image from the SIFT model, not mrtransform
  # May need to commit 5ttregrid...

  # Verify input 5tt image
  run.command('5ttcheck 5tt.mif', False)
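  # In this older scripting API the second positional argument is (presumably) the
  #   exitOnError flag: passing False lets the script continue even if 5ttcheck
  #   reports problems with the input 5TT image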

  # Get shell information
  shells = [ int(round(float(x))) for x in image.headerField('dwi.mif', 'shells').split() ]
  if len(shells) < 3:
    app.warn('Less than three b-value shells; response functions will not be applicable in resolving three tissues using MSMT-CSD algorithm')

  # Get lmax information (if provided)
  wm_lmax = [ ]
  if app.args.lmax:
    wm_lmax = [ int(x.strip()) for x in app.args.lmax.split(',') ]
    if not len(wm_lmax) == len(shells):
      app.error('Number of manually-defined lmax\'s (' + str(len(wm_lmax)) + ') does not match number of b-value shells (' + str(len(shells)) + ')')
    for l in wm_lmax:
      if l % 2:
        app.error('Values for lmax must be even')
      if l < 0:
        app.error('Values for lmax must be non-negative')

  run.command('dwi2tensor dwi.mif - -mask mask.mif | tensor2metric - -fa fa.mif -vector vector.mif')
  if not os.path.exists('dirs.mif'):
    run.function(shutil.copy, 'vector.mif', 'dirs.mif')
  run.command('mrtransform 5tt.mif 5tt_regrid.mif -template fa.mif -interp linear')

  # Basic tissue masks
  run.command('mrconvert 5tt_regrid.mif - -coord 3 2 -axes 0,1,2 | mrcalc - ' + str(app.args.pvf) + ' -gt mask.mif -mult wm_mask.mif')
  run.command('mrconvert 5tt_regrid.mif - -coord 3 0 -axes 0,1,2 | mrcalc - ' + str(app.args.pvf) + ' -gt fa.mif ' + str(app.args.fa) + ' -lt -mult mask.mif -mult gm_mask.mif')
  run.command('mrconvert 5tt_regrid.mif - -coord 3 3 -axes 0,1,2 | mrcalc - ' + str(app.args.pvf) + ' -gt fa.mif ' + str(app.args.fa) + ' -lt -mult mask.mif -mult csf_mask.mif')
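  # Each mask above requires the corresponding 5TT partial volume fraction to exceed the
  #   -pvf threshold within the brain mask; the GM and CSF masks additionally require FA
  #   below the -fa threshold to exclude strongly anisotropic (WM-contaminated) voxels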

  # Revise WM mask to only include single-fibre voxels
  app.console('Calling dwi2response recursively to select WM single-fibre voxels using \'' + app.args.wm_algo + '\' algorithm')
  recursive_cleanup_option = ''
  if not app._cleanup:
    recursive_cleanup_option = ' -nocleanup'
  run.command('dwi2response ' + app.args.wm_algo + ' dwi.mif wm_ss_response.txt -mask wm_mask.mif -voxels wm_sf_mask.mif -tempdir ' + app._tempDir + recursive_cleanup_option)

  # Check for empty masks
  wm_voxels  = int(image.statistic('wm_sf_mask.mif', 'count', 'wm_sf_mask.mif'))
  gm_voxels  = int(image.statistic('gm_mask.mif',    'count', 'gm_mask.mif'))
  csf_voxels = int(image.statistic('csf_mask.mif',   'count', 'csf_mask.mif'))
  empty_masks = [ ]
  if not wm_voxels:
    empty_masks.append('WM')
  if not gm_voxels:
    empty_masks.append('GM')
  if not csf_voxels:
    empty_masks.append('CSF')
  if empty_masks:
    message = ','.join(empty_masks)
    message += ' tissue mask'
    if len(empty_masks) > 1:
      message += 's'
    message += ' empty; cannot estimate response function'
    if len(empty_masks) > 1:
      message += 's'
    app.error(message)

  # For each of the three tissues, generate a multi-shell response
  bvalues_option = ' -shell ' + ','.join(map(str,shells))
  sfwm_lmax_option = ''
  if wm_lmax:
    sfwm_lmax_option = ' -lmax ' + ','.join(map(str,wm_lmax))
  run.command('amp2response dwi.mif wm_sf_mask.mif dirs.mif wm.txt' + bvalues_option + sfwm_lmax_option)
  run.command('amp2response dwi.mif gm_mask.mif dirs.mif gm.txt' + bvalues_option + ' -isotropic')
  run.command('amp2response dwi.mif csf_mask.mif dirs.mif csf.txt' + bvalues_option + ' -isotropic')
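  # The -isotropic flag constrains the GM and CSF responses to lmax=0 (a single coefficient
  #   per b-value shell), whereas the WM response uses the per-shell lmax values if provided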
  run.function(shutil.copyfile, 'wm.txt',  path.fromUser(app.args.out_wm,  False))
  run.function(shutil.copyfile, 'gm.txt',  path.fromUser(app.args.out_gm,  False))
  run.function(shutil.copyfile, 'csf.txt', path.fromUser(app.args.out_csf, False))

  # Generate output 4D binary image with voxel selections; RGB as in MSMT-CSD paper
  run.command('mrcat csf_mask.mif gm_mask.mif wm_sf_mask.mif voxels.mif -axis 3')