def transform_scalars(self, dataset, amount=0.5, threshold=0.0, sigma=1.0):
    """Sharpen the data set with ITK's UnsharpMaskImageFilter.

    The filter adds `amount` times the high-frequency detail (the
    difference between the image and a Gaussian-smoothed copy with
    standard deviation `sigma`) back onto the image wherever that
    detail exceeds `threshold`.

    NOTE: the previous docstring described Perona-Malik anisotropic
    diffusion; that was a copy-paste leftover from another operator.
    """
    # Initial progress
    self.progress.value = 0
    self.progress.maximum = 100

    # Approximate percentage of work completed after each step in the
    # transform
    step_pct = iter([10, 20, 90, 100])

    try:
        import itk
        from tomviz import itkutils
    except Exception as exc:
        print("Could not import necessary module(s)")
        raise exc

    try:
        self.progress.message = "Converting data to ITK image"
        self.progress.value = 0

        # Get the ITK image
        itk_image = itkutils.convert_vtk_to_itk_image(dataset)
        self.progress.value = next(step_pct)

        self.progress.message = "Running filter"
        self.progress.value = next(step_pct)

        unsharp_mask = \
            itk.UnsharpMaskImageFilter.New(Input=itk_image)
        unsharp_mask.SetAmount(amount)
        unsharp_mask.SetThreshold(threshold)
        unsharp_mask.SetSigma(sigma)
        itkutils.observe_filter_progress(self, unsharp_mask,
                                         self.progress.value,
                                         next(step_pct))

        try:
            unsharp_mask.Update()
        except RuntimeError:
            # Raised when the user aborts the filter run
            return

        self.progress.message = "Saving results"
        enhanced = unsharp_mask.GetOutput()
        itkutils.set_array_from_itk_image(dataset, enhanced)
        self.progress.value = next(step_pct)
    except Exception as exc:
        print("Problem encountered while running %s" %
              self.__class__.__name__)
        raise exc
def transform_scalars(self, dataset, amount=0.5, threshold=0.0, sigma=1.0):
    """Sharpen the data set with an ITK unsharp-mask filter.

    Detail is extracted by subtracting a Gaussian-smoothed copy (standard
    deviation `sigma`); where the detail magnitude exceeds `threshold`,
    `amount` times that detail is added back onto the image.

    NOTE: the previous docstring (Perona-Malik anisotropic diffusion) and
    the comment naming GradientAnisotropicDiffusionImageFilter were
    copy-paste leftovers; this operator runs itk.UnsharpMaskImageFilter.
    """
    # Initial progress
    self.progress.value = 0
    self.progress.maximum = 100

    # Approximate percentage of work completed after each step in the
    # transform
    step_pct = iter([10, 20, 90, 100])

    try:
        import itk
        from tomviz import itkutils
    except Exception as exc:
        print("Could not import necessary module(s)")
        raise exc

    try:
        self.progress.message = "Converting data to ITK image"
        self.progress.value = 0

        # Get the ITK image
        itk_image = itkutils.convert_vtk_to_itk_image(dataset)
        self.progress.value = next(step_pct)

        self.progress.message = "Running filter"
        self.progress.value = next(step_pct)

        unsharp_mask = \
            itk.UnsharpMaskImageFilter.New(Input=itk_image)
        unsharp_mask.SetAmount(amount)
        unsharp_mask.SetThreshold(threshold)
        unsharp_mask.SetSigma(sigma)
        itkutils.observe_filter_progress(self, unsharp_mask,
                                         self.progress.value,
                                         next(step_pct))

        try:
            unsharp_mask.Update()
        except RuntimeError:
            # Raised when the user aborts the filter run
            return

        self.progress.message = "Saving results"
        enhanced = unsharp_mask.GetOutput()
        itkutils.set_array_from_itk_image(dataset, enhanced)
        self.progress.value = next(step_pct)
    except Exception as exc:
        print("Problem encountered while running %s" %
              self.__class__.__name__)
        raise exc
def transform_scalars(self, dataset):
    """Define this method for Python operators that transform the input array.

    This example uses an ITK filter (itk.AddImageFilter) to add 10 to each
    voxel value of the data set, in place.
    """
    # Try imports to make sure we have everything that is needed
    try:
        from tomviz import itkutils
        import itk
    except Exception as exc:
        print("Could not import necessary module(s)")
        raise exc

    self.progress.value = 0
    self.progress.maximum = 100

    # Add a try/except around the ITK portion. ITK exceptions are
    # passed up to the Python layer, so we can at least report what
    # went wrong with the script, e.g., unsupported image type.
    try:
        self.progress.value = 0
        self.progress.message = "Converting data to ITK image"

        # Get the ITK image
        itk_image = itkutils.convert_vtk_to_itk_image(dataset)
        itk_input_image_type = type(itk_image)

        self.progress.value = 30
        self.progress.message = "Running filter"

        # ITK filter (named add_filter rather than `filter` to avoid
        # shadowing the Python builtin).
        add_filter = itk.AddImageFilter[itk_input_image_type,  # Input 1
                                        itk_input_image_type,  # Input 2
                                        itk_input_image_type].New()  # Output
        add_filter.SetInput1(itk_image)
        add_filter.SetConstant2(10)
        itkutils.observe_filter_progress(self, add_filter, 30, 70)

        try:
            add_filter.Update()
        except RuntimeError:
            # Exception thrown when the ITK filter is aborted
            return

        self.progress.message = "Saving results"
        itkutils.set_array_from_itk_image(dataset, add_filter.GetOutput())

        self.progress.value = 100
    except Exception as exc:
        print("Problem encountered while running %s" %
              self.__class__.__name__)
        raise exc
def transform_scalars(self, dataset):
    """Define this method for Python operators that transform the input array.

    This example runs itk.AddImageFilter to add a constant value of 10 to
    every voxel of the data set, writing the result back in place.
    """
    # Try imports to make sure we have everything that is needed
    try:
        from tomviz import itkutils
        import itk
    except Exception as exc:
        print("Could not import necessary module(s)")
        raise exc

    self.progress.value = 0
    self.progress.maximum = 100

    # Add a try/except around the ITK portion. ITK exceptions are
    # passed up to the Python layer, so we can at least report what
    # went wrong with the script, e.g., unsupported image type.
    try:
        self.progress.value = 0
        self.progress.message = "Converting data to ITK image"

        # Get the ITK image
        itk_image = itkutils.convert_vtk_to_itk_image(dataset)
        itk_input_image_type = type(itk_image)

        self.progress.value = 30
        self.progress.message = "Running filter"

        # ITK filter (renamed from `filter`, which shadows the builtin)
        add_filter = itk.AddImageFilter[itk_input_image_type,  # Input 1
                                        itk_input_image_type,  # Input 2
                                        itk_input_image_type].New()  # Output
        add_filter.SetInput1(itk_image)
        add_filter.SetConstant2(10)
        itkutils.observe_filter_progress(self, add_filter, 30, 70)

        try:
            add_filter.Update()
        except RuntimeError:
            # Exception thrown when the ITK filter is aborted
            return

        self.progress.message = "Saving results"
        itkutils.set_array_from_itk_image(dataset, add_filter.GetOutput())

        self.progress.value = 100
    except Exception as exc:
        print("Problem encountered while running %s" %
              self.__class__.__name__)
        raise exc
def transform_scalars(self, dataset, lower_threshold=40.0,
                      upper_threshold=255.0):
    """Compute a binary threshold of the data set.

    Voxels whose value falls in [lower_threshold, upper_threshold] become
    1, all others 0. The label map is stored in a child data set; the
    input data set is not modified.
    """
    # Initial progress
    self.progress.value = 0
    self.progress.maximum = 100

    # Approximate percentage of work completed after each step in the
    # transform
    progress_steps = [20, 40, 75, 90, 100]

    result = None

    # Verify every required module can be imported before doing any work
    try:
        self.progress.message = "Loading modules"
        import itk
        import vtk
        from tomviz import itkutils
    except Exception as exc:
        print("Could not import necessary module(s)")
        raise exc

    # ITK exceptions are passed up to the Python layer, so wrap the ITK
    # portion to report problems such as an unsupported image type.
    try:
        self.progress.value = progress_steps[0]
        self.progress.message = "Converting data to ITK image"

        # Convert the input to an ITK image
        itk_image = itkutils.convert_vtk_to_itk_image(dataset)
        input_image_type = type(itk_image)

        self.progress.value = progress_steps[1]
        self.progress.message = "Running filter"

        # Use an unsigned char output (itk.Image.UC3) to keep the label
        # map representation small.
        output_image_type = itk.Image.UC3

        # ITK's BinaryThresholdImageFilter does the hard work
        threshold_filter = itk.BinaryThresholdImageFilter[
            input_image_type, output_image_type].New()
        to_voxel = itkutils.get_python_voxel_type(itk_image)
        threshold_filter.SetLowerThreshold(to_voxel(lower_threshold))
        threshold_filter.SetUpperThreshold(to_voxel(upper_threshold))
        threshold_filter.SetInsideValue(1)
        threshold_filter.SetOutsideValue(0)
        threshold_filter.SetInput(itk_image)
        itkutils.observe_filter_progress(self, threshold_filter,
                                         progress_steps[2],
                                         progress_steps[3])

        try:
            threshold_filter.Update()
        except RuntimeError:
            # The user aborted the filter run
            return result

        self.progress.message = "Creating child data set"

        # Store the output in a new child data object of the input
        label_map_dataset = vtk.vtkImageData()
        label_map_dataset.CopyStructure(dataset)

        itkutils.set_array_from_itk_image(label_map_dataset,
                                          threshold_filter.GetOutput())

        self.progress.value = progress_steps[4]

        result = {"thresholded_segmentation": label_map_dataset}
    except Exception as exc:
        print("Problem encountered while running %s" %
              self.__class__.__name__)
        raise exc

    return result
def transform_scalars(self, dataset, number_of_thresholds=1,
                      enable_valley_emphasis=False):
    """This filter performs semi-automatic multithresholding of a data set.
    Voxels are automatically classified into a chosen number of classes such
    that inter-class variance of the voxel values is minimized. The output is
    a label map with one label per voxel class.

    number_of_thresholds: number of Otsu thresholds, producing
        number_of_thresholds + 1 classes.
    enable_valley_emphasis: if True, use the valley-emphasis variant of the
        Otsu criterion.
    Returns {"label_map": vtkImageData} on success, or None if the user
    aborts one of the filters.
    """
    # Initial progress
    self.progress.value = 0
    self.progress.maximum = 100

    # Approximate percentage of work completed after each step in the
    # transform
    STEP_PCT = [10, 20, 70, 90, 100]

    try:
        import itk
        import itkExtras
        import itkTypes
        from vtkmodules.vtkCommonDataModel import vtkImageData
        from tomviz import itkutils
        from tomviz import utils
    except Exception as exc:
        print("Could not import necessary module(s)")
        raise exc

    # Return values
    returnValues = None

    # Add a try/except around the ITK portion. ITK exceptions are
    # passed up to the Python layer, so we can at least report what
    # went wrong with the script, e.g., unsupported image type.
    try:
        self.progress.value = STEP_PCT[0]
        self.progress.message = "Converting data to ITK image"

        # Get the ITK image
        itk_image = itkutils.convert_vtk_to_itk_image(dataset)
        itk_input_image_type = type(itk_image)

        # OtsuMultipleThresholdsImageFilter's wrapping requires that the
        # input and output image types be the same.
        itk_threshold_image_type = itk_input_image_type

        # Otsu multiple threshold filter
        otsu_filter = itk.OtsuMultipleThresholdsImageFilter[
            itk_input_image_type, itk_threshold_image_type].New()
        otsu_filter.SetNumberOfThresholds(number_of_thresholds)
        otsu_filter.SetValleyEmphasis(enable_valley_emphasis)
        otsu_filter.SetInput(itk_image)
        itkutils.observe_filter_progress(self, otsu_filter,
                                         STEP_PCT[1], STEP_PCT[2])

        try:
            otsu_filter.Update()
        except RuntimeError:
            # The user aborted the filter run
            return

        print("Otsu threshold(s): %s" % (otsu_filter.GetThresholds(),))

        itk_image_data = otsu_filter.GetOutput()

        # Cast threshold output to an integral type if needed.
        py_buffer_type = itk_threshold_image_type
        voxel_type = itkExtras.template(itk_threshold_image_type)[1][0]
        if voxel_type is itkTypes.F or voxel_type is itkTypes.D:
            self.progress.message = "Casting output to integral type"

            # Unsigned char supports 256 labels, or 255 threshold levels.
            # This should be sufficient for all but the most unusual use
            # cases.
            py_buffer_type = itk.Image.UC3
            caster = itk.CastImageFilter[itk_threshold_image_type,
                                         py_buffer_type].New()
            caster.SetInput(itk_image_data)
            itkutils.observe_filter_progress(self, caster,
                                             STEP_PCT[2], STEP_PCT[3])

            try:
                caster.Update()
            except RuntimeError:
                # The user aborted the cast
                return

            itk_image_data = caster.GetOutput()

        self.progress.value = STEP_PCT[3]
        self.progress.message = "Saving results"

        label_buffer = itk.PyBuffer[py_buffer_type] \
            .GetArrayFromImage(itk_image_data)

        # Store the label map in a new vtkImageData with the same
        # geometry as the input.
        label_map_dataset = vtkImageData()
        label_map_dataset.CopyStructure(dataset)
        utils.set_array(label_map_dataset, label_buffer, isFortran=False)

        self.progress.value = STEP_PCT[4]

        # Set up dictionary to return operator results
        returnValues = {}
        returnValues["label_map"] = label_map_dataset
    except Exception as exc:
        print("Problem encountered while running %s" %
              self.__class__.__name__)
        raise exc

    return returnValues
def transform_scalars(self, dataset, number_of_thresholds=1,
                      enable_valley_emphasis=False):
    """This filter performs semi-automatic multithresholding of a data set.
    Voxels are automatically classified into a chosen number of classes such
    that inter-class variance of the voxel values is minimized. The output is
    a label map with one label per voxel class.

    Returns {"label_map": vtkImageData} on success, or None if the user
    aborts one of the filters.
    """
    # Initial progress
    self.progress.value = 0
    self.progress.maximum = 100

    # Approximate percentage of work completed after each step in the
    # transform
    STEP_PCT = [10, 20, 70, 90, 100]

    try:
        import itk
        import itkExtras
        import itkTypes
        import vtk
        from tomviz import itkutils
        from tomviz import utils
    except Exception as exc:
        print("Could not import necessary module(s)")
        raise exc

    # Return values
    returnValues = None

    # Add a try/except around the ITK portion. ITK exceptions are
    # passed up to the Python layer, so we can at least report what
    # went wrong with the script, e.g., unsupported image type.
    try:
        self.progress.value = STEP_PCT[0]
        self.progress.message = "Converting data to ITK image"

        # Get the ITK image
        itk_image = itkutils.convert_vtk_to_itk_image(dataset)
        itk_input_image_type = type(itk_image)

        # OtsuMultipleThresholdsImageFilter's wrapping requires that the
        # input and output image types be the same.
        itk_threshold_image_type = itk_input_image_type

        # Otsu multiple threshold filter
        otsu_filter = itk.OtsuMultipleThresholdsImageFilter[
            itk_input_image_type, itk_threshold_image_type].New()
        otsu_filter.SetNumberOfThresholds(number_of_thresholds)
        otsu_filter.SetValleyEmphasis(enable_valley_emphasis)
        otsu_filter.SetInput(itk_image)
        itkutils.observe_filter_progress(self, otsu_filter,
                                         STEP_PCT[1], STEP_PCT[2])

        try:
            otsu_filter.Update()
        except RuntimeError:
            # The user aborted the filter run
            return

        print("Otsu threshold(s): %s" % (otsu_filter.GetThresholds(),))

        itk_image_data = otsu_filter.GetOutput()

        # Cast threshold output to an integral type if needed.
        py_buffer_type = itk_threshold_image_type
        voxel_type = itkExtras.template(itk_threshold_image_type)[1][0]
        if voxel_type is itkTypes.F or voxel_type is itkTypes.D:
            self.progress.message = "Casting output to integral type"

            # Unsigned char supports 256 labels, or 255 threshold levels.
            # This should be sufficient for all but the most unusual use
            # cases.
            py_buffer_type = itk.Image.UC3
            caster = itk.CastImageFilter[itk_threshold_image_type,
                                         py_buffer_type].New()
            caster.SetInput(itk_image_data)
            itkutils.observe_filter_progress(self, caster,
                                             STEP_PCT[2], STEP_PCT[3])

            try:
                caster.Update()
            except RuntimeError:
                # The user aborted the cast
                return

            itk_image_data = caster.GetOutput()

        self.progress.value = STEP_PCT[3]
        self.progress.message = "Saving results"

        label_buffer = itk.PyBuffer[py_buffer_type] \
            .GetArrayFromImage(itk_image_data)

        label_map_dataset = vtk.vtkImageData()
        label_map_dataset.CopyStructure(dataset)
        # NOTE(review): the sibling vtkmodules-based variant of this
        # operator passes isFortran=False here; this call relies on
        # set_array's default ordering instead — confirm the two are
        # intentionally different.
        utils.set_array(label_map_dataset, label_buffer)

        self.progress.value = STEP_PCT[4]

        # Set up dictionary to return operator results
        returnValues = {}
        returnValues["label_map"] = label_map_dataset
    except Exception as exc:
        print("Problem encountered while running %s" %
              self.__class__.__name__)
        raise exc

    return returnValues
def connected_components(dataset, background_value=0, progress_callback=None):
    """Relabel `dataset` in place with its connected components.

    Components are labeled with unsigned short values, compressed to a
    contiguous range, sorted by size, then flipped so the largest component
    carries the highest label; `background_value` voxels stay 0.
    `progress_callback(fraction)` is invoked periodically with a value in
    [0, 1]; a truthy return value aborts the filters.
    """
    try:
        import itk
        import itkTypes
        import vtk
        from tomviz import itkutils
    except Exception as exc:
        print("Could not import necessary module(s)")
        # Fail fast: the original code continued after a failed import,
        # which only deferred the failure to a confusing NameError below.
        raise exc

    scalarType = dataset.GetScalarType()
    if scalarType == vtk.VTK_FLOAT or scalarType == vtk.VTK_DOUBLE:
        raise Exception(
            "Connected Components works only on images with integral types.")

    # Add a try/except around the ITK portion. ITK exceptions are
    # passed up to the Python layer, so we can at least report what
    # went wrong with the script, e.g., unsupported image type.
    try:
        # Get the ITK image. The input is assumed to have an integral type.
        # Take care of casting to an unsigned short image so we can store up
        # to 65,535 connected components (the number of connected components
        # is limited to the maximum representable number in the voxel type
        # of the input image in the ConnectedComponentsFilter).
        itk_image = itkutils.convert_vtk_to_itk_image(dataset, itkTypes.US)
        itk_image_type = type(itk_image)

        # ConnectedComponentImageFilter
        connected_filter = itk.ConnectedComponentImageFilter[
            itk_image_type, itk_image_type].New()
        connected_filter.SetBackgroundValue(background_value)
        connected_filter.SetInput(itk_image)

        if progress_callback is not None:

            def connected_progress_func():
                # Connected components covers the first half of progress
                progress = connected_filter.GetProgress()
                abort = progress_callback(progress * 0.5)
                connected_filter.SetAbortGenerateData(abort)

            connected_observer = itk.PyCommand.New()
            connected_observer.SetCommandCallable(connected_progress_func)
            connected_filter.AddObserver(itk.ProgressEvent(),
                                         connected_observer)

        # Relabel filter. This will compress the label numbers to a
        # contiguous range between 1 and n where n is the number of
        # labels. It will also sort the components from largest to
        # smallest, where the largest component has label 1, the
        # second largest has label 2, and so on...
        relabel_filter = itk.RelabelComponentImageFilter[
            itk_image_type, itk_image_type].New()
        relabel_filter.SetInput(connected_filter.GetOutput())
        relabel_filter.SortByObjectSizeOn()

        if progress_callback is not None:

            def relabel_progress_func():
                # Relabeling covers the second half of progress
                progress = relabel_filter.GetProgress()
                abort = progress_callback(progress * 0.5 + 0.5)
                relabel_filter.SetAbortGenerateData(abort)

            relabel_observer = itk.PyCommand.New()
            relabel_observer.SetCommandCallable(relabel_progress_func)
            relabel_filter.AddObserver(itk.ProgressEvent(), relabel_observer)

        try:
            relabel_filter.Update()
        except RuntimeError:
            # The user aborted via the progress callback
            return

        itk_image_data = relabel_filter.GetOutput()
        label_buffer = itk.PyBuffer[
            itk_image_type].GetArrayFromImage(itk_image_data)

        # Flip the labels so that the largest component has the highest label
        # value, e.g., the labeling ordering by size goes from [1, 2, ... N]
        # to [N, N-1, N-2, ..., 1]. Note that zero is the background value,
        # so we do not want to change it.
        import numpy as np
        minimum = 1  # Minimum label is always 1, background is 0
        maximum = np.max(label_buffer)

        # Memory-efficient in-place flip of the non-background labels
        gt_zero = label_buffer > 0
        label_buffer[gt_zero] = minimum - label_buffer[gt_zero] + maximum

        # NOTE(review): set_array is expected at module level (presumably
        # `from tomviz.utils import set_array`) — confirm against the
        # full file's imports.
        set_array(dataset, label_buffer, isFortran=False)
    except Exception as exc:
        print("Problem encountered while running ConnectedComponents")
        raise exc
def transform_scalars(self, dataset, lower_threshold=40.0, upper_threshold=255.0): """This filter computes a binary threshold on the data set and stores the result in a child data set. It does not modify the dataset passed in.""" # Initial progress self.progress.value = 0 self.progress.maximum = 100 # Approximate percentage of work completed after each step in the # transform STEP_PCT = [20, 40, 75, 90, 100] # Set up return value returnValue = None # Try imports to make sure we have everything that is needed try: self.progress.message = "Loading modules" import itk from vtkmodules.vtkCommonDataModel import vtkImageData from tomviz import itkutils except Exception as exc: print("Could not import necessary module(s)") raise exc # Add a try/except around the ITK portion. ITK exceptions are # passed up to the Python layer, so we can at least report what # went wrong with the script, e.g,, unsupported image type. try: self.progress.value = STEP_PCT[0] self.progress.message = "Converting data to ITK image" # Get the ITK image itk_image = itkutils.convert_vtk_to_itk_image(dataset) itk_input_image_type = type(itk_image) self.progress.value = STEP_PCT[1] self.progress.message = "Running filter" # We change the output type to unsigned char 3D # (itk.Image.UC3D) to save memory in the output label map # representation. 
itk_output_image_type = itk.Image.UC3 # ITK's BinaryThresholdImageFilter does the hard work threshold_filter = itk.BinaryThresholdImageFilter[ itk_input_image_type, itk_output_image_type].New() python_cast = itkutils.get_python_voxel_type(itk_image) threshold_filter.SetLowerThreshold(python_cast(lower_threshold)) threshold_filter.SetUpperThreshold(python_cast(upper_threshold)) threshold_filter.SetInsideValue(1) threshold_filter.SetOutsideValue(0) threshold_filter.SetInput(itk_image) itkutils.observe_filter_progress(self, threshold_filter, STEP_PCT[2], STEP_PCT[3]) try: threshold_filter.Update() except RuntimeError: return returnValue self.progress.message = "Creating child data set" # Set the output as a new child data object of the current data set label_map_dataset = vtkImageData() label_map_dataset.CopyStructure(dataset) itkutils.set_array_from_itk_image(label_map_dataset, threshold_filter.GetOutput()) self.progress.value = STEP_PCT[4] returnValue = {"thresholded_segmentation": label_map_dataset} except Exception as exc: print("Problem encountered while running %s" % self.__class__.__name__) raise exc return returnValue
def connected_components(dataset, background_value=0, progress_callback=None):
    """Relabel `dataset` in place with its connected components.

    Labels are compressed to a contiguous range, sorted by component size,
    and flipped so the largest component receives the highest label while
    `background_value` voxels remain 0. `progress_callback(fraction)` is
    called with a value in [0, 1]; a truthy return aborts the filters.
    """
    try:
        import itk
        import itkTypes
        import vtk
        from tomviz import itkutils
    except Exception as exc:
        print("Could not import necessary module(s)")
        # Fail fast: continuing after a failed import (as the original
        # code did) only deferred the failure to a NameError below.
        raise exc

    scalarType = dataset.GetScalarType()
    if scalarType == vtk.VTK_FLOAT or scalarType == vtk.VTK_DOUBLE:
        raise Exception(
            "Connected Components works only on images with integral types.")

    # Add a try/except around the ITK portion. ITK exceptions are
    # passed up to the Python layer, so we can at least report what
    # went wrong with the script, e.g., unsupported image type.
    try:
        # Get the ITK image. The input is assumed to have an integral type.
        # Take care of casting to an unsigned short image so we can store up
        # to 65,535 connected components (the number of connected components
        # is limited to the maximum representable number in the voxel type
        # of the input image in the ConnectedComponentsFilter).
        itk_image = itkutils.convert_vtk_to_itk_image(dataset, itkTypes.US)
        itk_image_type = type(itk_image)

        # ConnectedComponentImageFilter
        connected_filter = itk.ConnectedComponentImageFilter[
            itk_image_type, itk_image_type].New()
        connected_filter.SetBackgroundValue(background_value)
        connected_filter.SetInput(itk_image)

        if progress_callback is not None:

            def connected_progress_func():
                # First half of the overall progress
                progress = connected_filter.GetProgress()
                abort = progress_callback(progress * 0.5)
                connected_filter.SetAbortGenerateData(abort)

            connected_observer = itk.PyCommand.New()
            connected_observer.SetCommandCallable(connected_progress_func)
            connected_filter.AddObserver(itk.ProgressEvent(),
                                         connected_observer)

        # Relabel filter. This will compress the label numbers to a
        # contiguous range between 1 and n where n is the number of
        # labels. It will also sort the components from largest to
        # smallest, where the largest component has label 1, the
        # second largest has label 2, and so on...
        relabel_filter = itk.RelabelComponentImageFilter[itk_image_type,
                                                         itk_image_type].New()
        relabel_filter.SetInput(connected_filter.GetOutput())
        relabel_filter.SortByObjectSizeOn()

        if progress_callback is not None:

            def relabel_progress_func():
                # Second half of the overall progress
                progress = relabel_filter.GetProgress()
                abort = progress_callback(progress * 0.5 + 0.5)
                relabel_filter.SetAbortGenerateData(abort)

            relabel_observer = itk.PyCommand.New()
            relabel_observer.SetCommandCallable(relabel_progress_func)
            relabel_filter.AddObserver(itk.ProgressEvent(), relabel_observer)

        try:
            relabel_filter.Update()
        except RuntimeError:
            # The user aborted via the progress callback
            return

        itk_image_data = relabel_filter.GetOutput()
        label_buffer = itk.PyBuffer[itk_image_type].GetArrayFromImage(
            itk_image_data)

        # Flip the labels so that the largest component has the highest label
        # value, e.g., the labeling ordering by size goes from [1, 2, ... N]
        # to [N, N-1, N-2, ..., 1]. Note that zero is the background value,
        # so we do not want to change it.
        import numpy as np
        minimum = 1  # Minimum label is always 1, background is 0
        maximum = np.max(label_buffer)

        # Memory-efficient in-place flip of the non-background labels
        gt_zero = label_buffer > 0
        label_buffer[gt_zero] = minimum - label_buffer[gt_zero] + maximum

        # NOTE(review): set_array is expected at module level (presumably
        # `from tomviz.utils import set_array`) — confirm against the
        # full file's imports.
        set_array(dataset, label_buffer, isFortran=False)
    except Exception as exc:
        print("Problem encountered while running ConnectedComponents")
        raise exc
def transform_scalars(self, dataset, minimum_radius=4):
    """Segment spherical particles from a homogeneous, dark background.
    Even if the particles have pores, they are segmented as solid
    structures.

    Returns {"label_map": vtkImageData} with the particle mask, or None
    if a step is aborted.
    """
    # Initial progress
    self.progress.value = 0
    self.progress.maximum = 100

    # Each of the ten processing steps below accounts for roughly 10% of
    # the total work; the iterator is consumed here and by the helpers.
    progress_steps = iter([10] * 10)

    try:
        import itk
        import vtk
        from tomviz import itkutils
        from tomviz import utils
    except Exception as exc:
        print("Could not import necessary module(s)")
        raise exc

    segmentation_result = None

    # Wrap the ITK portion: ITK exceptions are passed up to the Python
    # layer, so we can at least report what went wrong with the script,
    # e.g., an unsupported image type.
    try:
        self.progress.message = "Converting data to ITK image"

        itk_input_image = itkutils.convert_vtk_to_itk_image(dataset)
        self.progress.value = next(progress_steps)

        smoothed = median_filter(self, progress_steps, itk_input_image)

        Dimension = 3
        StructuringElementType = itk.FlatStructuringElement[Dimension]
        structuring_element = StructuringElementType.Ball(minimum_radius)

        # Reduces reconstruction streak artifact effects and artifacts far
        # from the center of the image.
        opened = opening_by_reconstruction(self, progress_steps, smoothed,
                                           structuring_element)

        thresholded = threshold(self, progress_steps, opened)

        # Removes structures smaller than the structuring element while
        # retaining particle shape. The grayscale implementation is
        # faster than the binary one.
        cleaned = opening_by_reconstruction(self, progress_steps,
                                            thresholded,
                                            structuring_element)

        # Fill in pores (grayscale is faster than binary here as well)
        closed = morphological_closing(self, progress_steps, cleaned,
                                       structuring_element)

        filled = fill_holes(self, progress_steps, closed)

        # Disconnect separate particles and reduce reconstruction
        opening = morphological_opening(self, progress_steps, filled,
                                        structuring_element)

        self.progress.message = "Saving results"

        label_buffer = itk.PyBuffer[type(itk_input_image)] \
            .GetArrayFromImage(opening)

        label_map_dataset = vtk.vtkImageData()
        label_map_dataset.CopyStructure(dataset)
        utils.set_array(label_map_dataset, label_buffer, isFortran=False)

        segmentation_result = {}
        segmentation_result["label_map"] = label_map_dataset
    except Exception as exc:
        print("Problem encountered while running %s" %
              self.__class__.__name__)
        raise exc

    return segmentation_result
def transform_scalars(self, dataset, stencil_radius=2, iterations=10,
                      threshold=50.0):
    """This filter smooths a binary image by evolving a level set with a
    curvature-based speed function. The Stencil Radius determines the scale
    of the noise to remove. The Threshold determines the iso-contour
    brightness to discriminate between two pixel classes.
    """
    # Initial progress
    self.progress.value = 0
    self.progress.maximum = 100

    # Approximate percentage of work completed after each step in the
    # transform
    STEP_PCT = [10, 20, 30, 70, 80, 90, 100]

    try:
        import itk
        import itkTypes
        from tomviz import itkutils
    except Exception as exc:
        print("Could not import necessary module(s)")
        raise exc

    # Return values
    returnValue = None

    # Add a try/except around the ITK portion. ITK exceptions are
    # passed up to the Python layer, so we can at least report what
    # went wrong with the script, e.g., unsupported image type.
    try:
        self.progress.value = STEP_PCT[0]
        self.progress.message = "Converting data to ITK image"

        # Get the ITK image
        itk_image = itkutils.convert_vtk_to_itk_image(dataset)
        itk_input_image_type = type(itk_image)

        self.progress.message = "Casting input to float type"

        # Cast the input to a float image for the smoothing filter
        itk_filter_image_type = itk.Image[itkTypes.F,
                                          itk_image.GetImageDimension()]
        caster = itk.CastImageFilter[itk_input_image_type,
                                     itk_filter_image_type].New()
        caster.SetInput(itk_image)
        itkutils.observe_filter_progress(self, caster,
                                         STEP_PCT[1], STEP_PCT[2])

        try:
            caster.Update()
        except RuntimeError:
            # The user aborted the cast
            return

        self.progress.message = "Running filter"

        smoothing_filter = itk.BinaryMinMaxCurvatureFlowImageFilter[
            itk_filter_image_type, itk_filter_image_type].New()
        smoothing_filter.SetThreshold(threshold)
        smoothing_filter.SetStencilRadius(stencil_radius)
        smoothing_filter.SetNumberOfIterations(iterations)
        smoothing_filter.SetTimeStep(0.0625)
        # Fix: connect the caster's output image rather than the caster
        # filter object itself — the wrapped SetInput expects an image,
        # matching the pipeline wiring used by the other operators here.
        smoothing_filter.SetInput(caster.GetOutput())
        itkutils.observe_filter_progress(self, smoothing_filter,
                                         STEP_PCT[2], STEP_PCT[3])

        try:
            smoothing_filter.Update()
        except RuntimeError:
            # The user aborted the filter run
            return

        itk_image_data = smoothing_filter.GetOutput()

        # Cast output back to the input type so it can be stored in the
        # original data set
        self.progress.message = "Casting output to input type"

        caster = itk.CastImageFilter[itk_filter_image_type,
                                     itk_input_image_type].New()
        caster.SetInput(itk_image_data)
        itkutils.observe_filter_progress(self, caster,
                                         STEP_PCT[3], STEP_PCT[4])

        try:
            caster.Update()
        except RuntimeError:
            return

        itk_image_data = caster.GetOutput()

        self.progress.value = STEP_PCT[5]
        self.progress.message = "Saving results"

        itkutils.set_array_from_itk_image(dataset, itk_image_data)

        self.progress.value = STEP_PCT[6]
    except Exception as exc:
        print("Problem encountered while running %s" %
              self.__class__.__name__)
        raise exc

    return returnValue
def transform_scalars(self, dataset, minimum_radius=0.5, maximum_radius=6.):
    """Segment pores. The pore size must be greater than the minimum radius
    and less than the maximum radius. Pores will be separated according to
    the minimum radius.

    Radii are in physical units and converted to per-axis voxel radii
    using the image spacing. Returns {"label_map": vtkImageData}, or None
    if a step is aborted.
    """
    # Initial progress
    self.progress.value = 0
    self.progress.maximum = 100

    # Approximate percentage of work completed after each step in the
    # transform; the iterator is consumed here and inside the helper
    # filters (the watershed step is the most expensive at 30%).
    step_pct = iter([5, 5, 5, 5, 5, 5, 5, 5, 5, 30, 10, 5, 5, 5])

    try:
        import itk
        from vtkmodules.vtkCommonDataModel import vtkImageData
        from tomviz import itkutils
        from tomviz import utils
        import numpy as np
    except Exception as exc:
        print("Could not import necessary module(s)")
        raise exc

    # Return values
    returnValues = None

    # Add a try/except around the ITK portion. ITK exceptions are
    # passed up to the Python layer, so we can at least report what
    # went wrong with the script, e.g., unsupported image type.
    try:
        self.progress.message = "Converting data to ITK image"

        # Get the ITK image
        itk_input_image = itkutils.convert_vtk_to_itk_image(dataset)
        self.progress.value = next(step_pct)

        # Reduce noise
        smoothed = median_filter(self, step_pct, itk_input_image)

        # Enhance pore contrast
        enhanced = unsharp_mask(self, step_pct, smoothed)

        thresholded = threshold(self, step_pct, enhanced)

        dimension = itk_input_image.GetImageDimension()
        spacing = itk_input_image.GetSpacing()

        # Convert maximum_radius to a per-axis voxel radius (at least 1
        # voxel along every axis)
        closing_radius = itk.Size[dimension]()
        closing_radius.Fill(1)

        for dim in range(dimension):
            radius = int(np.round(maximum_radius / spacing[dim]))
            if radius > closing_radius[dim]:
                closing_radius[dim] = radius

        StructuringElementType = itk.FlatStructuringElement[dimension]
        structuring_element = \
            StructuringElementType.Ball(closing_radius)

        particle_mask = morphological_closing(self, step_pct, thresholded,
                                              structuring_element)

        encapsulated = encapsulate(self, step_pct, thresholded,
                                   particle_mask, structuring_element)

        distance = get_distance(self, step_pct, encapsulated)

        segmented = watershed(self, step_pct, distance, minimum_radius)

        inverted = invert(self, step_pct, thresholded)

        # DisconnectPipeline detaches these images from their upstream
        # filters so later pipeline re-executions do not overwrite them
        segmented.DisconnectPipeline()
        inverted.DisconnectPipeline()
        separated = apply_mask(self, step_pct, segmented, inverted)

        separated.DisconnectPipeline()
        particle_mask.DisconnectPipeline()
        in_particles = apply_mask(self, step_pct, separated, particle_mask)

        # Convert minimum_radius to a per-axis voxel radius
        opening_radius = itk.Size[dimension]()
        opening_radius.Fill(1)

        for dim in range(dimension):
            radius = int(np.round(minimum_radius / spacing[dim]))
            if radius > opening_radius[dim]:
                opening_radius[dim] = radius

        structuring_element = \
            StructuringElementType.Ball(opening_radius)
        opened = opening_by_reconstruction(self, step_pct, in_particles,
                                           structuring_element)

        self.progress.message = "Saving results"

        label_buffer = itk.PyBuffer[type(opened)] \
            .GetArrayFromImage(opened)
        # Defensive copy — presumably so the buffer outlives the ITK
        # pipeline's memory; TODO confirm this is still required
        label_buffer = label_buffer.copy()

        label_map_dataset = vtkImageData()
        label_map_dataset.CopyStructure(dataset)
        utils.set_array(label_map_dataset, label_buffer, isFortran=False)

        # Set up dictionary to return operator results
        returnValues = {}
        returnValues["label_map"] = label_map_dataset
    except Exception as exc:
        print("Problem encountered while running %s" %
              self.__class__.__name__)
        raise exc

    return returnValues
def transform_scalars(self, dataset, structuring_element_id=0, radius=1,
                      object_label=1, background_label=0):
    """Dilate segmented objects with a given label by a spherically
    symmetric structuring element with a given radius.
    """

    # Initial progress
    self.progress.value = 0
    self.progress.maximum = 100

    # Approximate percentage of work completed after each step in the
    # transform
    STEP_PCT = [10, 20, 90, 100]

    try:
        import itk
        from tomviz import itkutils
    except Exception as exc:
        print("Could not import necessary module(s)")
        raise exc

    # ITK exceptions propagate to the Python layer, so wrap the pipeline to
    # report problems such as an unsupported image type.
    try:
        self.progress.value = STEP_PCT[0]
        self.progress.message = "Converting data to ITK image"

        itk_image = itkutils.convert_vtk_to_itk_image(dataset)
        itk_input_image_type = type(itk_image)

        # Map the kernel-shape id to the matching structuring-element
        # factory: 0 -> Box, 1 -> Ball, 2 -> Cross.
        itk_kernel_type = itk.FlatStructuringElement[3]
        kernel_factories = {
            0: itk_kernel_type.Box,
            1: itk_kernel_type.Ball,
            2: itk_kernel_type.Cross,
        }
        if structuring_element_id not in kernel_factories:
            raise Exception('Invalid kernel shape id %d' %
                            structuring_element_id)
        itk_kernel = kernel_factories[structuring_element_id](radius)

        self.progress.value = STEP_PCT[1]
        self.progress.message = "Running filter"

        dilate_filter = itk.BinaryDilateImageFilter[itk_input_image_type,
                                                    itk_input_image_type,
                                                    itk_kernel_type].New()
        dilate_filter.SetDilateValue(object_label)
        dilate_filter.SetBackgroundValue(background_label)
        dilate_filter.SetKernel(itk_kernel)
        dilate_filter.SetInput(itk_image)
        itkutils.observe_filter_progress(self, dilate_filter,
                                         STEP_PCT[1], STEP_PCT[2])

        try:
            dilate_filter.Update()
        except RuntimeError:
            # Raised when the user cancels the operator.
            return

        self.progress.message = "Saving results"
        itkutils.set_array_from_itk_image(dataset, dilate_filter.GetOutput())

        self.progress.value = STEP_PCT[3]
    except Exception as exc:
        print("Problem encountered while running %s" %
              self.__class__.__name__)
        raise exc
def transform_scalars(self, dataset, minimum_radius=4):
    """Segment spherical particles from a homogeneous, dark background.
    Even if the particles have pores, they are segmented as solid
    structures.
    """

    # Initial progress
    self.progress.value = 0
    self.progress.maximum = 100

    # Approximate percentage of work completed after each step in the
    # transform; consumed one value at a time by the helper filters.
    step_pct = iter([10, 10, 10, 10, 10, 10, 10, 10, 10, 10])

    try:
        import itk
        from vtkmodules.vtkCommonDataModel import vtkImageData
        from tomviz import itkutils
        from tomviz import utils
    except Exception as exc:
        print("Could not import necessary module(s)")
        raise exc

    # Operator results (dictionary with the label map), or None on failure.
    returnValues = None

    # ITK exceptions propagate to Python, so wrap the pipeline to report
    # problems such as an unsupported image type.
    try:
        self.progress.message = "Converting data to ITK image"

        itk_input_image = itkutils.convert_vtk_to_itk_image(dataset)
        self.progress.value = next(step_pct)

        # Reduce noise.
        smoothed = median_filter(self, step_pct, itk_input_image)

        Dimension = 3
        StructuringElementType = itk.FlatStructuringElement[Dimension]
        structuring_element = StructuringElementType.Ball(minimum_radius)

        # Reduces reconstruction streak artifact effects and artifacts far
        # from the center of the image.
        opened = opening_by_reconstruction(self, step_pct, smoothed,
                                           structuring_element)

        thresholded = threshold(self, step_pct, opened)

        # Removes structures smaller than the structuring element while
        # retaining particle shape. The grayscale implementation is faster
        # than the binary one.
        cleaned = opening_by_reconstruction(self, step_pct, thresholded,
                                            structuring_element)

        # Fill in pores (grayscale closing is faster than binary).
        closed = morphological_closing(self, step_pct, cleaned,
                                       structuring_element)

        # Fill in any remaining enclosed holes.
        filled = fill_holes(self, step_pct, closed)

        # Disconnect separate particles and reduce reconstruction artifacts.
        opening = morphological_opening(self, step_pct, filled,
                                        structuring_element)

        self.progress.message = "Saving results"

        # NOTE(review): unlike the pore-segmentation operator, this buffer
        # is not copied before the ITK image goes out of scope — confirm
        # the view remains valid.
        label_buffer = itk.PyBuffer[type(itk_input_image)] \
            .GetArrayFromImage(opening)

        label_map_dataset = vtkImageData()
        label_map_dataset.CopyStructure(dataset)
        utils.set_array(label_map_dataset, label_buffer, isFortran=False)

        # Package the label map as the operator's result.
        returnValues = {}
        returnValues["label_map"] = label_map_dataset
    except Exception as exc:
        print("Problem encountered while running %s" %
              self.__class__.__name__)
        raise exc

    return returnValues
def transform_scalars(self, dataset, conductance=1.0, iterations=100,
                      timestep=0.0625):
    """This filter performs anisotropic diffusion on an image using
    the classic Perona-Malik, gradient magnitude-based equation.
    """

    # Initial progress
    self.progress.value = 0
    self.progress.maximum = 100

    # Approximate percentage of work completed after each step in the
    # transform
    STEP_PCT = [10, 20, 90, 100]

    try:
        import itk
        import itkTypes
        from tomviz import itkutils
    except Exception as exc:
        print("Could not import necessary module(s)")
        raise exc

    try:
        self.progress.value = STEP_PCT[0]
        self.progress.message = "Converting data to ITK image"

        # The itk.GradientAnisotropicDiffusionImageFilter is templated over
        # float pixel types only, so explicitly request a float ITK image.
        itk_image = itkutils.convert_vtk_to_itk_image(dataset, itkTypes.F)
        itk_image_type = type(itk_image)

        self.progress.value = STEP_PCT[1]
        self.progress.message = "Running filter"

        DiffusionFilterType = \
            itk.GradientAnisotropicDiffusionImageFilter[itk_image_type,
                                                        itk_image_type]
        diffusion_filter = DiffusionFilterType.New()
        diffusion_filter.SetConductanceParameter(conductance)
        diffusion_filter.SetNumberOfIterations(iterations)
        diffusion_filter.SetTimeStep(timestep)
        diffusion_filter.SetInput(itk_image)
        itkutils.observe_filter_progress(self, diffusion_filter,
                                         STEP_PCT[1], STEP_PCT[2])

        try:
            diffusion_filter.Update()
        except RuntimeError:
            # Raised when the user cancels the operator.
            return

        self.progress.message = "Saving results"
        itkutils.set_array_from_itk_image(dataset,
                                          diffusion_filter.GetOutput())

        self.progress.value = STEP_PCT[3]
    except Exception as exc:
        print("Problem encountered while running %s" %
              self.__class__.__name__)
        raise exc
def transform_scalars(self, dataset, stencil_radius=2, iterations=10,
                      threshold=50.0):
    """This filter smooths a binary image by evolving a level set with a
    curvature-based speed function. The Stencil Radius determines the scale
    of the noise to remove. The Threshold determines the iso-contour
    brightness to discriminate between two pixel classes.
    """

    # Initial progress
    self.progress.value = 0
    self.progress.maximum = 100

    # Approximate percentage of work completed after each step in the
    # transform
    STEP_PCT = [10, 20, 30, 70, 80, 90, 100]

    try:
        import itk
        import itkTypes
        from tomviz import itkutils
    except Exception as exc:
        print("Could not import necessary module(s)")
        raise exc

    # Return values (always None; kept for interface compatibility)
    returnValue = None

    # ITK exceptions propagate to the Python layer, so wrap the pipeline to
    # report problems such as an unsupported image type.
    try:
        self.progress.value = STEP_PCT[0]
        self.progress.message = "Converting data to ITK image"

        # Get the ITK image
        itk_image = itkutils.convert_vtk_to_itk_image(dataset)
        itk_input_image_type = type(itk_image)

        # The smoothing filter below operates on float images only, so cast
        # the input to float first.
        self.progress.message = "Casting input to float type"
        itk_filter_image_type = itk.Image[itkTypes.F,
                                          itk_image.GetImageDimension()]
        caster = itk.CastImageFilter[itk_input_image_type,
                                     itk_filter_image_type].New()
        caster.SetInput(itk_image)
        itkutils.observe_filter_progress(self, caster,
                                         STEP_PCT[1], STEP_PCT[2])

        try:
            caster.Update()
        except RuntimeError:
            # Raised when the user cancels the operator.
            return

        self.progress.message = "Running filter"
        smoothing_filter = itk.BinaryMinMaxCurvatureFlowImageFilter[
            itk_filter_image_type, itk_filter_image_type].New()
        smoothing_filter.SetThreshold(threshold)
        smoothing_filter.SetStencilRadius(stencil_radius)
        smoothing_filter.SetNumberOfIterations(iterations)
        smoothing_filter.SetTimeStep(0.0625)
        # BUG FIX: SetInput() takes an image, not the upstream filter
        # object (compare the output caster below, which is fed an image).
        smoothing_filter.SetInput(caster.GetOutput())
        itkutils.observe_filter_progress(self, smoothing_filter,
                                         STEP_PCT[2], STEP_PCT[3])

        try:
            smoothing_filter.Update()
        except RuntimeError:
            # Raised when the user cancels the operator.
            return

        itk_image_data = smoothing_filter.GetOutput()

        # Cast output to the input type
        self.progress.message = "Casting output to input type"

        caster = itk.CastImageFilter[itk_filter_image_type,
                                     itk_input_image_type].New()
        caster.SetInput(itk_image_data)
        itkutils.observe_filter_progress(self, caster,
                                         STEP_PCT[3], STEP_PCT[4])

        try:
            caster.Update()
        except RuntimeError:
            # Raised when the user cancels the operator.
            return

        itk_image_data = caster.GetOutput()
        self.progress.value = STEP_PCT[5]

        self.progress.message = "Saving results"
        itkutils.set_array_from_itk_image(dataset, itk_image_data)

        self.progress.value = STEP_PCT[6]
    except Exception as exc:
        print("Problem encountered while running %s" %
              self.__class__.__name__)
        raise exc

    return returnValue
def transform_scalars(self, dataset, minimum_radius=0.5, maximum_radius=6.):
    """Segment pores. The pore size must be greater than the minimum radius
    and less than the maximum radius. Pores will be separated according to
    the minimum radius."""

    # Initial progress
    self.progress.value = 0
    self.progress.maximum = 100

    # Approximate percentage of work completed after each step in the
    # transform; the helper filters each consume one value.
    step_pct = iter([5, 5, 5, 5, 5, 5, 5, 5, 5, 30, 10, 5, 5, 5])

    try:
        import itk
        from vtkmodules.vtkCommonDataModel import vtkImageData
        from tomviz import itkutils
        from tomviz import utils
        import numpy as np
    except Exception as exc:
        print("Could not import necessary module(s)")
        raise exc

    # Result dictionary holding the label map, or None if the run fails.
    returnValues = None

    # ITK exceptions are passed up to the Python layer; wrap the pipeline so
    # failures (e.g. an unsupported image type) are reported.
    try:
        self.progress.message = "Converting data to ITK image"

        itk_input_image = itkutils.convert_vtk_to_itk_image(dataset)
        self.progress.value = next(step_pct)

        # Denoise, enhance pore contrast, then binarize.
        smoothed = median_filter(self, step_pct, itk_input_image)
        enhanced = unsharp_mask(self, step_pct, smoothed)
        thresholded = threshold(self, step_pct, enhanced)

        dimension = itk_input_image.GetImageDimension()
        spacing = itk_input_image.GetSpacing()

        def radius_in_voxels(physical_radius):
            # Convert a physical radius into a per-axis voxel radius,
            # clamped to at least 1 voxel on every axis.
            size = itk.Size[dimension]()
            size.Fill(1)
            for axis in range(dimension):
                voxels = int(np.round(physical_radius / spacing[axis]))
                if voxels > size[axis]:
                    size[axis] = voxels
            return size

        StructuringElementType = itk.FlatStructuringElement[dimension]
        structuring_element = \
            StructuringElementType.Ball(radius_in_voxels(maximum_radius))

        # Closing with the maximum-radius ball gives a solid particle mask.
        particle_mask = morphological_closing(self, step_pct, thresholded,
                                              structuring_element)
        encapsulated = encapsulate(self, step_pct, thresholded,
                                   particle_mask, structuring_element)

        # Watershed on the distance transform separates touching pores.
        distance = get_distance(self, step_pct, encapsulated)
        segmented = watershed(self, step_pct, distance, minimum_radius)
        inverted = invert(self, step_pct, thresholded)

        # Detach intermediates so the masking steps do not re-trigger the
        # upstream pipeline.
        segmented.DisconnectPipeline()
        inverted.DisconnectPipeline()
        separated = apply_mask(self, step_pct, segmented, inverted)

        separated.DisconnectPipeline()
        particle_mask.DisconnectPipeline()
        in_particles = apply_mask(self, step_pct, separated, particle_mask)

        # Discard pores smaller than the minimum radius.
        structuring_element = \
            StructuringElementType.Ball(radius_in_voxels(minimum_radius))
        opened = opening_by_reconstruction(self, step_pct, in_particles,
                                           structuring_element)

        self.progress.message = "Saving results"

        label_buffer = itk.PyBuffer[type(opened)] \
            .GetArrayFromImage(opened)
        # temp
        label_buffer = label_buffer.copy()
        label_map_dataset = vtkImageData()
        label_map_dataset.CopyStructure(dataset)
        utils.set_array(label_map_dataset, label_buffer, isFortran=False)

        # Package the label map as the operator's result.
        returnValues = {}
        returnValues["label_map"] = label_map_dataset
    except Exception as exc:
        print("Problem encountered while running %s" %
              self.__class__.__name__)
        raise exc

    return returnValues
def transform_scalars(self, dataset, conductance=1.0, iterations=100,
                      timestep=0.0625):
    """This filter performs anisotropic diffusion on an image using
    the classic Perona-Malik, gradient magnitude-based equation.
    """

    # Initial progress
    self.progress.value = 0
    self.progress.maximum = 100

    # Approximate percentage of work completed after each step in the
    # transform
    STEP_PCT = [10, 20, 90, 100]

    try:
        import itk
        import itkTypes
        from tomviz import itkutils
    except Exception as exc:
        print("Could not import necessary module(s)")
        raise exc

    try:
        self.progress.value = STEP_PCT[0]
        self.progress.message = "Converting data to ITK image"

        # itk.GradientAnisotropicDiffusionImageFilter is only wrapped for
        # float pixel types, so request a float image up front.
        itk_image = itkutils.convert_vtk_to_itk_image(dataset, itkTypes.F)
        itk_image_type = type(itk_image)

        self.progress.value = STEP_PCT[1]
        self.progress.message = "Running filter"

        diffusion_filter = itk.GradientAnisotropicDiffusionImageFilter[
            itk_image_type, itk_image_type].New()
        diffusion_filter.SetConductanceParameter(conductance)
        diffusion_filter.SetNumberOfIterations(iterations)
        diffusion_filter.SetTimeStep(timestep)
        diffusion_filter.SetInput(itk_image)
        itkutils.observe_filter_progress(self, diffusion_filter,
                                         STEP_PCT[1], STEP_PCT[2])

        try:
            diffusion_filter.Update()
        except RuntimeError:
            # Raised when the user cancels the operator.
            return

        self.progress.message = "Saving results"
        itkutils.set_array_from_itk_image(dataset,
                                          diffusion_filter.GetOutput())

        self.progress.value = STEP_PCT[3]
    except Exception as exc:
        print("Problem encountered while running %s" %
              self.__class__.__name__)
        raise exc
def transform_scalars(self, dataset, structuring_element_id=0, radius=1,
                      object_label=1, background_label=0):
    """Perform morphological opening on segmented objects with a given
    label by a spherically symmetric structuring element with a given
    radius.
    """

    # Initial progress
    self.progress.value = 0
    self.progress.maximum = 100

    # Approximate percentage of work completed after each step in the
    # transform
    STEP_PCT = [10, 30, 60, 90, 100]

    try:
        import itk
        from tomviz import itkutils
    except Exception as exc:
        print("Could not import necessary module(s)")
        raise exc

    # ITK exceptions propagate to the Python layer, so wrap the pipeline to
    # report problems such as an unsupported image type.
    try:
        self.progress.value = STEP_PCT[0]
        self.progress.message = "Converting data to ITK image"

        itk_image = itkutils.convert_vtk_to_itk_image(dataset)
        itk_input_image_type = type(itk_image)

        # Build the structuring element: 0 -> Box, 1 -> Ball, 2 -> Cross.
        itk_kernel_type = itk.FlatStructuringElement[3]
        if structuring_element_id == 0:
            itk_kernel = itk_kernel_type.Box(radius)
        elif structuring_element_id == 1:
            itk_kernel = itk_kernel_type.Ball(radius)
        elif structuring_element_id == 2:
            itk_kernel = itk_kernel_type.Cross(radius)
        else:
            raise Exception('Invalid kernel shape id %d' %
                            structuring_element_id)

        self.progress.value = STEP_PCT[1]
        self.progress.message = "Running filter"

        # Opening = erosion followed by dilation with the same kernel.
        erode_filter = itk.BinaryErodeImageFilter[itk_input_image_type,
                                                  itk_input_image_type,
                                                  itk_kernel_type].New()
        erode_filter.SetErodeValue(object_label)
        erode_filter.SetBackgroundValue(background_label)
        erode_filter.SetKernel(itk_kernel)
        erode_filter.SetInput(itk_image)
        itkutils.observe_filter_progress(self, erode_filter,
                                         STEP_PCT[1], STEP_PCT[2])

        dilate_filter = itk.BinaryDilateImageFilter[itk_input_image_type,
                                                    itk_input_image_type,
                                                    itk_kernel_type].New()
        dilate_filter.SetDilateValue(object_label)
        dilate_filter.SetBackgroundValue(background_label)
        dilate_filter.SetKernel(itk_kernel)
        dilate_filter.SetInput(erode_filter.GetOutput())
        itkutils.observe_filter_progress(self, dilate_filter,
                                         STEP_PCT[2], STEP_PCT[3])

        try:
            # Updating the downstream dilation also runs the erosion.
            dilate_filter.Update()
        except RuntimeError:
            # Raised when the user cancels the operator.
            return

        self.progress.message = "Saving results"
        itkutils.set_array_from_itk_image(dataset, dilate_filter.GetOutput())

        self.progress.value = STEP_PCT[4]
    except Exception as exc:
        print("Problem encountered while running %s" %
              self.__class__.__name__)
        raise exc