def _validate_options(self):
        super(self.__class__, self)._validate_options()

        # This is to validate potentially all input data,
        # Unfortunately, some of the parameters are defined
        # after this validation step and cannot be validated
        # here. They will be checked individually later.

        assert self.options.input_image, \
            self._logger.error(r("Moving image is required. Please provide \
            the `--input-image` parameter."))

        assert self.options.output_image, \
            self._logger.error(r("Output image is required. Please provide \
            the `--output-image` parameter."))

        assert self.options.reference_image, \
            self._logger.error(r("Reference image is required. Please provide \
            the `--reference-image` parameter."))

        assert self.options.coregistration_affine, \
            self._logger.error(r("Affine 3D transform is not provided. Please \
            supply the `--coregistration-affine` parameter."))

        assert self.options.coregistration_deformable_forward and \
               self.options.coregistration_deformable_inverse, \
            self._logger.error(r("Both forward and inverse 3D warps are \
            required. One or both warp fields are not provided. Please \
            supply both the `--coregistration-deformable-forward` and \
            `--coregistration-deformable-inverse` parameters."))

        assert self.options.section_deformable_fwd_template and \
               self.options.section_deformable_inv_template, \
            self._logger.error(r("Both forward and inverse 2D warps are \
            required. One or both warp field filename templates are \
            missing. Please provide `--section-deformable-fwd-template` \
            and `--section-deformable-inv-template`."))
Example #2
def calculate_labels_midpoints(itk_image):
    """
    This function introduces a workflow for calculating the middle midpoints
    of the labelled images. The term 'middle midpoints' is used on purpose.
    You might think that centroids are being calculated here, but they are
    not; the term 'middle midpoints' is used precisely because what is
    calculated here is not the set of centroids.

    The middle midpoints of the labels in the provided image are calculated
    in the following way:

    The function iterates over all available labels except the background
    label, which is removed beforehand. The overall idea of this loop is to:
    1) Extract the given label from the segmentation.
    2) Extract the largest patch of the segmentation, as there
       might be multiple disjoint regions colored with the given label.
    3) Apply the distance transform to the largest patch of the
       given segmentation.
    4) Pick the maximum of the distance transform for the given segmentation
       and thereby define the 'middle point' of the given label.

    .. note :: Please bear in mind that this procedure returns the position
        of the first (index-wise) voxel with the maximum value. This means
        that if there is more than one voxel carrying the maximum value of
        the distance transform, the location of the first one is returned.
        One could argue that the centre of mass of the maximum-valued voxels
        should be returned instead, but it is unknown whether such a centre
        would be located inside or outside the actual structure. Therefore
        some of the results may look weird, but they are actually correct.

    :param itk_image: Labelled image, the image is expected to be a labelled
                      image in which individual discrete values correspond
                      to individual structures. Formally this means that
                      the image has to be of `uchar` or `ushort` type,
                      to have a single component and to have
                      a dimensionality of two or three. Images having
                      different properties will not be processed.
    :type itk_image: `itk.Image`

    :return: Middle midpoints of the labels in the image.
    :rtype: {int: ((float, float, float), (int, int, int)), ...}

    And now it is time to do some unit testing. Please also consider this
    set of unit tests as an example of how to use this function.

    >>> import os
    >>> import base64
    >>> from possum import pos_itk_transforms
    >>> example_two_dimensions='H4sIAAAAAAAAA4thZCACFDEwMWgAISMcogImBg44u8EegdHBBmdUPosTNtvCizJLSlLzFJIqFQIq/TzTQjwVylKLijPz8xQM9IwMDA0MzAzM9QyJcfiAgTxtdPcxwgETHDDDwag6+qjjggNuOOCBA144GFVHH3UicCAKB2JwIA4Ho+roo04ODuThQAEOFOFgVB191AEAXtGveKAHAAA='

    >>> input_filename="/tmp/pos_itk_centroids_example_two_dimensions.nii.gz"
    >>> open(input_filename, "wb").write(base64.decodestring(example_two_dimensions))
    >>> itk_image = pos_itk_transforms.read_itk_image(input_filename)
    >>> midpoints = calculate_labels_midpoints(itk_image)

    >>> sorted(midpoints.keys()) == [1, 2, 3, 10, 11, 12, 13, 20, 21, 22, 23, 30, 31, 32, 33]
    True

    >>> map(int, midpoints[1][0]) == [14, 0, 0]
    True

    >>> map(int, midpoints[21][0]) == [14, 24, 0]
    True

    >>> midpoints[30] == ((0.0, 39.0, 0), (0, 39, 0))
    True

    >>> type(midpoints[30][1][1]) == type(1)
    True

    >>> type(midpoints[30][0][1]) == type(1)
    False

    >>> type(midpoints[30][0][1]) == type(1.0)
    True

    >>> os.remove(input_filename)


    Now we will try to process a 3D image

    >>> example_three_dimensions="H4sIAAAAAAAAA+3PPUtCURzH8XN1iUposNzqNAty1MqlchCqu4Rp0NIUXsPlWjcJby3Rw1tI23rYeoCGrLfnF4nEWvxPDfd8DhfOF+5wfvuOGkOg4mpmcJzvMyqmJn7uF8Xh99t7abQLpb//KLUXNFotz9cHoS6H225919WnXnDSaPraZHIma8yKKWSy4zz83/jp4fscxBCHtCcxhWkkIO0kZjGHFKQ9jwVoLELadr/dH+X9OeSxhGVIexVrWEcR0t7AJrbgQtpl7KCCKqRt99v9Ud5fg4c6DiFtH00c4RjSbiPEGc4h7Utc4Ro3kLbdb/dHef8tOujiDtK+xwMe8QRpP+MFr3iDtD/Qwye+IG273+6P8v4+5Jgfs2ARAAA="

    >>> input_filename="/tmp/pos_itk_centroids_example_three_dimensions.nii.gz"
    >>> open(input_filename, "wb").write(base64.decodestring(example_three_dimensions))
    >>> itk_image = pos_itk_transforms.read_itk_image(input_filename)
    >>> midpoints = calculate_labels_midpoints(itk_image)
    >>> os.remove(input_filename)

    >>> str(type(midpoints)) == "<type 'dict'>"
    True

    >>> len(midpoints.keys()) == 63
    True

    >>> str(midpoints.get(0,None)) == "None"
    True

    >>> midpoints[1] == ((5.0, 0.0, 0.0), (5, 0, 0))
    True

    >>> type(midpoints[30][0][1]) == type(1)
    False

    >>> type(midpoints[30][0][1]) == type(1.0)
    True

    >>> midpoints[183] == ((15.0, 15.0, 15.0), (15, 15, 15))
    True

    >>> midpoints[111] == ((5.0, 5.0, 9.0), (5, 5, 9))
    True

    >>> midpoints[53] == ((13.0, 0.0, 5.0), (13, 0, 5))
    True
    """

    C_BACKGROUND_LABEL_IDX = 0
    # Define the dimensionality, data type and number of components
    # of the label image
    label_type = \
        pos_itk_core.io_image_type_to_component_string_name[
            itk_image.__class__]

    # Extract the details of the image provided and check if they are
    # ok to use in the routine.

    n_dim = len(itk_image.GetLargestPossibleRegion().GetSize())
    number_of_components = itk_image.GetNumberOfComponentsPerPixel()
    data_type = label_type[1]

    assert n_dim in [2, 3], \
        "Incorrect dimensionality."

    assert number_of_components == 1, \
        "Only single component images are allowed."

    assert data_type in ["unsigned_char", "unsigned_short"], \
        r("Incorrect data type for a labelled image: only unsigned_char \
          and unsigned_short are accepted.")

    # t_label_img is the ITK image type class to be used in filters
    # templates.
    t_label_img = itk_image.__class__

    # We'll also be using another image type. This one is identical to the
    # labelled image in terms of size and dimensionality. The difference is
    # in the data type: this one has to be float to handle the distance
    # transform well.
    float_type = list(label_type)
    float_type[1] = "float"
    t_float_img = \
        pos_itk_core.io_component_string_name_to_image_type[tuple(float_type)]
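    # For illustration (assuming the layout of the possum type tables):
    # a ('scalar', 'unsigned_char', 3) label image yields the tuple
    # ('scalar', 'float', 3) here, i.e. a float image of the same
    # dimensionality.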

    # The purpose of the filter below is to determine the unique labels
    # that the given segmentation contains.
    unique_labels = \
        itk.LabelGeometryImageFilter[(t_label_img, t_label_img)].New()
    unique_labels.SetInput(itk_image)
    unique_labels.CalculatePixelIndicesOff()
    unique_labels.Update()
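    # At this point unique_labels.GetLabels() returns every distinct label
    # value present in the image, including the background value when it
    # occurs; the background label is stripped out below.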

    # This is where we'll collect the results. We collect both the
    # physical location and the voxel index of each label's middle point.
    middle_points = {}

    # We have to map the available labels returned by ITK to ints, as
    # sometimes strange things happen and they are returned as long ints,
    # which apparently do not compare well with regular Python ints.
    # Consider it a safety precaution.
    available_labels = map(int, unique_labels.GetLabels())

    # Now we need to remove the background label (if such
    # a label actually exists).
    try:
        available_labels.remove(C_BACKGROUND_LABEL_IDX)
    except ValueError:
        pass

    # Now iterate over all available labels except the background label,
    # which has been removed. The overall idea of this loop is to:
    # 1) Extract the given label from the segmentation.
    # 2) Extract the largest patch of the segmentation, as there
    #    might be multiple disjoint regions colored with the given label.
    # 3) Apply the distance transform to the largest patch of the
    #    given segmentation.
    # 4) Pick the maximum of the distance transform for the given
    #    segmentation and thereby define the 'middle point' of the label.
    # These midpoints are called 'middle midpoints', not centroids, as
    # centroids are something different and are calculated in a different
    # way; these middle midpoints cannot be called centroids.
    for label_idx in available_labels:
        extract_label = \
            itk.BinaryThresholdImageFilter[
                (t_label_img, t_label_img)].New()
        extract_label.SetInput(itk_image)
        extract_label.SetUpperThreshold(label_idx)
        extract_label.SetLowerThreshold(label_idx)
        extract_label.SetOutsideValue(0)
        extract_label.SetInsideValue(1)
        extract_label.Update()
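        # The threshold filter above produces a binary mask: voxels equal
        # to label_idx become 1, all other voxels become 0.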

        patches = \
            itk.ConnectedComponentImageFilter[
                (t_label_img, t_label_img)].New()
        patches.SetInput(extract_label.GetOutput())
        patches.Update()
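        # ConnectedComponentImageFilter relabels each disjoint region of
        # the binary mask with its own consecutive label, which lets us
        # pick out the largest region in the next step.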

        largest_patch = \
            itk.LabelShapeKeepNObjectsImageFilter[t_label_img].New()
        largest_patch.SetInput(patches.GetOutput())
        largest_patch.SetBackgroundValue(0)
        largest_patch.SetNumberOfObjects(1)
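        # The attribute selects the shape measurement used to rank the
        # objects; the code 100 is assumed here to denote NUMBER_OF_PIXELS
        # (object size in voxels) in this ITK wrapping.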
        largest_patch.SetAttribute(100)
        largest_patch.Update()

        distance_transform = \
            itk.SignedMaurerDistanceMapImageFilter[
                (t_label_img, t_float_img)].New()
        distance_transform.SetInput(largest_patch.GetOutput())
        distance_transform.InsideIsPositiveOn()
        distance_transform.Update()
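        # With InsideIsPositiveOn() the voxels inside the patch get positive
        # distances to the patch boundary, so the global maximum of the
        # distance map is the voxel lying deepest inside the structure.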

        # Despite the variable name, this is a min/max calculator: the
        # location of the distance transform's maximum serves as the
        # middle point of the label.
        centroid = itk.MinimumMaximumImageCalculator[t_float_img].New()
        centroid.SetImage(distance_transform.GetOutput())
        centroid.Compute()

        index = centroid.GetIndexOfMaximum()
        point = itk_image.TransformIndexToPhysicalPoint(index)

        # We need to slightly refine the results returned by ITK.
        # The results have to be processed slightly differently for 2D
        # and for 3D images. Again, we do a lot of explicit casting to
        # ensure type compatibility. The 2D midpoints are converted into
        # 3D midpoints since they are easier to use in VTK that way.
        if n_dim == 2:
            point = map(float, point) + [0]
            index = map(int, index) + [0]
        elif n_dim == 3:
            point = map(float, point)
            index = map(int, index)

        middle_points[label_idx] = (tuple(point), tuple(index))

    # Below there is some debugging code. Not really important for everyday
    # use.
    # print middle_points.__repr__()

    return middle_points
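

# A minimal usage sketch for calculate_labels_midpoints (module paths follow
# the doctests above; the input filename is hypothetical):
#
#     from possum import pos_itk_transforms
#     labels = pos_itk_transforms.read_itk_image("labels.nii.gz")
#     midpoints = calculate_labels_midpoints(labels)
#     for label, (point, index) in sorted(midpoints.items()):
#         print label, point, index
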
    def parseArgs():
        usage_string = "\n%prog -i FILE -r FILE -o FILE \n\
            --coregistration-affine FILE \n\
            --coregistration-deformable-forward FILE \n\
            --coregistration-deformable-inverse FILE \n\
            --section-affine-template FILE \n\
            --section-deformable-fwd-template FILE \n\
            --section-deformable-inv-template FILE \n\
            [other options]"

        parser = \
            enclosed._getCommandLineParser()
        parser.set_description(r("""The main purpose of the %prog workflow is to \
            map any kind of spatial imaging data defined in the space of \
            the experimental image into the space of the reference image \
            and the other way around: to map spatially defined imaging data \
            from the reference space into the space (coordinate system) of the \
            raw (experimental) image stack. As this kind of mapping involves \
            a lot of transformations, this script may look a bit complicated. \
            Just follow the examples from the docstring of the class and \
            it's all gonna be fine. So stop reading this description and go \
            check the DOCSTRING of this class."""))
        parser.set_usage(usage_string)

        parser.add_option('--input-image', '-i', dest='input_image',
            type='str', default=None, metavar="FILE",
            help=r("Input image of the mapping. \
            This alway the 'experimental image' which is an image with the \
            experimental data. This is never the reference image - the atlas image.\
            Anyway, this image is anything located in the experimental image \
            stack space."))
        parser.add_option('--reference-image', '-r', dest='reference_image',
            type='str', default=None, metavar="FILE",
            help=r("Reference image of the mapping \
            or any image data located in the reference (atlas) space. Usually \
            this will be indeed the reference atlas itself."))
        parser.add_option('--output-image', '-o', dest='output_image',
            type='str', default=None, metavar="FILE",
            help=r("The output image filename. \
            This is where the result of the computations will be saved to. \
            The type of the output image is the same as the type of the \
            input image."))
        parser.add_option('--slicing-axis', '-s',
            dest='slicing_axis', type='int', default=1, metavar="INT",
            help=r('Index of the slicing axis: 0, 1 or 2. Default is 1. \
            Zero corresponds to the sagittal plane, one to the coronal \
            plane and two to the horizontal plane. This works only when \
            the images follow the RAS orientation.'))
        parser.add_option('--direction', dest='direction',
            default='from_atlas_to_raw', type="choice",
            choices=C_ALLOWED_DIRECTIONS, metavar="DIRECTION",
            help=r("Direction in which the mapping will be conducted. \
            There are two directions allowed. The 'from_raw_to_atlas' \
            value will map the data from the raw (experimental) stack \
            space into the reference space (the atlas spce). \
            The 'from_atlas_to_raw' will use data located in the atlas \
            space into the space of the input image stack."))
        parser.add_option('--offset', dest='offset',
            default=0, type='int', metavar="INT",
            help=r('Index of the first section. Defaults to 0, but \
            will usually be 1. Use this if the numbering of your sections \
            starts with a number other than zero. Please do not use \
            negative numbers; they will not work.'))
        parser.add_option('--interpolation', dest='interpolation',
            default=1, type='int', metavar="INT",
            help=r('Defines the interpolation type used for reslicing the \
            images. Three options are allowed at the moment: 0 for \
            nearest-neighbour interpolation, 1 for linear interpolation \
            and 2 for order 3 BSpline interpolation.'))
        parser.add_option('--coregistration-affine',
            dest='coregistration_affine', type='str', default=None,
            metavar="FILE",
            help=r("Affine transformation from the deformable reconstruction \
            3D image to the reference image (i.e. the atlas image).\
            This is an obligatory parameter."))
        parser.add_option('--coregistration-deformable-forward',
            dest='coregistration_deformable_forward', type='str',
            metavar="FILE",
            default=None, help=r("Forward deformable warp which maps the \
            3D deformable reconstruction image to the reference image \
            space. This is an obligatory parameter."))
        parser.add_option('--coregistration-deformable-inverse',
            dest='coregistration_deformable_inverse', type='str',
            metavar="FILE",
            default=None, help=r("Inverse deformable warp which maps the \
            3D deformable reconstruction image to the reference image \
            space. This is an obligatory parameter."))
        parser.add_option('--section-affine-template',
            dest='section_affine_template', type='str', default=None,
            metavar="FILE",
            help=r("Filename template for affine transformation from raw stack \
            to affine reconstruction. One should provide values like this: \
            'affine_\%04d.txt'."))
        parser.add_option('--section-deformable-fwd-template',
            dest='section_deformable_fwd_template', type='str', default=None,
            metavar="FILE",
            help=r("Filename template for forward warps from the affine \
            reconstruction to deformable reconstruction. The values should be \
            provided like this: 'section_\%04d_Warp.nii.gz'."))
        parser.add_option('--section-deformable-inv-template',
            dest='section_deformable_inv_template', type='str', default=None,
            metavar="FILE",
            help=r("Filename template for inverse warps from affine \
            reconstruction to deformable reconstruction. Therefore, in fact \
            they are warps from deformably reconstructed sections into the \
            affine recosntruction spcae. The values should be like this: \
            'section_\%04d_Warp.nii.gz'."))

        (options, args) = parser.parse_args()
        return (options, args)
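
        # A sketch of a typical invocation (script name and all filenames
        # are hypothetical):
        #
        #     script.py -i input.nii.gz -r atlas.nii.gz -o result.nii.gz \
        #         --coregistration-affine affine.txt \
        #         --coregistration-deformable-forward fwd_Warp.nii.gz \
        #         --coregistration-deformable-inverse inv_Warp.nii.gz \
        #         --section-affine-template 'affine_%04d.txt' \
        #         --section-deformable-fwd-template 'section_%04d_Warp.nii.gz' \
        #         --section-deformable-inv-template 'section_%04d_InverseWarp.nii.gz'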
    def use_multicomponent_workflow(self):
        """
        This method executes a multichannel workflow. This part of the mapping
        workflow is a bit more complicated. In this workflow, the multichannel
        / multicomponent image is split into individual components and each of
        the components is processed separately. At the end of the processing,
        all individual channels are merged back into a multicomponent image and
        returned.
        """

        self._logger.debug("Entering multichannel workflow.")

        # Ok, this is a bit complicated. What we do here is determine the
        # data type of a single component. This is not easy, as ITK
        # is quite inconsistent when it comes to handling vector and RGB
        # images. Basically, what is going on here is:
        # 1) RGB images => ('scalar', 'unsigned_char', 3)
        # 2) Vector images =>
        #    ('scalar', type_derived_from_the_actual_image, 3)
        # So below we determine the data type of a single component
        # according to these rules.

        image_type = \
            pos_itk_core.io_image_type_to_component_string_name[self._moving_type]
        if image_type[0] == 'rgb' and image_type[1] == 'unsigned_char':
            image_type_tuple = ('scalar', 'unsigned_char', 3)
        if image_type[0] == 'vector':
            image_type_tuple = ('scalar', image_type[1], 3)
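
        # Note: both branches above hardcode 3 as the last element of the
        # type tuple; assuming the possum type tables use
        # (pixel kind, component type, dimensionality), this workflow
        # implicitly expects three-dimensional images.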

        # Define a single component image type
        component_type = \
            pos_itk_core.io_component_string_name_to_image_type[image_type_tuple]

        # We will collect the consecutive processed components
        # into this array
        processed_components = []

        # Extract the component `i` from the composite image,
        # and process each component independently.
        for i in range(self._numbers_of_components):

            self._logger.debug("Starting processing channel %d of %d.",
                i + 1, self._numbers_of_components)

            extract_filter = \
                itk.VectorIndexSelectionCastImageFilter[
                self._moving_type, component_type].New()
            extract_filter.SetIndex(i)
            extract_filter.SetInput(self._moving_image)

            # Cast the image to an intermediate data type for the
            # purposes of processing.
            cast_filter = \
                itk.CastImageFilter[
                    (component_type, self._processing_type)].New()
            cast_filter.SetInput(extract_filter.GetOutput())
            cast_filter.Update()

            # Process the component.
            processed_component = \
                self.process_single_component(cast_filter.GetOutput())

            # And then, cast the processed component into the data
            # type of the initial image.
            writer_cast_filter = \
                itk.CastImageFilter[
                    (self._processing_type, component_type)].New()
            writer_cast_filter.SetInput(processed_component)
            writer_cast_filter.Update()

            # Store the processed component in an array.
            processed_components.append(writer_cast_filter.GetOutput())

            self._logger.debug("Finished processing channel %d of %d.",
                i + 1, self._numbers_of_components)

        # After iterating over all channels, compose the individual
        # components back into multichannel image.
        self._logger.info(r("Composing processed channels back \
            into multichannel image."))

        # Compose the individual components back into a multicomponent
        # image. In theory an arbitrary number of components is supported,
        # but it has to be more than one :)
        compose = \
            itk.ComposeImageFilter[component_type, self._moving_type].New()

        for i in range(self._numbers_of_components):
            compose.SetInput(i, processed_components[i])
        compose.Update()

        return compose.GetOutput()