Example #1
    def execute(self, image: sitk.Image, params: MultiModalRegistrationParams = None) -> sitk.Image:
        """Executes a multi-modal rigid registration.

        Args:
            image (sitk.Image): The moving image.
            params (MultiModalRegistrationParams): The parameters, which contain the fixed image.

        Returns:
            sitk.Image: The registered image.
        """

        if params is None:
            raise ValueError("params is not defined")
        dimension = image.GetDimension()
        if dimension not in (2, 3):
            raise ValueError('Image dimension {} is not among the accepted (2, 3)'.format(dimension))

        # set a transform that is applied to the moving image to initialize the registration
        if self.registration_type == RegistrationType.BSPLINE:
            transform_domain_mesh_size = [10] * image.GetDimension()
            initial_transform = sitk.BSplineTransformInitializer(params.fixed_image, transform_domain_mesh_size)
        else:
            if self.registration_type == RegistrationType.RIGID:
                transform_type = sitk.VersorRigid3DTransform() if dimension == 3 else sitk.Euler2DTransform()
            elif self.registration_type == RegistrationType.AFFINE:
                transform_type = sitk.AffineTransform(dimension)
            elif self.registration_type == RegistrationType.SIMILARITY:
                transform_type = sitk.Similarity3DTransform() if dimension == 3 else sitk.Similarity2DTransform()
            else:
                raise ValueError('not supported registration_type')

            initial_transform = sitk.CenteredTransformInitializer(sitk.Cast(params.fixed_image,
                                                                            image.GetPixelIDValue()),
                                                                  image,
                                                                  transform_type,
                                                                  sitk.CenteredTransformInitializerFilter.GEOMETRY)

        self.registration.SetInitialTransform(initial_transform, inPlace=True)

        if params.fixed_image_mask:
            self.registration.SetMetricFixedMask(params.fixed_image_mask)

        if params.callbacks is not None:
            for callback in params.callbacks:
                callback.set_params(self.registration, params.fixed_image, image, initial_transform)

        self.transform = self.registration.Execute(sitk.Cast(params.fixed_image, sitk.sitkFloat32),
                                                   sitk.Cast(image, sitk.sitkFloat32))

        if self.verbose:
            print('MultiModalRegistration:\n Final metric value: {0}'.format(self.registration.GetMetricValue()))
            print(' Optimizer\'s stopping condition, {0}'.format(
                self.registration.GetOptimizerStopConditionDescription()))
        elif self.number_of_iterations == self.registration.GetOptimizerIteration():
            print('MultiModalRegistration: Optimizer terminated at number of iterations and did not converge!')

        return sitk.Resample(image, params.fixed_image, self.transform, self.resampling_interpolator, 0.0,
                             image.GetPixelIDValue())
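A minimal usage sketch for the method above (hedged): the enclosing class name and the MultiModalRegistrationParams constructor are assumed from the type hints and the log message, not shown in this snippet.

import SimpleITK as sitk

fixed_image = sitk.ReadImage('path/to/fixed.mha', sitk.sitkFloat32)
moving_image = sitk.ReadImage('path/to/moving.mha', sitk.sitkFloat32)

registration = MultiModalRegistration()  # assumed enclosing class (named in the log output above)
params = MultiModalRegistrationParams(fixed_image)  # assumed to take the fixed image
registered = registration.execute(moving_image, params)  # resampled onto the fixed image grid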
Example #2
def assert_sitk_img_equivalence(img: SimpleITK.Image,
                                img_ref: SimpleITK.Image):
    assert img.GetDimension() == img_ref.GetDimension()
    assert img.GetSize() == img_ref.GetSize()
    assert img.GetOrigin() == img_ref.GetOrigin()
    assert img.GetSpacing() == img_ref.GetSpacing()
    assert (img.GetNumberOfComponentsPerPixel() ==
            img_ref.GetNumberOfComponentsPerPixel())
    assert img.GetPixelIDValue() == img_ref.GetPixelIDValue()
    assert img.GetPixelIDTypeAsString() == img_ref.GetPixelIDTypeAsString()
Example #3
    def execute(self,
                image: sitk.Image,
                params: SizeCorrectionParams = None) -> sitk.Image:
        """Executes the shape/size correction by padding or cropping.

        Args:
            image (sitk.Image): The image to filter.
            params (SizeCorrectionParams): The filter parameters containing the reference (target) shape.

        Returns:
            sitk.Image: The filtered image.
        """
        if params is None:
            raise ValueError('SizeCorrectionParams argument is missing')
        if image.GetDimension() != params.dims:
            raise ValueError(
                'image dimension {} is not compatible with reference shape dimension {}'
                .format(image.GetDimension(), params.dims))

        image_shape = image.GetSize()
        crop = [params.dims * [0], params.dims * [0]]
        pad = [params.dims * [0], params.dims * [0]]
        for dim in range(params.dims):
            ref_size = params.reference_shape[dim]
            dim_size = image_shape[dim]
            if dim_size > ref_size:
                if self.two_sided:
                    crop[0][dim] = (dim_size - ref_size) // 2
                    crop[1][dim] = (dim_size - ref_size) // 2 + (
                        (dim_size - ref_size) % 2)
                else:
                    crop[0][dim] = (dim_size - ref_size)
            elif dim_size < ref_size:
                if self.two_sided:
                    pad[0][dim] = (ref_size - dim_size) // 2
                    pad[1][dim] = (ref_size - dim_size) // 2 + (
                        (ref_size - dim_size) % 2)
                else:
                    pad[0][dim] = (ref_size - dim_size)

        crop_needed = any(any(c) for c in crop)
        if crop_needed:
            image = sitk.Crop(image, crop[0], crop[1])

        pad_needed = any(any(p) for p in pad)
        if pad_needed:
            image = sitk.ConstantPad(image, pad[0], pad[1], self.pad_constant)

        return image
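To make the two-sided split above concrete, a one-dimensional example: shrinking a size of 10 to a reference size of 7 crops 1 voxel from the lower bound and 2 from the upper bound, because the odd remainder is added to the upper side.

dim_size, ref_size = 10, 7
lower = (dim_size - ref_size) // 2                              # 1
upper = (dim_size - ref_size) // 2 + (dim_size - ref_size) % 2  # 2
assert lower + upper == dim_size - ref_size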
Example #4
def sitk_to_nib(
    image: sitk.Image,
    keepdim: bool = False,
) -> Tuple[np.ndarray, np.ndarray]:
    data = sitk.GetArrayFromImage(image).transpose()
    num_components = image.GetNumberOfComponentsPerPixel()
    if num_components == 1:
        data = data[np.newaxis]  # add channels dimension
    input_spatial_dims = image.GetDimension()
    if not keepdim:
        data = ensure_4d(data, False, num_spatial_dims=input_spatial_dims)
    assert data.shape[0] == num_components
    assert data.shape[-input_spatial_dims:] == image.GetSize()
    spacing = np.array(image.GetSpacing())
    direction = np.array(image.GetDirection())
    origin = image.GetOrigin()
    if len(direction) == 9:
        rotation = direction.reshape(3, 3)
    elif len(direction) == 4:  # ignore first dimension if 2D (1, 1, H, W)
        rotation_2d = direction.reshape(2, 2)
        rotation = np.eye(3)
        rotation[1:3, 1:3] = rotation_2d
        spacing = 1, *spacing
        origin = 0, *origin
    rotation = np.dot(FLIP_XY, rotation)
    rotation_zoom = rotation * spacing
    translation = np.dot(FLIP_XY, origin)
    affine = np.eye(4)
    affine[:3, :3] = rotation_zoom
    affine[:3, 3] = translation
    return data, affine
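The function relies on a module-level FLIP_XY constant that is not part of the snippet; in the library this helper appears to come from, it is a diagonal matrix flipping the x and y axes to convert between ITK's LPS convention and the RAS convention used by NIfTI affines. A hedged stand-in and usage sketch:

import numpy as np
import SimpleITK as sitk
import nibabel as nib

FLIP_XY = np.diag((-1, -1, 1))  # assumed definition: flip x and y (LPS -> RAS)

image = sitk.ReadImage('path/to/image.nii.gz')  # placeholder path
data, affine = sitk_to_nib(image)
nii = nib.Nifti1Image(data[0], affine)  # drop the channel axis for a scalar image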
Example #5
def elasticdeform(inpimg: sitk.Image, deformation_sigma: float) -> sitk.Image:

    num_control_points = 10
    interpolator = sitk.sitkNearestNeighbor
    spatial_rank = 2
    fill_value = 0.0

    # initialize B-spline transformation
    transform_mesh_size = [num_control_points] * inpimg.GetDimension()
    bspline_transformation = sitk.BSplineTransformInitializer(
        inpimg, transform_mesh_size)
    params = bspline_transformation.GetParameters()
    params = np.asarray(params, dtype=np.float64)  # np.float is removed in recent NumPy versions
    params += np.random.randn(params.shape[0]) * deformation_sigma

    bspline_transformation.SetParameters(tuple(params))

    resampler = sitk.ResampleImageFilter()
    resampler.SetReferenceImage(inpimg)
    resampler.SetInterpolator(interpolator)
    resampler.SetDefaultPixelValue(fill_value)
    resampler.SetTransform(bspline_transformation)

    img_deformed = resampler.Execute(inpimg)
    img_deformed.CopyInformation(inpimg)

    return img_deformed
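A short usage sketch for elasticdeform; the path is a placeholder, and the nearest-neighbour interpolation hard-coded above makes the transform safe for label maps as well as intensity images.

import SimpleITK as sitk

label_map = sitk.ReadImage('path/to/labels.nii.gz')  # placeholder path
deformed = elasticdeform(label_map, deformation_sigma=4.0)
sitk.WriteImage(deformed, 'labels_deformed.nii.gz')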
Example #6
    def assert_img_properties(img: SimpleITK.Image,
                              internal_image: SimpleITKImage):
        color_space = {
            1: ColorSpace.GRAY,
            3: ColorSpace.RGB,
            4: ColorSpace.RGBA,
        }

        assert internal_image.color_space == color_space.get(
            img.GetNumberOfComponentsPerPixel())
        if img.GetDimension() == 4:
            assert internal_image.timepoints == img.GetSize()[-1]
        else:
            assert internal_image.timepoints is None
        if img.GetDepth():
            assert internal_image.depth == img.GetDepth()
            assert internal_image.voxel_depth_mm == img.GetSpacing()[2]
        else:
            assert internal_image.depth is None
            assert internal_image.voxel_depth_mm is None

        assert internal_image.width == img.GetWidth()
        assert internal_image.height == img.GetHeight()
        assert internal_image.voxel_width_mm == approx(img.GetSpacing()[0])
        assert internal_image.voxel_height_mm == approx(img.GetSpacing()[1])
Example #7
def sitk_to_nib(
    image: sitk.Image,
    keepdim: bool = False,
) -> Tuple[np.ndarray, np.ndarray]:
    data = sitk.GetArrayFromImage(image).transpose()
    num_components = image.GetNumberOfComponentsPerPixel()
    if num_components == 1:
        data = data[np.newaxis]  # add channels dimension
    input_spatial_dims = image.GetDimension()
    if input_spatial_dims == 2:
        data = data[..., np.newaxis]
    if not keepdim:
        data = ensure_4d(data, num_spatial_dims=input_spatial_dims)
    assert data.shape[0] == num_components
    assert data.shape[1:1 + input_spatial_dims] == image.GetSize()
    spacing = np.array(image.GetSpacing())
    direction = np.array(image.GetDirection())
    origin = image.GetOrigin()
    if len(direction) == 9:
        rotation = direction.reshape(3, 3)
    elif len(direction) == 4:  # ignore first dimension if 2D (1, W, H, 1)
        rotation_2d = direction.reshape(2, 2)
        rotation = np.eye(3)
        rotation[:2, :2] = rotation_2d
        spacing = *spacing, 1
        origin = *origin, 0
    else:
        raise RuntimeError(f'Direction not understood: {direction}')
    rotation = np.dot(FLIP_XY, rotation)
    rotation_zoom = rotation * spacing
    translation = np.dot(FLIP_XY, origin)
    affine = np.eye(4)
    affine[:3, :3] = rotation_zoom
    affine[:3, 3] = translation
    return data, affine
Example #8
    def execute(self,
                image: sitk.Image,
                params: fltr.IFilterParams = None) -> sitk.Image:
        """Executes a neighborhood feature extractor on an image.

        Args:
            image (sitk.Image): The image.
            params (fltr.IFilterParams): The parameters (unused).

        Returns:
            sitk.Image: The normalized image.

        Raises:
            ValueError: If image is not 3-D.
        """

        if image.GetDimension() != 3:
            raise ValueError('image needs to be 3-D')

        # test the function and get the output dimension for later reshaping
        function_output = self.function(np.array([1, 2, 3]))
        if np.isscalar(function_output):
            img_out = sitk.Image(image.GetSize(), sitk.sitkFloat32)
        elif not isinstance(function_output, np.ndarray):
            raise ValueError(
                'function must return a scalar or a 1-D np.ndarray')
        elif function_output.ndim > 1:
            raise ValueError(
                'function must return a scalar or a 1-D np.ndarray')
        elif function_output.shape[0] <= 1:
            raise ValueError(
                'function must return a scalar or a 1-D np.ndarray with at least two elements'
            )
        else:
            img_out = sitk.Image(image.GetSize(), sitk.sitkVectorFloat32,
                                 function_output.shape[0])

        img_out_arr = sitk.GetArrayFromImage(img_out)
        img_arr = sitk.GetArrayFromImage(image)
        z, y, x = img_arr.shape

        z_offset = self.kernel[2]
        y_offset = self.kernel[1]
        x_offset = self.kernel[0]
        pad = ((0, z_offset), (0, y_offset), (0, x_offset))
        img_arr_padded = np.pad(img_arr, pad, 'symmetric')

        for xx in range(x):
            for yy in range(y):
                for zz in range(z):

                    val = self.function(img_arr_padded[zz:zz + z_offset,
                                                       yy:yy + y_offset,
                                                       xx:xx + x_offset])
                    img_out_arr[zz, yy, xx] = val

        img_out = sitk.GetImageFromArray(img_out_arr)
        img_out.CopyInformation(image)

        return img_out
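A hedged usage sketch for the extractor above: the execute method belongs to an unnamed filter class that exposes kernel (per-axis neighbourhood extents) and function attributes; the class name below is purely hypothetical.

import numpy as np
import SimpleITK as sitk

image = sitk.ReadImage('path/to/t1.mha')  # placeholder path
extractor = NeighborhoodFeatureExtractor()  # hypothetical class name
extractor.kernel = (3, 3, 3)  # assumed attribute: x, y, z neighbourhood extents
extractor.function = lambda patch: np.array([patch.mean(), patch.std()])  # two features per voxel
features = extractor.execute(image)  # vector image with 2 components per voxel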
Example #9
def rotate(image: sitk.Image,
           rotation_centre: Sequence[float],
           angles: Union[float, Sequence[float]],
           interpolation: str = "linear") -> sitk.Image:
    """Rotate an image around a given centre.

    Parameters
    ----------
    image
        The image to rotate.

    rotation_centre
        The centre of rotation in image coordinates.

    angles
        The angles of rotation around the x, y and z axes, in radians. For a
        2D image, a single angle is expected.

    interpolation, optional
        The interpolation method to use (e.g. "linear", "nearest" or "bspline").

    Returns
    -------
    sitk.Image
        The rotated image.
    """
    if isinstance(rotation_centre, np.ndarray):
        rotation_centre = rotation_centre.tolist()

    rotation_centre = image.TransformIndexToPhysicalPoint(rotation_centre)

    if image.GetDimension() == 2:
        rotation = sitk.Euler2DTransform(
            rotation_centre,
            angles,
            (0., 0.)  # no translation
        )
    elif image.GetDimension() == 3:
        x_angle, y_angle, z_angle = angles

        rotation = sitk.Euler3DTransform(
            rotation_centre,
            x_angle,  # the angle of rotation around the x-axis, in radians -> coronal rotation
            y_angle,  # the angle of rotation around the y-axis, in radians -> sagittal rotation
            z_angle,  # the angle of rotation around the z-axis, in radians -> axial rotation
            (0., 0., 0.)  # no translation
        )
    else:
        raise ValueError(f'Unsupported image dimension: {image.GetDimension()}')
    return resample(image,
                    spacing=image.GetSpacing(),
                    interpolation=interpolation,
                    transform=rotation)
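A usage sketch for rotate: angles are given in radians and the rotation centre as an integer voxel index, which the function converts to physical coordinates; the resample helper it delegates to is not shown in this snippet.

import numpy as np
import SimpleITK as sitk

image = sitk.ReadImage('path/to/ct.nii.gz')  # placeholder path
centre = [s // 2 for s in image.GetSize()]   # rotate around the image centre (index coordinates)
angles = (0.0, 0.0, np.deg2rad(15))          # 15 degrees around the z-axis
rotated = rotate(image, centre, angles, interpolation='linear')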
Example #10
def zoom(image: sitk.Image,
         scale_factor: Union[float, Sequence[float]],
         interpolation: str = "linear",
         anti_alias: bool = True,
         anti_alias_sigma: Optional[float] = None) -> sitk.Image:
    """Rescale image, preserving its spatial extent.

    The rescaled image will have the same spatial extent (size) but will be
    rescaled by `scale_factor` in each dimension. Alternatively, a separate
    scale factor for each dimension can be specified by passing a sequence
    of floats.

    Parameters
    ----------
    image
        The image to rescale.

    scale_factor
        If float, each dimension will be scaled by that factor. If tuple, each
        dimension will be scaled by the corresponding element.

    interpolation, optional
        The interpolation method to use. Valid options are:
        - "linear" for bi/trilinear interpolation (default)
        - "nearest" for nearest neighbour interpolation
        - "bspline" for order-3 b-spline interpolation

    anti_alias, optional
        Whether to smooth the image with a Gaussian kernel before resampling.
        Only used when downsampling, i.e. when the scale factor is less than 1.
        This should be used to avoid aliasing artifacts.

    anti_alias_sigma, optional
        The standard deviation of the Gaussian kernel used for anti-aliasing.

    Returns
    -------
    sitk.Image
        The rescaled image.
    """
    dimension = image.GetDimension()

    if isinstance(scale_factor, float):
        scale_factor = (scale_factor, ) * dimension

    centre_idx = np.array(image.GetSize()) / 2
    centre = image.TransformContinuousIndexToPhysicalPoint(centre_idx)

    transform = sitk.ScaleTransform(dimension, scale_factor)
    transform.SetCenter(centre)

    return resample(image,
                    spacing=image.GetSpacing(),
                    interpolation=interpolation,
                    anti_alias=anti_alias,
                    anti_alias_sigma=anti_alias_sigma,
                    transform=transform,
                    output_size=image.GetSize())
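A usage sketch for zoom: a scale factor below 1 shrinks the content within the unchanged field of view (with anti-aliasing applied when downsampling), while a factor above 1 magnifies it; the resample helper is again assumed from context.

import SimpleITK as sitk

image = sitk.ReadImage('path/to/mr.nii.gz')  # placeholder path
shrunk = zoom(image, 0.8)  # isotropic shrink, anti-aliased by default
magnified = zoom(image, (1.5, 1.5, 1.0), interpolation='bspline')  # per-axis magnification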
Example #11
        def slice_by_slice(image: sitk.Image, *args, **kwargs):
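            # Note: `func` and `inplace` are free variables captured from the
            # enclosing decorator scope; `func` is the 2-D filter being wrapped
            # and `inplace` selects whether results are pasted back into the
            # input image or re-joined into a new one.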

            dim = image.GetDimension()
            iter_dim = 2

            if dim <= iter_dim:
                image = func(image, *args, **kwargs)
                return image

            extract_size = list(image.GetSize())
            extract_size[iter_dim:] = itertools.repeat(0, dim - iter_dim)

            extract_index = [0] * dim
            paste_idx = [slice(None, None)] * dim

            extractor = sitk.ExtractImageFilter()
            extractor.SetSize(extract_size)
            if inplace:
                for high_idx in itertools.product(
                        *[range(s) for s in image.GetSize()[iter_dim:]]):
                    extract_index[iter_dim:] = high_idx
                    extractor.SetIndex(extract_index)

                    paste_idx[iter_dim:] = high_idx
                    image[paste_idx] = func(extractor.Execute(image), *args,
                                            **kwargs)

            else:
                img_list = []
                for high_idx in itertools.product(
                        *[range(s) for s in image.GetSize()[iter_dim:]]):
                    extract_index[iter_dim:] = high_idx
                    extractor.SetIndex(extract_index)

                    paste_idx[iter_dim:] = high_idx

                    img_list.append(
                        func(extractor.Execute(image), *args, **kwargs))

                for d in range(iter_dim, dim):
                    step = reduce((lambda x, y: x * y),
                                  image.GetSize()[d + 1:], 1)

                    join_series_filter = sitk.JoinSeriesImageFilter()
                    join_series_filter.SetSpacing(image.GetSpacing()[d])
                    join_series_filter.SetOrigin(image.GetOrigin()[d])

                    img_list = [
                        join_series_filter.Execute(img_list[i::step])
                        for i in range(step)
                    ]

                assert len(img_list) == 1
                image = img_list[0]

            return image
Example #12
def plot_2d_segmentation_series(path: str,
                                file_name_suffix: str,
                                image: sitk.Image,
                                ground_truth: sitk.Image,
                                segmentation: sitk.Image,
                                alpha: float = 0.5,
                                label: int = 1,
                                file_extension: str = '.png') -> None:
    """Plots an image with an overlaid mask, which indicates under-, correct-, and over-segmentation.

    Args:
        path (str): The output directory path.
        file_name_suffix (str): The output file name suffix.
        image (sitk.Image): The image.
        ground_truth (sitk.Image): The ground truth.
        segmentation (sitk.Image): The segmentation.
        alpha (float): The alpha blending value, between 0 (transparent) and 1 (opaque).
        label (int): The ground truth and segmentation label.
        file_extension (str): The output file extension (with or without dot).

    Examples:
        >>> img_t2 = sitk.ReadImage('your/path/image.mha')
        >>> ground_truth = sitk.ReadImage('your/path/ground_truth.mha')
        >>> segmentation = sitk.ReadImage('your/path/segmentation.mha')
        >>> plot_2d_segmentation_series('/your/path/', 'mysegmentation', img_t2, ground_truth, segmentation)
    """

    if not image.GetSize() == ground_truth.GetSize() == segmentation.GetSize():
        raise ValueError(
            'image, ground_truth, and segmentation must have equal size')
    if not image.GetDimension() == 3:
        raise ValueError('only 3-dimensional images supported')
    if not image.GetNumberOfComponentsPerPixel() == 1:
        raise ValueError('only scalar images supported')

    img_arr = sitk.GetArrayFromImage(image)
    gt_arr = sitk.GetArrayFromImage(ground_truth)
    seg_arr = sitk.GetArrayFromImage(segmentation)

    os.makedirs(path, exist_ok=True)
    file_extension = file_extension if file_extension.startswith(
        '.') else '.' + file_extension

    for slice_idx in range(img_arr.shape[0]):
        full_file_path = os.path.join(
            path, file_name_suffix + str(slice_idx) + file_extension)
        plot_2d_segmentation(full_file_path,
                             img_arr[slice_idx, ...],
                             gt_arr[slice_idx, ...],
                             seg_arr[slice_idx, ...],
                             alpha=alpha,
                             label=label)
Example #13
    def __init__(self, image: sitk.Image):
        """Initializes a new instance of the ImageInformation class.

        Args:
            image (sitk.Image): The image whose properties to hold.
        """
        self.size = image.GetSize()
        self.origin = image.GetOrigin()
        self.spacing = image.GetSpacing()
        self.direction = image.GetDirection()
        self.dimensions = image.GetDimension()
        self.number_of_components_per_pixel = image.GetNumberOfComponentsPerPixel()
        self.pixel_id = image.GetPixelID()
Example #14
def rgb_to_grayscale_img(image: sitk.Image, white_light_filter_value=0.9):
    """Convert an RGB to grayscale image by extracting the average intensity, filtering out white light >0.9 max"""
    array = sitk.GetArrayFromImage(image)
    dimension = image.GetDimension()

    grayscale_array = np.average(array, 2)
    grayscale_array[grayscale_array > white_light_filter_value *
                    np.max(array)] = 0

    grayscale_image = sitk.GetImageFromArray(grayscale_array)
    grayscale_image.SetSpacing(image.GetSpacing())
    grayscale_image.SetOrigin(image.GetOrigin())

    return grayscale_image
Example #15
    def execute(self,
                image: sitk.Image,
                params: fltr.IFilterParams = None) -> sitk.Image:
        """Executes a atlas coordinates feature extractor on an image.

        Args:
            image (sitk.Image): The image.
            params (fltr.IFilterParams): The parameters (unused).

        Returns:
            sitk.Image: The atlas coordinates image
            (a vector image with 3 components, which represent the physical x, y, z coordinates in mm).

        Raises:
            ValueError: If image is not 3-D.
        """

        if image.GetDimension() != 3:
            raise ValueError('image needs to be 3-D')

        x, y, z = image.GetSize()

        # create matrix with homogeneous indices in axis 3
        coords = np.zeros((x, y, z, 4))
        coords[..., 0] = np.arange(x)[:, np.newaxis, np.newaxis]
        coords[..., 1] = np.arange(y)[np.newaxis, :, np.newaxis]
        coords[..., 2] = np.arange(z)[np.newaxis, np.newaxis, :]
        coords[..., 3] = 1

        # reshape such that each voxel is one row
        lin_coords = np.reshape(
            coords, [coords.shape[0] * coords.shape[1] * coords.shape[2], 4])

        # generate transformation matrix
        tmpmat = image.GetDirection() + image.GetOrigin()
        tfm = np.reshape(tmpmat, [3, 4], order='F')
        tfm = np.vstack((tfm, [0, 0, 0, 1]))

        atlas_coords = (tfm @ np.transpose(lin_coords))[0:3, :]
        atlas_coords = np.reshape(np.transpose(atlas_coords), [z, y, x, 3],
                                  'F')

        img_out = sitk.GetImageFromArray(atlas_coords)
        img_out.CopyInformation(image)

        return img_out
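A hedged usage sketch: this execute method also belongs to an unnamed feature-extraction filter, so the class name below is assumed; per the docstring, the result is a vector image whose three components hold the physical x, y, z coordinate (in mm) of each voxel.

import SimpleITK as sitk

atlas_image = sitk.ReadImage('path/to/atlas_t1.mha')  # placeholder path
extractor = AtlasCoordinates()  # assumed class name
coordinates = extractor.execute(atlas_image)  # 3-component coordinate image (see docstring)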
Example #16
    def __init__(self, image: sitk.Image):
        """Represents ITK image properties.

        Holds common ITK image meta-data such as the size, origin, spacing, and direction.

        See Also:
            SimpleITK provides `itk::simple::Image::CopyInformation`_ to copy image information.

        .. _itk::simple::Image::CopyInformation:
            https://itk.org/SimpleITKDoxygen/html/classitk_1_1simple_1_1Image.html#afa8a4757400c414e809d1767ee616bd0

        Args:
            image (sitk.Image): The image whose properties to hold.
        """
        self.size = image.GetSize()
        self.origin = image.GetOrigin()
        self.spacing = image.GetSpacing()
        self.direction = image.GetDirection()
        self.dimensions = image.GetDimension()
        self.number_of_components_per_pixel = image.GetNumberOfComponentsPerPixel()
        self.pixel_id = image.GetPixelID()
Example #17
def sitk_to_nib(
    image: sitk.Image,
    keepdim: bool = False,
) -> Tuple[np.ndarray, np.ndarray]:
    data = sitk.GetArrayFromImage(image).transpose()
    data = check_uint_to_int(data)
    num_components = image.GetNumberOfComponentsPerPixel()
    if num_components == 1:
        data = data[np.newaxis]  # add channels dimension
    input_spatial_dims = image.GetDimension()
    if input_spatial_dims == 2:
        data = data[..., np.newaxis]
    elif input_spatial_dims == 4:  # probably a bad NIfTI (1, sx, sy, sz, c)
        # Try to fix it
        num_components = data.shape[-1]
        data = data[0]
        data = data.transpose(3, 0, 1, 2)
        input_spatial_dims = 3
    if not keepdim:
        data = ensure_4d(data, num_spatial_dims=input_spatial_dims)
    assert data.shape[0] == num_components
    affine = get_ras_affine_from_sitk(image)
    return data, affine
Example #18
    def assert_img_properties(img: SimpleITK.Image):
        assert img.GetDimension() == 4
        assert img.GetWidth() == 10
        assert img.GetHeight() == 11
        assert img.GetDepth() == 12
        assert img.GetSize()[-1] == 13
Example #19
    def write(self, segmentation: sitk.Image,
              source_images: List[pydicom.Dataset]) -> pydicom.Dataset:
        """Writes a DICOM-SEG dataset from a segmentation image and the
        corresponding DICOM source images.

        Args:
            segmentation: A `SimpleITK.Image` with integer labels and a single
                component per spatial location.
            source_images: A list of `pydicom.Dataset` which are the
                source images for the segmentation image.

        Returns:
            A `pydicom.Dataset` instance with all necessary information and
            meta information for writing the dataset to disk.
        """
        if segmentation.GetDimension() != 3:
            raise ValueError("Only 3D segmentation data is supported")

        if segmentation.GetNumberOfComponentsPerPixel() > 1:
            raise ValueError("Multi-class segmentations can only be "
                             "represented with a single component per voxel")

        if segmentation.GetPixelID() not in [
                sitk.sitkUInt8,
                sitk.sitkUInt16,
                sitk.sitkUInt32,
                sitk.sitkUInt64,
        ]:
            raise ValueError("Unsigned integer data type required")

        # TODO Add further checks if source images are from the same series
        slice_to_source_images = self._map_source_images_to_segmentation(
            segmentation, source_images)

        # Compute unique labels and their respective bounding boxes
        label_statistics_filter = sitk.LabelStatisticsImageFilter()
        label_statistics_filter.Execute(segmentation, segmentation)
        unique_labels = set(
            [x for x in label_statistics_filter.GetLabels() if x != 0])
        if len(unique_labels) == 0:
            raise ValueError("Segmentation does not contain any labels")

        # Check if all present labels were declared in the DICOM template
        declared_segments = set(
            [x.SegmentNumber for x in self._template.SegmentSequence])
        missing_declarations = unique_labels.difference(declared_segments)
        if missing_declarations:
            missing_segment_numbers = ", ".join(
                [str(x) for x in missing_declarations])
            message = (
                f"Skipping segment(s) {missing_segment_numbers}, since their "
                "declaration is missing in the DICOM template")
            if not self._skip_missing_segment:
                raise ValueError(message)
            logger.warning(message)
        labels_to_process = unique_labels.intersection(declared_segments)
        if not labels_to_process:
            raise ValueError("No segments found for encoding as DICOM-SEG")

        # Compute bounding boxes for each present label and optionally restrict
        # the volume to serialize to the joined maximum extent
        bboxs = {
            x: label_statistics_filter.GetBoundingBox(x)
            for x in labels_to_process
        }
        if self._inplane_cropping:
            min_x, min_y, _ = np.min([x[::2] for x in bboxs.values()],
                                     axis=0).tolist()
            max_x, max_y, _ = (
                np.max([x[1::2]
                        for x in bboxs.values()], axis=0) + 1).tolist()
            logger.info(
                "Serializing cropped image planes starting at coordinates "
                f"({min_x}, {min_y}) with size ({max_x - min_x}, {max_y - min_y})"
            )
        else:
            min_x, min_y = 0, 0
            max_x, max_y = segmentation.GetWidth(), segmentation.GetHeight()
            logger.info(
                f"Serializing image planes at full size ({max_x}, {max_y})")

        # Create target dataset for storing serialized data
        result = SegmentationDataset(
            reference_dicom=source_images[0] if source_images else None,
            rows=max_y - min_y,
            columns=max_x - min_x,
            segmentation_type=SegmentationType.BINARY,
        )
        dimension_organization = DimensionOrganizationSequence()
        dimension_organization.add_dimension("ReferencedSegmentNumber",
                                             "SegmentIdentificationSequence")
        dimension_organization.add_dimension("ImagePositionPatient",
                                             "PlanePositionSequence")
        result.add_dimension_organization(dimension_organization)
        writer_utils.copy_segmentation_template(
            target=result,
            template=self._template,
            segments=labels_to_process,
            skip_missing_segment=self._skip_missing_segment,
        )
        writer_utils.set_shared_functional_groups_sequence(
            target=result, segmentation=segmentation)

        # FIX - Use ImageOrientationPatient value from DICOM source rather than the segmentation
        result.SharedFunctionalGroupsSequence[0].PlaneOrientationSequence[
            0].ImageOrientationPatient = source_images[
                0].ImageOrientationPatient

        buffer = sitk.GetArrayFromImage(segmentation)
        for segment in labels_to_process:
            logger.info(f"Processing segment {segment}")

            if self._skip_empty_slices:
                bbox = bboxs[segment]
                min_z, max_z = bbox[4], bbox[5] + 1
            else:
                min_z, max_z = 0, segmentation.GetDepth()
            logger.info(
                "Total number of slices that will be processed for segment "
                f"{segment} is {max_z - min_z} (inclusive from {min_z} to {max_z})"
            )

            skipped_slices = []
            for slice_idx in range(min_z, max_z):
                frame_index = (min_x, min_y, slice_idx)
                frame_position = segmentation.TransformIndexToPhysicalPoint(
                    frame_index)
                frame_data = np.equal(
                    buffer[slice_idx, min_y:max_y, min_x:max_x], segment)
                if self._skip_empty_slices and not frame_data.any():
                    skipped_slices.append(slice_idx)
                    continue

                frame_fg_item = result.add_frame(
                    data=frame_data.astype(np.uint8),
                    referenced_segment=segment,
                    referenced_images=slice_to_source_images[slice_idx],
                )

                frame_fg_item.FrameContentSequence = [pydicom.Dataset()]
                frame_fg_item.FrameContentSequence[0].DimensionIndexValues = [
                    segment,  # Segment number
                    slice_idx - min_z + 1,  # Slice index within cropped volume
                ]
                frame_fg_item.PlanePositionSequence = [pydicom.Dataset()]
                frame_fg_item.PlanePositionSequence[0].ImagePositionPatient = [
                    f"{x:e}" for x in frame_position
                ]

            if skipped_slices:
                logger.info(f"Skipped empty slices for segment {segment}: "
                            f'{", ".join([str(x) for x in skipped_slices])}')

        # Encode all frames into a bytearray
        if self._inplane_cropping or self._skip_empty_slices:
            num_encoded_bytes = len(result.PixelData)
            max_encoded_bytes = (segmentation.GetWidth() *
                                 segmentation.GetHeight() *
                                 segmentation.GetDepth() *
                                 len(result.SegmentSequence) // 8)
            savings = (1 - num_encoded_bytes / max_encoded_bytes) * 100
            logger.info(
                f"Optimized frame data length is {num_encoded_bytes:,}B "
                f"instead of {max_encoded_bytes:,}B (saved {savings:.2f}%)")

        result.SegmentsOverlap = "NO"

        return result
Example #20
    def execute(self, image: sitk.Image, params: fltr.FilterParams = None,
                multiprocessing_features: bool = False) -> sitk.Image:
        """Executes a neighborhood feature extractor on an image.

        Args:
            image (sitk.Image): The image.
            params (fltr.FilterParams): The parameters (unused).
            multiprocessing_features: uses multiprocessing for feature extraction if specified as True

        Returns:
            sitk.Image: The normalized image.

        Raises:
            ValueError: If image is not 3-D.
        """
        # image.GetSize() = (197, 233, 189)
        if image.GetDimension() != 3:
            raise ValueError('image needs to be 3-D')

        # test the function and get the output dimension for later reshaping
        function_output = self.function(np.array([1, 2, 3]))
        if np.isscalar(function_output):  # how can this ever be a scalar if first_order_features output nd.arrays?
            img_out = sitk.Image(image.GetSize(), sitk.sitkFloat32)  # image is 3D (N x M x C)
        elif not isinstance(function_output, np.ndarray):  # if function_output isn't nd.array (scalar nor array)
            raise ValueError('function must return a scalar or a 1-D np.ndarray')
        elif function_output.ndim > 1:  # if nd.array is non 1-D
            raise ValueError('function must return a scalar or a 1-D np.ndarray')
        elif function_output.shape[0] <= 1:  # if nd.array doesn't have at least 2 elements
            raise ValueError('function must return a scalar or a 1-D np.ndarray with at least two elements')
        else:  # ---------------(197, 233, 189)----------------------------number of features: int
            img_out = sitk.Image(image.GetSize(), sitk.sitkVectorFloat32, function_output.shape[0])
        # this last else will create an img_out which has a number of components per pixel = number of features
        #   i.e. another dimension will be added for the 3-D matrix, making it "4-D"
        # prove: "number of components per pixel = img_out.GetNumberOfComponentsPerPixel()
        #   img_out still has size = (197, 233, 189)
        # ------------------------------------------------- z    y    x
        img_out_arr = sitk.GetArrayFromImage(img_out)  # (189, 233, 197, 2)
        img_arr = sitk.GetArrayFromImage(image)  # (189, 233, 197)   shape is "swapped"
        z, y, x = img_arr.shape
        z_offset = self.kernel[2]
        y_offset = self.kernel[1]
        x_offset = self.kernel[0]
        pad = ((0, z_offset), (0, y_offset), (0, x_offset))
        #   img_arr is extended by adding 3 sheets, 3 rows and 3 columns
        img_arr_padded = np.pad(img_arr, pad, 'symmetric')  # (192, 236, 200)

        start = time.perf_counter()

        # with concurrent.futures.ProcessPoolExecutor() as executor:
        # params_list = list_maker(img_arr_padded, y, y_offset, z, z_offset, x, x_offset)
        if not multiprocessing_features:

            for xx in range(x):
                for yy in range(y):
                    for zz in range(z):
                        # print('x, y, z = ' + str(xx) + ' ' + str(yy) + ' ' + str(zz))
                        val = self.function(img_arr_padded[zz:zz + z_offset, yy:yy + y_offset, xx:xx + x_offset])
                        img_out_arr[zz, yy, xx] = val

        else:

            rets = []
            # builds sets of arguments to be fed to pools
            for zz in range(z):
                rets.append([img_arr_padded, img_out_arr, zz, z_offset, y_offset, x_offset])

            # TODO: change argument to (multiprocessing.cpu_count() - 1) when using own computer
            # as it sets a limit of cpu cores to be used, without overloading hardware
            p = multiprocessing.Pool(multiprocessing.cpu_count())

            result = p.map(firstOFeature_slice_aux, rets)

            p.close()
            p.join()

            # Assigns each obtained "zz-slice" to each row of the 4-D array output image
            # We have to check the whole result and assign slices in a sorted way since pools are not synchronous
            for zz in range(z):
                for zzi in range(z):

                    if result[zzi][1] == zz:
                        img_out_arr[zz, :, :] = result[zzi][0]
                        break

        finish = time.perf_counter()
        print(f'Finished in {round(finish - start, 2)} second(s)')

        img_out = sitk.GetImageFromArray(img_out_arr)
        img_out.CopyInformation(image)

        return img_out
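A hedged usage sketch mirroring the one for Example #8, but exercising the multiprocessing branch: self.function is only called once here to probe the output feature dimension, while the per-slice work is delegated to the module-level firstOFeature_slice_aux helper, which must be importable so multiprocessing.Pool can pickle it; the class name remains hypothetical.

import numpy as np
import SimpleITK as sitk

image = sitk.ReadImage('path/to/t1.mha')  # placeholder path
extractor = NeighborhoodFeatureExtractor()  # hypothetical class name, as in Example #8
extractor.kernel = (3, 3, 3)
extractor.function = lambda patch: np.array([patch.mean(), patch.std()])  # probes output size only
features = extractor.execute(image, multiprocessing_features=True)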