Example #1
def main(image, mask, plot=False, plot_type='objects'):

    mask_image = np.copy(image)
    mask_image[mask == 0] = 0

    if plot:
        logger.info('create plot')
        from jtlib import plotting
        if plot_type == 'objects':
            colorscale = plotting.create_colorscale('Spectral',
                                                    n=image.max(),
                                                    permute=True,
                                                    add_background=True)
            data = [
                plotting.create_mask_image_plot(mask,
                                                'ul',
                                                colorscale=colorscale),
                plotting.create_mask_image_plot(mask_image,
                                                'ur',
                                                colorscale=colorscale)
            ]
            figure = plotting.create_figure(data, title='Masked label image')
        elif plot_type == 'intensity':
            data = [
                plotting.create_mask_image_plot(mask, 'ul'),
                plotting.create_intensity_image_plot(mask_image, 'ur')
            ]
            figure = plotting.create_figure(data,
                                            title='Masked intensity image')
    else:
        figure = str()

    return Output(mask_image, figure)
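The module above masks an image by zeroing every pixel where the mask is background. A minimal standalone sketch of that operation, assuming only NumPy (the array values are illustrative):

import numpy as np

image = np.array([[10, 20], [30, 40]], dtype=np.uint16)
mask = np.array([[1, 0], [0, 2]], dtype=np.int32)   # e.g. a label image

masked = np.copy(image)
masked[mask == 0] = 0    # keep intensities only inside labeled objects
# masked == [[10, 0], [0, 40]]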
Example #2
def main(mask, plot=False):
    '''Fills holes in connected pixel components.

    Parameters
    ----------
    mask: numpy.ndarray[numpy.bool]
        binary image that should be filled
    plot: bool, optional
        whether a plot should be generated (default: ``False``)

    Returns
    -------
    jtmodules.fill.Output[Union[numpy.ndarray, str]]

    '''
    filled_mask = mh.close_holes(mask, np.ones((3, 3), bool))

    if plot:
        from jtlib import plotting
        plots = [
            plotting.create_mask_image_plot(mask, 'ul'),
            plotting.create_mask_image_plot(filled_mask, 'ur')
        ]
        figure = plotting.create_figure(plots, title='Labeled image')
    else:
        figure = str()

    return Output(filled_mask, figure)
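A minimal sketch of the hole-filling step itself, assuming mahotas is available (the toy mask is illustrative):

import numpy as np
import mahotas as mh

# 3x3 ring of foreground pixels with a one-pixel hole in the centre
mask = np.zeros((5, 5), bool)
mask[1:4, 1:4] = True
mask[2, 2] = False

# close_holes fills background regions that are not connected to the border;
# the 3x3 structuring element matches the call in the module above
filled = mh.close_holes(mask, np.ones((3, 3), bool))
assert filled[2, 2]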
Example #3
def main(mask, plot=False):
    '''Fills holes in connected pixel components.

    Parameters
    ----------
    mask: numpy.ndarray[numpy.bool]
        binary image that should be filled
    plot: bool, optional
        whether a plot should be generated (default: ``False``)

    Returns
    -------
    jtmodules.fill.Output[Union[numpy.ndarray, str]]

    '''
    filled_mask = mh.close_holes(mask, np.ones((3, 3), bool))

    if plot:
        from jtlib import plotting
        plots = [
            plotting.create_mask_image_plot(mask, 'ul'),
            plotting.create_mask_image_plot(filled_mask, 'ur')
        ]
        figure = plotting.create_figure(plots, title='Labeled image')
    else:
        figure = str()

    return Output(filled_mask, figure)
Example #4
def main(label_image, plot=False):
    '''Relabels objects in a label image such that the total number of objects
    is preserved.

    Parameters
    ----------
    label_image: numpy.ndarray[numpy.int32]
        label image that should be relabeled
    plot: bool, optional
        whether a plot should be generated (default: ``False``)

    Returns
    -------
    jtmodules.relabel.Output[Union[numpy.ndarray, str]]

    '''
    relabeled_image = mh.labeled.relabel(label_image)[0]

    if plot:
        from jtlib import plotting
        plots = [
            plotting.create_mask_image_plot(label_image, 'ul'),
            plotting.create_mask_image_plot(relabeled_image, 'ur')
        ]
        figure = plotting.create_figure(plots, title='Relabeled image')
    else:
        figure = str()

    return Output(relabeled_image, figure)
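The relabeling step can be sketched on its own, assuming mahotas; the small label image below is illustrative:

import numpy as np
import mahotas as mh

# labels 1 and 3 are present, label 2 is missing
label_image = np.array([
    [1, 1, 0, 0],
    [0, 0, 0, 3],
    [0, 0, 3, 3],
], dtype=np.int32)

relabeled_image, n_objects = mh.labeled.relabel(label_image)
# n_objects == 2 and the labels are now consecutive (1 and 2)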
Example #5
def main(image, clipping_mask, plot=False):
    '''Clips a labeled image using another image as a mask, such that
    intersecting pixels/voxels are set to background.

    Parameters
    ----------
    image: numpy.ndarray
        image that should be clipped
    clipping_mask: numpy.ndarray[numpy.int32 or numpy.bool]
        image that should be used as clipping mask
    plot: bool, optional
        whether a plot should be generated (default: ``False``)

    Returns
    -------
    jtmodules.clip_objects.Output

    Raises
    ------
    ValueError
        when `image` and `clipping_mask` don't have the same dimensions
    '''
    if image.shape != clipping_mask.shape:
        raise ValueError(
            '"image" and "clipping_mask" must have the same dimensions')
    clipping_mask = clipping_mask > 0

    clipped_image = image.copy()
    clipped_image[clipping_mask] = 0

    if plot:
        from jtlib import plotting
        if str(image.dtype).startswith('uint'):
            plots = [
                plotting.create_intensity_image_plot(image, 'ul', clip=True),
                plotting.create_mask_image_plot(clipping_mask, 'ur'),
                plotting.create_intensity_image_plot(clipped_image,
                                                     'll',
                                                     clip=True)
            ]
        else:
            n_objects = len(np.unique(image)[1:])
            colorscale = plotting.create_colorscale('Spectral',
                                                    n=n_objects,
                                                    permute=True,
                                                    add_background=True)
            plots = [
                plotting.create_mask_image_plot(image,
                                                'ul',
                                                colorscale=colorscale),
                plotting.create_mask_image_plot(clipping_mask, 'ur'),
                plotting.create_mask_image_plot(clipped_image,
                                                'll',
                                                colorscale=colorscale)
            ]
        figure = plotting.create_figure(plots, title='clipped image')
    else:
        figure = str()

    return Output(clipped_image, figure)
Example #6
def main(mask_1, mask_2, plot=False):
    '''Combines two binary masks, such that the resulting combined mask
    is ``True`` where either `mask_1` OR `mask_2` is ``True``.

    Parameters
    ----------
    mask_1: numpy.ndarray[numpy.bool]
        2D binary array
    mask_2: numpy.ndarray[numpy.bool]
        2D binary array

    Returns
    -------
    jtmodules.combine_objects.Output

    '''
    combined_mask = np.logical_or(mask_1, mask_2)

    if plot:
        from jtlib import plotting
        plots = [
            plotting.create_mask_image_plot(mask_1, 'ul'),
            plotting.create_mask_image_plot(mask_2, 'ur'),
            plotting.create_mask_image_plot(combined_mask, 'll')
        ]
        figure = plotting.create_figure(plots, title='combined mask')
    else:
        figure = str()

    return Output(combined_mask, figure)
Example #7
def main(objects, plot=False):
    '''Rasterizes objects onto a label image, i.e. assigns to all pixels of a
    connected component an identifier number that is unique for each object
    in the image.

    Parameters
    ----------
    objects: numpy.ndarray[int32]
        label image with objects
    plot: bool, optional
        whether a plot should be generated (default: ``False``)

    Returns
    -------
    jtmodules.label.Output[Union[numpy.ndarray, str]]
    '''
    label_image = objects
    if plot:
        from jtlib import plotting
        plots = [
            plotting.create_mask_image_plot(label_image, 'ur')
        ]
        figure = plotting.create_figure(plots, title='Labeled image')
    else:
        figure = str()

    return Output(label_image, figure)
Example #8
def main(image, plot=False):
    '''Inverts `image`.

    Parameters
    ----------
    image: numpy.ndarray[Union[numpy.uint8, numpy.uint16, numpy.bool, numpy.int32]]
        image that should be inverted
    plot: bool, optional
        whether a plot should be generated (default: ``False``)

    Returns
    -------
    jtmodules.invert.Output[Union[numpy.ndarray, str]]

    Note
    ----
    In case `image` is a label image with type ``numpy.int32`` it is binarized
    (cast to ``numpy.bool``) before inversion.
    '''
    if image.dtype == np.int32:
        logger.info('binarize label image before inversion')
        image = image > 0
    logger.info('invert image')
    inverted_image = np.invert(image)

    if plot:
        logger.info('create plot')
        from jtlib import plotting
        if str(image.dtype).startswith('uint'):
            data = [
                plotting.create_intensity_image_plot(
                    image,
                    'ul',
                    clip=True,
                ),
                plotting.create_intensity_image_plot(
                    inverted_image,
                    'ur',
                    clip=True,
                ),
            ]
        else:
            data = [
                plotting.create_mask_image_plot(
                    image,
                    'ul',
                    clip=True,
                ),
                plotting.create_mask_image_plot(
                    inverted_image,
                    'ur',
                    clip=True,
                ),
            ]
        figure = plotting.create_figure(data,
                                        title='original and inverted image')
    else:
        figure = str()

    return Output(inverted_image, figure)
Example #9
def main(label_image, plot=False):
    '''Relabels objects in a label image such that the total number of objects
    is preserved.

    Parameters
    ----------
    label_image: numpy.ndarray[numpy.int32]
        label image that should be relabeled
    plot: bool, optional
        whether a plot should be generated (default: ``False``)

    Returns
    -------
    jtmodules.relabel.Output[Union[numpy.ndarray, str]]

    '''
    relabeled_image = mh.labeled.relabel(label_image)[0]

    if plot:
        from jtlib import plotting
        plots = [
            plotting.create_mask_image_plot(label_image, 'ul'),
            plotting.create_mask_image_plot(relabeled_image, 'ur')
        ]
        figure = plotting.create_figure(plots, title='Relabeled image')
    else:
        figure = str()

    return Output(relabeled_image, figure)
Example #10
def main(image, threshold, plot=False):
    '''Thresholds an image by applying a given global threshold level.

    Parameters
    ----------
    image: numpy.ndarray
        image of arbitrary data type that should be thresholded
    threshold: int
        threshold level
    plot: bool, optional
        whether a plot should be generated (default: ``False``)

    Returns
    -------
    jtmodules.threshold_manual.Output[Union[numpy.ndarray, str]]
    '''
    logger.info('threshold image at %d', threshold)
    mask = image > threshold

    if plot:
        logger.info('create plot')
        from jtlib import plotting
        outlines = mh.morph.dilate(mh.labeled.bwperim(mask))
        plots = [
            plotting.create_intensity_overlay_image_plot(
                image, outlines, 'ul'),
            plotting.create_mask_image_plot(mask, 'ur')
        ]
        figure = plotting.create_figure(plots,
                                        title='thresholded at %s' % threshold)
    else:
        figure = str()

    return Output(mask, figure)
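A standalone sketch of the thresholding and outline computation used above, assuming NumPy and mahotas (the random image stands in for real data):

import numpy as np
import mahotas as mh

image = np.random.randint(0, 200, (64, 64)).astype(np.uint16)
threshold = 150

mask = image > threshold
# thickened object perimeters, as used for the intensity overlay plot
outlines = mh.morph.dilate(mh.labeled.bwperim(mask))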
Example #11
def main(image, method='max', plot=False):
    '''Projects an image along the last dimension using the given `method`.

    Parameters
    ----------
    image: numpy.ndarray[Union[numpy.uint8, numpy.uint16]]
        grayscale image
    method: str, optional
        method used for projection
        (default: ``"max"``, options: ``{"max", "sum"}``)
    plot: bool, optional
        whether a figure should be created (default: ``False``)
    '''
    logger.info('project image using "%s" method', method)
    func = projections[method]
    projected_image = func(image, axis=-1)

    projected_image = projected_image.astype(image.dtype)

    if plot:
        logger.info('create plot')
        from jtlib import plotting
        plots = [
            plotting.create_intensity_image_plot(
                projected_image, 'ul', clip=True
            )
        ]
        figure = plotting.create_figure(plots, title='projection image')
    else:
        figure = str()

    return Output(projected_image, figure)
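The `projections` lookup referenced above is not shown in this example; presumably it maps method names to NumPy reduction functions, roughly like this sketch:

import numpy as np

# assumed mapping from method name to projection function
projections = {'max': np.max, 'sum': np.sum}

stack = np.random.randint(0, 100, (32, 32, 5)).astype(np.uint16)
method = 'max'

projected = projections[method](stack, axis=-1).astype(stack.dtype)
# projected.shape == (32, 32)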
Example #12
def main(image, n, plot=False):
    '''Expands objects in `image` by `n` pixels along each axis.

    Parameters
    ----------
    image: numpy.ndarray[numpy.int32]
        2D label image with objects that should be expanded or shrunk
    n: int
        number of pixels by which each connected component should be
        expanded or shrunk
    plot: bool, optional
        whether a plot should be generated (default: ``False``)

    Returns
    -------
    jtmodules.expand_objects.Output
    '''
    # NOTE: code from CellProfiler module "expandorshrink"
    # NOTE (S.B. 25.1.2018): renamed from "expand" to "expand_or_shrink"
    expanded_image = image.copy()
    if (n > 0):
        logger.info('expanding objects by %d pixels', n)
        background = image == 0
        distance, (i, j) = ndi.distance_transform_edt(background,
                                                      return_indices=True)
        mask = background & (distance < n)
        expanded_image[mask] = image[i[mask], j[mask]]

    elif (n < 0):
        logger.info('shrinking objects by %d pixels', abs(n))
        objects = image != 0
        distance = ndi.distance_transform_edt(objects, return_indices=False)
        mask = np.invert(distance > abs(n))
        expanded_image[mask] = 0

    if plot:
        from jtlib import plotting
        n_objects = len(np.unique(expanded_image)[1:])
        colorscale = plotting.create_colorscale('Spectral',
                                                n=n_objects,
                                                permute=True,
                                                add_background=True)
        plots = [
            plotting.create_mask_image_plot(image, 'ul',
                                            colorscale=colorscale),
            plotting.create_mask_image_plot(expanded_image,
                                            'ur',
                                            colorscale=colorscale)
        ]
        figure = plotting.create_figure(plots, title='expanded image')
    else:
        figure = str()

    return Output(expanded_image, figure)
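The expansion step relies on the Euclidean distance transform: each background pixel within `n` pixels of an object receives the label of its nearest object pixel. A minimal sketch with SciPy (the single-pixel object is illustrative):

import numpy as np
import scipy.ndimage as ndi

image = np.zeros((7, 7), dtype=np.int32)
image[3, 3] = 1          # one object pixel, label 1
n = 2

background = image == 0
# distances to, and row/column indices of, the nearest object pixel
distance, (i, j) = ndi.distance_transform_edt(background, return_indices=True)
expanded = image.copy()
grow = background & (distance < n)
expanded[grow] = image[i[grow], j[grow]]   # copy the nearest object's label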
Example #13
def main(image, mask, plot=False, plot_type='objects'):

    mask_image = np.copy(image)
    mask_image[mask == 0] = 0

    if plot:
        logger.info('create plot')
        from jtlib import plotting
        if plot_type == 'objects':
            colorscale = plotting.create_colorscale(
                'Spectral', n=image.max(), permute=True, add_background=True
            )
            data = [
                plotting.create_mask_image_plot(
                    mask, 'ul', colorscale=colorscale
                ),
                plotting.create_mask_image_plot(
                    mask_image, 'ur', colorscale=colorscale
                )
            ]
            figure = plotting.create_figure(
                data,
                title='Masked label image'
            )
        elif plot_type == 'intensity':
            data = [
                plotting.create_mask_image_plot(
                    mask, 'ul'
                ),
                plotting.create_intensity_image_plot(
                    mask_image, 'ur'
                )
            ]
            figure = plotting.create_figure(
                data,
                title='Masked intensity image'
            )
    else:
        figure = str()

    return Output(mask_image, figure)
Example #14
def main(image, plot=False):
    '''Inverts `image`.

    Parameters
    ----------
    image: numpy.ndarray[Union[numpy.uint8, numpy.uint16, numpy.bool, numpy.int32]]
        image that should be inverted
    plot: bool, optional
        whether a plot should be generated (default: ``False``)

    Returns
    -------
    jtmodules.invert.Output[Union[numpy.ndarray, str]]

    Note
    ----
    In case `image` is a label image with type ``numpy.int32`` it is binarized
    (cast to ``numpy.bool``) before inversion.
    '''
    if image.dtype == np.int32:
        logger.info('binarize label image before inversion')
        image = image > 0
    logger.info('invert image')
    inverted_image = np.invert(image)

    if plot:
        logger.info('create plot')
        from jtlib import plotting
        if str(image.dtype).startswith('uint'):
            data = [
                plotting.create_intensity_image_plot(
                    image, 'ul', clip=True,
                ),
                plotting.create_intensity_image_plot(
                    inverted_image, 'ur', clip=True,
                ),
            ]
        else:
            data = [
                plotting.create_mask_image_plot(
                    image, 'ul', clip=True,
                ),
                plotting.create_mask_image_plot(
                    inverted_image, 'ur', clip=True,
                ),
            ]
        figure = plotting.create_figure(
            data, title='original and inverted image'
        )
    else:
        figure = str()

    return Output(inverted_image, figure)
Example #15
def main(intensity_image, min_value=None, max_value=None, plot=False):
    '''Rescales an image between `min_value` and `max_value`.

    Parameters
    ----------
    intensity_image: numpy.ndarray[Union[numpy.uint8, numpy.uint16]]
        grayscale image
    min_value: int, optional
        grayscale value to be set as zero in the rescaled image (default:
        ``None``)
    max_value: int, optional
        grayscale value to be set as the maximum in the rescaled image
        (default: ``None``)
    plot: bool, optional
        whether a figure should be created (default: ``False``)
    '''

    rescaled_image = np.zeros(shape=intensity_image.shape, dtype=np.int32)

    if min_value is not None:
        logger.info('subtract min_value %s', min_value)
        rescaled_image = intensity_image.astype(np.int32) - min_value
        rescaled_image[rescaled_image < 0] = 0
    else:
        rescaled_image = intensity_image

    if max_value is not None:
        logger.info('set max_value %s', max_value)

        max_for_type = np.iinfo(intensity_image.dtype).max
        rescaled_image = rescaled_image.astype(
            np.float32) / max_value * max_for_type
        rescaled_image[rescaled_image > max_for_type] = max_for_type

    rescaled_image = rescaled_image.astype(intensity_image.dtype)

    if plot:
        logger.info('create plot')
        from jtlib import plotting
        plots = [
            plotting.create_intensity_image_plot(intensity_image,
                                                 'ul',
                                                 clip=True),
            plotting.create_intensity_image_plot(rescaled_image,
                                                 'ur',
                                                 clip=True)
        ]
        figure = plotting.create_figure(plots, title='rescaled image')
    else:
        figure = str()

    return Output(rescaled_image, figure)
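The rescaling arithmetic above can be shown on its own: subtract the lower bound, then stretch so that the upper bound maps to the maximum of the input data type. A sketch assuming NumPy only (values are illustrative):

import numpy as np

intensity_image = np.array([[100, 500], [1000, 4000]], dtype=np.uint16)
min_value, max_value = 100, 4000

rescaled = intensity_image.astype(np.int32) - min_value
rescaled[rescaled < 0] = 0

dtype_max = np.iinfo(intensity_image.dtype).max   # 65535 for uint16
rescaled = rescaled.astype(np.float32) / max_value * dtype_max
rescaled[rescaled > dtype_max] = dtype_max
rescaled = rescaled.astype(intensity_image.dtype)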
Example #16
def main(mask_1, mask_2, logical_operation, plot=False):
    '''Combines two binary masks by applying the given logical operation, such
    that the resulting combined mask is ``True`` wherever the operation applied
    to `mask_1` and `mask_2` yields ``True``.

    Parameters
    ----------
    mask_1: numpy.ndarray[Union[numpy.bool, numpy.int32]]
        binary or labeled mask
    mask_2: numpy.ndarray[Union[numpy.bool, numpy.int32]]
        binary or labeled mask
    logical_operation: str
        name of the logical operation to be applied
        (options: ``{"AND", "OR", "EXCLUSIVE_OR"}``)
    plot: bool, optional
        whether a plot should be generated (default: ``False``)

    Returns
    -------
    jtmodules.combine_objects.Output
    '''
    mask_1 = mask_1 != 0
    mask_2 = mask_2 != 0
    
    if logical_operation == "AND":
        logger.info('Apply logical AND')
        combined_mask = np.logical_and(mask_1, mask_2)
    elif logical_operation == "OR":
        logger.info('Apply logical OR')
        combined_mask = np.logical_or(mask_1, mask_2)
    elif logical_operation == "EXCLUSIVE_OR":
        logger.info('Apply logical XOR')
        combined_mask = np.logical_xor(mask_1, mask_2)
    else:
        raise ValueError(
            'Argument "logical_operation" can be one of the following:\n'
            '"AND", "OR", "EXCLUSIVE_OR"'
        )

    if plot:
        from jtlib import plotting
        plots = [
            plotting.create_mask_image_plot(mask_1, 'ul'),
            plotting.create_mask_image_plot(mask_2, 'ur'),
            plotting.create_mask_image_plot(combined_mask, 'll')
        ]
        figure = plotting.create_figure(plots, title='combined mask')
    else:
        figure = str()

    return Output(combined_mask, figure)
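The if/elif chain above dispatches on the operation name; the same dispatch can be sketched with a lookup table (toy masks, assuming NumPy):

import numpy as np

mask_1 = np.array([[1, 0], [2, 0]], dtype=np.int32) != 0   # binarize labeled input
mask_2 = np.array([[0, 1], [1, 0]], dtype=np.int32) != 0

operations = {
    'AND': np.logical_and,
    'OR': np.logical_or,
    'EXCLUSIVE_OR': np.logical_xor,
}
combined = operations['OR'](mask_1, mask_2)
# combined == [[True, True], [True, False]]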
Example #17
def main(image, plot=False):

    if plot:
        logger.info('create plot')
        from jtlib import plotting
        colorscale = plotting.create_colorscale('Spectral',
                                                n=image.max(),
                                                permute=True,
                                                add_background=True)
        data = [
            plotting.create_mask_image_plot(image, 'ul', colorscale=colorscale)
        ]
        figure = plotting.create_figure(
            data, title='LabelImage with "{0}" objects'.format(image.max()))
    else:
        figure = str()

    return Output(figure)
Example #18
def main(image, n, plot=False):
    '''Expands objects in `image` by `n` pixels along each axis.

    Parameters
    ----------
    image: numpy.ndarray[numpy.int32]
        2D label image with objects that should be expanded
    n: int
        number of pixels by which each connected component should be expanded
    plot: bool, optional
        whether a plot should be generated (default: ``False``)

    Returns
    -------
    jtmodules.expand_objects.Output
    '''
    # NOTE: code from CellProfiler module "expandorshrink"
    background = image == 0
    distance, (i, j) = distance_transform_edt(background, return_indices=True)
    expanded_image = image.copy()
    mask = background & (distance < n)
    expanded_image[mask] = image[i[mask], j[mask]]

    if plot:
        from jtlib import plotting
        n_objects = len(np.unique(expanded_image)[1:])
        colorscale = plotting.create_colorscale(
            'Spectral', n=n_objects, permute=True, add_background=True
        )
        plots = [
            plotting.create_mask_image_plot(
                image, 'ul', colorscale=colorscale
            ),
            plotting.create_mask_image_plot(
                expanded_image, 'ur', colorscale=colorscale
            )
        ]
        figure = plotting.create_figure(plots, title='expanded image')
    else:
        figure = str()

    return Output(expanded_image, figure)
Example #19
def main(mask, connectivity=8, plot=False):
    '''Labels objects in a binary image, i.e. assigns to all pixels of a
    connected component an identifier number that is unique for each object
    in the image.

    Parameters
    ----------
    mask: numpy.ndarray[Union[numpy.bool, numpy.int32]]
        binary image that should be labeled
    connectivity: int, optional
        whether a 4-connected (edge neighbours only) or 8-connected (edge and
        diagonal neighbours) neighborhood should be considered
        (default: ``8``, options: ``{4, 8}``)
    plot: bool, optional
        whether a plot should be generated (default: ``False``)

    Returns
    -------
    jtmodules.label.Output[Union[numpy.ndarray, str]]

    Note
    ----
    If `mask` is not binary, it will be binarized, i.e. pixels will be set to
    ``True`` if values are greater than zero and ``False`` otherwise.
    '''
    mask = mask > 0
    label_image = label(mask, connectivity)

    n = len(np.unique(label_image)[1:])
    logger.info('identified %d objects', n)

    if plot:
        from jtlib import plotting
        plots = [
            plotting.create_mask_image_plot(mask, 'ul'),
            plotting.create_mask_image_plot(label_image, 'ur')
        ]
        figure = plotting.create_figure(plots, title='Labeled image')
    else:
        figure = str()

    return Output(label_image, figure)
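The `label` function called above is presumably a jtlib helper; the 4- versus 8-connectivity choice can be sketched directly with mahotas structuring elements (the tiny mask is illustrative):

import numpy as np
import mahotas as mh

mask = np.array([
    [1, 0, 0],
    [0, 1, 0],
    [0, 0, 0],
], dtype=bool)

bc4 = np.array([[0, 1, 0], [1, 1, 1], [0, 1, 0]], dtype=bool)  # edge neighbours only
bc8 = np.ones((3, 3), dtype=bool)                              # edges and diagonals

labels4, n4 = mh.label(mask, Bc=bc4)   # n4 == 2: diagonal pixels stay separate
labels8, n8 = mh.label(mask, Bc=bc8)   # n8 == 1: diagonal pixels merge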
Example #20
def main(image, threshold, plot=False):
    '''Thresholds an image by applying a given global threshold level.

    Parameters
    ----------
    image: numpy.ndarray
        image of arbitrary data type that should be thresholded
    threshold: int
        threshold level
    plot: bool, optional
        whether a plot should be generated (default: ``False``)

    Returns
    -------
    jtmodules.threshold_manual.Output[Union[numpy.ndarray, str]]
    '''
    logger.info('threshold image at %d', threshold)
    mask = image > threshold

    if plot:
        logger.info('create plot')
        from jtlib import plotting
        outlines = mh.morph.dilate(mh.labeled.bwperim(mask))
        plots = [
            plotting.create_intensity_overlay_image_plot(
                image, outlines, 'ul'
            ),
            plotting.create_mask_image_plot(mask, 'ur')
        ]
        figure = plotting.create_figure(
            plots, title='thresholded at %s' % threshold
        )
    else:
        figure = str()

    return Output(mask, figure)
Example #21
def main(image,
         correction_factor=1,
         min_threshold=None,
         max_threshold=None,
         plot=False):
    '''Thresholds an image by applying an automatically determined global
    threshold level using
    `Otsu's method <https://en.wikipedia.org/wiki/Otsu%27s_method>`_.

    Additional parameters allow correction of the calculated threshold
    level or restricting it to a defined range. This may be useful to prevent
    extreme levels in case the `image` contains artifacts. Setting
    `min_threshold` and `max_threshold` to the same value results in a
    manual thresholding.

    Parameters
    ----------
    image: numpy.ndarray[numpy.uint8 or numpy.uint16]
        grayscale image that should be thresholded
    correction_factor: int, optional
        value by which the calculated threshold level will be multiplied
        (default: ``1``)
    min_threshold: int, optional
        minimal threshold level (default: ``numpy.min(image)``)
    max_threshold: int, optional
        maximal threshold level (default: ``numpy.max(image)``)
    plot: bool, optional
        whether a plot should be generated (default: ``False``)

    Returns
    -------
    jtmodules.threshold_otsu.Output[Union[numpy.ndarray, str]]
    '''
    if max_threshold is None:
        max_threshold = np.max(image)
    logger.debug('set maximal threshold: %d', max_threshold)

    if min_threshold is None:
        min_threshold = np.min(image)
    logger.debug('set minimal threshold: %d', min_threshold)
    logger.debug('set threshold correction factor: %.2f', correction_factor)

    threshold = mh.otsu(image)
    logger.info('calculated threshold level: %d', threshold)

    corr_threshold = threshold * correction_factor
    logger.info('corrected threshold level: %d', corr_threshold)

    if corr_threshold > max_threshold:
        logger.info('set threshold level to maximum: %d', max_threshold)
        corr_threshold = max_threshold
    elif corr_threshold < min_threshold:
        logger.info('set threshold level to minimum: %d', min_threshold)
        corr_threshold = min_threshold

    logger.info('threshold image at %d', corr_threshold)
    mask = image > corr_threshold

    if plot:
        logger.info('create plot')
        from jtlib import plotting
        outlines = mh.morph.dilate(mh.labeled.bwperim(mask))
        plots = [
            plotting.create_intensity_overlay_image_plot(
                image, outlines, 'ul'),
            plotting.create_mask_image_plot(mask, 'ur')
        ]
        figure = plotting.create_figure(plots,
                                        title='thresholded at %s' %
                                        corr_threshold)
    else:
        figure = str()

    return Output(mask, figure)
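The thresholding logic above reduces to: compute the Otsu level, scale it by the correction factor, clamp it to the allowed range, and threshold. A sketch assuming mahotas (the random image stands in for real data):

import numpy as np
import mahotas as mh

image = np.random.randint(0, 1000, (64, 64)).astype(np.uint16)
correction_factor = 0.9
min_threshold, max_threshold = 50, 800

threshold = mh.otsu(image)                        # automatic global level
corr_threshold = threshold * correction_factor    # optional correction
corr_threshold = min(max(corr_threshold, min_threshold), max_threshold)
mask = image > corr_threshold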
Example #22
def main(image, output_type='16-bit', plot=False):
    '''Converts an arbitrary Image to an IntensityImage

    Parameters
    ----------
    image: numpy.ndarray
        image to be converted
    output_type: str, optional
        desired output data type
        (options: ``{"8-bit", "16-bit"}``, default: ``"16-bit"``)
    plot: bool, optional
        whether a plot should be generated (default: ``False``)

    Returns
    -------
    jtmodules.convert_to_intensity.Output
    '''
    if output_type == '8-bit':
        bit_depth = np.uint8
        max_value = pow(2, 8)
    elif output_type == '16-bit':
        bit_depth = np.uint16
        max_value = pow(2, 16)
    else:
        logger.warn('unrecognised requested output data-type %s, using 16-bit', output_type)
        bit_depth = np.uint16
        max_value = pow(2, 16)

    if image.dtype == np.int32:
        logger.info('Converting label image to intensity image')
        if (np.amax(image) < max_value):
            intensity_image = image.astype(dtype=bit_depth)
        else:
            logger.warn(
                '%d objects in input label image exceeds maximum (%d)',
                np.amax(image),
                max_value
            )
            intensity_image = image
    else:
        logger.info('Converting non-label image to intensity image')
        intensity_image = image.astype(dtype=bit_depth)

    if plot:
        from jtlib import plotting
        n_objects = len(np.unique(image)[1:])
        colorscale = plotting.create_colorscale(
            'Spectral', n=n_objects, permute=True, add_background=True
        )
        plots = [
            plotting.create_mask_image_plot(
                image, 'ul', colorscale=colorscale
            ),
            plotting.create_intensity_image_plot(
                intensity_image, 'ur'
            )
        ]
        figure = plotting.create_figure(plots, title='convert_to_intensity_image')
    else:
        figure = str()

    return Output(intensity_image, figure)
Example #23
def main(mask, feature, lower_threshold=None, upper_threshold=None, plot=False):
    '''Filters objects (connected components) based on the specified
    value range for a given `feature`.

    Parameters
    ----------
    mask: numpy.ndarray[Union[numpy.bool, numpy.int32]]
        image that should be filtered
    feature: str
        name of the feature based on which the image should be filtered
        (options: ``{"area", "eccentricity", "circularity", "convecity"}``)
    lower_threshold:
        minimal `feature` value objects must have
        (default: ``None``; type depends on the chosen `feature`)
    upper_threshold:
        maximal `feature` value objects must have
        (default: ``None``; type depends on the chosen `feature`)
    plot: bool, optional
        whether a plot should be generated (default: ``False``)

    Returns
    -------
    jtmodules.filter_objects.Output

    Raises
    ------
    ValueError
        when both `lower_threshold` and `upper_threshold` are ``None``
    ValueError
        when value of `feature` is not one of the supported features

    '''
    if lower_threshold is None and upper_threshold is None:
        raise ValueError(
            'Argument "lower_threshold" or "upper_threshold" must be provided. '
        )
    if feature not in SUPPORTED_FEATURES:
        raise ValueError(
            'Argument "feature" must be one of the following: "%s".'
            % '", "'.join(SUPPORTED_FEATURES)
        )

    name = 'Morphology_{0}'.format(feature.capitalize())

    labeled_image = mh.label(mask > 0)[0]
    f = Morphology(labeled_image)
    measurement = f.extract()[name]
    values = measurement.values

    feature_image = create_feature_image(values, labeled_image)
    if not measurement.empty:
        if lower_threshold is None:
            lower_threshold = np.min(values)
        if upper_threshold is None:
            upper_threshold = np.max(values)
        logger.info(
            'keep objects with "%s" values in the range [%d, %d]',
            feature, lower_threshold, upper_threshold
        )

        condition_image = np.logical_or(
            feature_image < lower_threshold, feature_image > upper_threshold
        )
        filtered_mask = labeled_image.copy()
        filtered_mask[condition_image] = 0
    else:
        logger.warn('no objects detected in image')
        filtered_mask = labeled_image

    mh.labeled.relabel(filtered_mask, inplace=True)

    if plot:
        from jtlib import plotting
        plots = [
            plotting.create_mask_image_plot(mask, 'ul'),
            plotting.create_float_image_plot(feature_image, 'ur'),
            plotting.create_mask_image_plot(filtered_mask, 'll'),
        ]
        n_removed = (
            len(np.unique(labeled_image)) - len(np.unique(filtered_mask))
        )
        figure = plotting.create_figure(
            plots,
            title='Filtered for feature "{0}": {1} objects removed'.format(
                feature, n_removed
            )
        )
    else:
        figure = str()

    return Output(filtered_mask, figure)
Example #24
def main(mask,
         intensity_image,
         min_area,
         max_area,
         min_cut_area,
         max_circularity,
         max_convexity,
         plot=False,
         selection_test_mode=False):
    '''Detects clumps in `mask` given criteria provided by the user
    and cuts them along the borders of watershed regions, which are determined
    based on the distance transform of `mask`.

    Parameters
    ----------
    mask: numpy.ndarray[Union[numpy.int32, numpy.bool]]
        2D binary or labeled image encoding potential clumps
    intensity_image: numpy.ndarray[numpy.uint8 or numpy.uint16]
        2D grayscale image with intensity values of the objects that should
        be detected
    min_area: int
        minimal area an object must have to be considered a clump
    max_area: int
        maximal area an object can have to be considered a clump
    min_cut_area: int
        minimal area an object must have
        (useful to prevent cuts that would result in too small objects)
    max_circularity: float
        maximal circularity an object can have to be considered a clump
    max_convexity: float
        maximal convexity an object can have to be considered a clump
    plot: bool, optional
        whether a plot should be generated
    selection_test_mode: bool, optional
        whether, instead of the normal plot, heatmaps should be generated that
        display values of the selection criteria *area*, *circularity* and
        *convexity* for each individual object in `mask` as well as
        the selected "clumps" based on the criteria provided by the user

    Returns
    -------
    jtmodules.separate_clumps.Output
    '''

    separated_mask = separate_clumped_objects(mask, min_cut_area, min_area,
                                              max_area, max_circularity,
                                              max_convexity)

    if plot:
        from jtlib import plotting
        # NOTE: "clumps_mask" is needed in both plotting branches below, so it
        # is computed up front (otherwise the selection test mode plot would
        # reference an undefined variable)
        clumps_mask = np.zeros(mask.shape, bool)
        initial_objects_label_image, n_initial_objects = mh.label(mask > 0)
        for i in range(1, n_initial_objects + 1):
            index = initial_objects_label_image == i
            if len(np.unique(separated_mask[index])) > 1:
                clumps_mask[index] = True

        if selection_test_mode:
            logger.info('create plot for selection test mode')
            labeled_mask, n_objects = mh.label(mask)
            f = Morphology(labeled_mask)
            values = f.extract()
            area_img = create_feature_image(values['Morphology_Area'].values,
                                            labeled_mask)
            convexity_img = create_feature_image(
                values['Morphology_Convexity'].values, labeled_mask)
            circularity_img = create_feature_image(
                values['Morphology_Circularity'].values, labeled_mask)
            area_colorscale = plotting.create_colorscale(
                'Greens',
                n_objects,
                add_background=True,
                background_color='white')
            circularity_colorscale = plotting.create_colorscale(
                'Blues',
                n_objects,
                add_background=True,
                background_color='white')
            convexity_colorscale = plotting.create_colorscale(
                'Reds',
                n_objects,
                add_background=True,
                background_color='white')
            plots = [
                plotting.create_float_image_plot(area_img,
                                                 'ul',
                                                 colorscale=area_colorscale),
                plotting.create_float_image_plot(
                    convexity_img, 'ur', colorscale=convexity_colorscale),
                plotting.create_float_image_plot(
                    circularity_img, 'll', colorscale=circularity_colorscale),
                plotting.create_mask_image_plot(clumps_mask, 'lr'),
            ]
            figure = plotting.create_figure(
                plots,
                title=('Selection criteria: "area" (green), "convexity" (red) '
                       'and "circularity" (blue)'))
        else:
            logger.info('create plot')

            # pixels that were foreground in "mask" but are background in
            # "separated_mask", i.e. the cut lines
            cut_mask = np.logical_and(mask > 0, separated_mask == 0)

            n_objects = len(np.unique(separated_mask[separated_mask > 0]))
            colorscale = plotting.create_colorscale('Spectral',
                                                    n=n_objects,
                                                    permute=True,
                                                    add_background=True)
            outlines = mh.morph.dilate(mh.labeled.bwperim(separated_mask > 0))
            cutlines = mh.morph.dilate(mh.labeled.bwperim(cut_mask))
            plots = [
                plotting.create_mask_image_plot(separated_mask,
                                                'ul',
                                                colorscale=colorscale),
                plotting.create_intensity_overlay_image_plot(
                    intensity_image, outlines, 'ur'),
                plotting.create_mask_overlay_image_plot(
                    clumps_mask, cutlines, 'll')
            ]
            figure = plotting.create_figure(plots, title='separated clumps')
    else:
        figure = str()

    return Output(separated_mask, figure)
Example #25
def main(image,
         mask,
         threshold=150,
         bead_size=2,
         superpixel_size=4,
         close_surface=False,
         close_disc_size=8,
         plot=False):
    '''Converts an image stack with labelled cell surface to a cell
    `volume` image

    Parameters
    ----------
    image: numpy.ndarray[Union[numpy.uint8, numpy.uint16]]
        grayscale image in which beads should be detected (3D)
    mask: numpy.ndarray[Union[numpy.int32, numpy.bool]]
        binary or labeled image of cell segmentation (2D)
    threshold: int, optional
        intensity of bead (default: ``150``)
    bead_size: int, optional
        minimal size of bead (default: ``2``)
    superpixel_size: int, optional
        size of superpixels for searching the 3D position of a bead
    close_surface: bool, optional
        whether the interpolated surface should be morphologically closed
    close_disc_size: int, optional
        size in pixels of the disc used to morphologically close the
        interpolated surface
    plot: bool, optional
        whether a plot should be generated (default: ``False``)

    Returns
    -------
    jtmodules.generate_volume_image.Output
    '''

    n_slices = image.shape[-1]
    logger.debug('input image has size %d in last dimension', n_slices)

    logger.debug('mask beads inside cell')
    beads_outside_cell = np.copy(image)
    for iz in range(n_slices):
        beads_outside_cell[mask > 0, iz] = 0

    logger.debug('search for 3D position of beads outside cell')
    slide = np.argmax(beads_outside_cell, axis=2)
    slide[slide > np.percentile(slide[mask == 0], 20)] = 0

    logger.debug('determine surface of slide')
    slide_coordinates = array_to_coordinate_list(slide)
    bottom_surface = fit_plane(
        subsample_coordinate_list(slide_coordinates, 2000))

    logger.debug('detect_beads in 2D')
    mip = np.max(image, axis=-1)
    try:
        # TODO: use LOG filter???
        beads, beads_centroids = detect_blobs(image=mip,
                                              mask=np.invert(mask > 0),
                                              threshold=threshold,
                                              min_area=bead_size)
    except Exception:
        logger.warn('detect_blobs failed, returning empty volume image')
        volume_image = np.zeros(shape=mask.shape, dtype=image.dtype)
        figure = str()
        return Output(volume_image, figure)

    n_beads = np.count_nonzero(beads_centroids)
    logger.info('found %d beads on cells', n_beads)

    if n_beads == 0:
        logger.warn('empty volume image')
        volume_image = np.zeros(shape=mask.shape, dtype=image.dtype)
    else:
        logger.debug('locate beads in 3D')
        beads_coords_3D = locate_in_3D(image=image,
                                       mask=beads_centroids,
                                       bin_size=superpixel_size)

        logger.info('interpolate cell surface')
        volume_image = interpolate_surface(coords=beads_coords_3D,
                                           output_shape=np.shape(image[:, :,
                                                                       1]),
                                           method='linear')

        volume_image = volume_image.astype(image.dtype)

        if (close_surface is True):
            import mahotas as mh
            logger.info('morphological closing of cell surface')
            volume_image = mh.close(volume_image, Bc=mh.disk(close_disc_size))
        volume_image[mask == 0] = 0

    if plot:
        logger.debug('convert bottom surface plane to image for plotting')
        bottom_surface_image = np.zeros(slide.shape, dtype=np.uint8)
        for ix in range(slide.shape[0]):
            for iy in range(slide.shape[1]):
                bottom_surface_image[ix, iy] = plane(ix, iy, bottom_surface.x)

        logger.info('create plot')
        from jtlib import plotting
        plots = [
            plotting.create_intensity_image_plot(mip, 'ul', clip=True),
            plotting.create_intensity_image_plot(bottom_surface_image,
                                                 'll',
                                                 clip=True),
            plotting.create_intensity_image_plot(volume_image, 'ur', clip=True)
        ]
        figure = plotting.create_figure(plots,
                                        title='Convert stack to volume image')
    else:
        figure = str()

    return Output(volume_image, figure)
Example #26
def main(image,
         method,
         kernel_size,
         constant=0,
         min_threshold=None,
         max_threshold=None,
         plot=False):
    '''Thresholds an image with a locally adaptive threshold method.

    Parameters
    ----------
    image: numpy.ndarray
        grayscale image that should be thresholded
    method: str
        thresholding method (options: ``{"crosscorr", "niblack"}``)
    kernel_size: int
        size of the neighbourhood region that's used to calculate the threshold
        value at each pixel position (must be an odd number)
    constant: Union[float, int], optional
        depends on `method`; in case of ``"crosscorr"`` method the constant
        is subtracted from the computed weighted sum per neighbourhood region
        and in case of ``"niblack"`` the constant is multiplied by the
        standard deviation and this term is then subtracted from the mean
        computed per neighbourhood region
    min_threshold: int, optional
        minimal threshold level (default: ``numpy.min(image)``)
    max_threshold: int, optional
        maximal threshold level (default: ``numpy.max(image)``)
    plot: bool, optional
        whether a plot should be generated (default: ``False``)

    Returns
    -------
    jtmodules.threshold_adaptive.Output

    Raises
    ------
    ValueError
        when `kernel_size` is not an odd number or when `method` is not valid

    Note
    ----
    Typically requires prior filtering to reduce noise in the image.

    References
    ----------
    .. [1] Niblack, W. 1986: An introduction to Digital Image Processing, Prentice-Hall.
    '''
    if kernel_size % 2 == 0:
        raise ValueError('Argument "kernel_size" must be an odd integer.')
    logger.debug('set kernel size: %d', kernel_size)

    if max_threshold is None:
        max_threshold = np.max(image)
    logger.debug('set maximal threshold: %d', max_threshold)

    if min_threshold is None:
        min_threshold = np.min(image)
    logger.debug('set minimal threshold: %d', min_threshold)

    logger.debug('map image intensities to 8-bit range')
    image_8bit = rescale_to_8bit(image, upper=99.99)

    logger.info('threshold image')
    if method == 'crosscorr':
        thresh_image = cv2.adaptiveThreshold(
            image_8bit,
            maxValue=255,
            adaptiveMethod=cv2.ADAPTIVE_THRESH_GAUSSIAN_C,
            thresholdType=cv2.THRESH_BINARY,
            blockSize=kernel_size,
            C=int(constant))
    elif method == 'niblack':
        thresh_image = cv2.ximgproc.niBlackThreshold(image_8bit,
                                                     maxValue=255,
                                                     type=cv2.THRESH_BINARY,
                                                     blockSize=kernel_size,
                                                     delta=constant)
    else:
        raise ValueError('Argument "method" can be one of the following:\n'
                         '"crosscorr" or "niblack"')
    # OpenCV treats masks as unsigned integer and not as boolean
    thresh_image = thresh_image > 0

    # Manually fine tune automatic thresholding result
    thresh_image[image < min_threshold] = False
    thresh_image[image > max_threshold] = True

    if plot:
        logger.info('create plot')
        from jtlib import plotting
        outlines = mh.morph.dilate(mh.labeled.bwperim(thresh_image))
        plots = [
            plotting.create_intensity_overlay_image_plot(
                image, outlines, 'ul'),
            plotting.create_mask_image_plot(thresh_image, 'ur')
        ]
        figure = plotting.create_figure(
            plots,
            title='thresholded adaptively with kernel size: %d' % kernel_size)
    else:
        figure = str()

    return Output(thresh_image, figure)
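A standalone sketch of the ``"crosscorr"`` branch, assuming OpenCV; note that cv2.adaptiveThreshold expects an 8-bit image and returns 0/255 values rather than booleans (the input values are illustrative):

import numpy as np
import cv2

image_8bit = np.random.randint(0, 255, (64, 64)).astype(np.uint8)
kernel_size = 31   # must be odd
constant = 3

thresh = cv2.adaptiveThreshold(
    image_8bit,
    maxValue=255,
    adaptiveMethod=cv2.ADAPTIVE_THRESH_GAUSSIAN_C,
    thresholdType=cv2.THRESH_BINARY,
    blockSize=kernel_size,
    C=constant,
)
mask = thresh > 0   # convert the 0/255 output to a boolean mask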
Example #27
def main(image, mask, threshold=150, bead_size=2, superpixel_size=4,
         close_surface=False, close_disc_size=8, plot=False):
    '''Converts an image stack with labelled cell surface to a cell
    `volume` image

    Parameters
    ----------
    image: numpy.ndarray[Union[numpy.uint8, numpy.uint16]]
        grayscale image in which beads should be detected (3D)
    mask: numpy.ndarray[Union[numpy.int32, numpy.bool]]
        binary or labeled image of cell segmentation (2D)
    threshold: int, optional
        intensity of bead (default: ``150``)
    bead_size: int, optional
        minimal size of bead (default: ``2``)
    superpixel_size: int, optional
        size of superpixels for searching the 3D position of a bead
    close_surface: bool, optional
        whether the interpolated surface should be morphologically closed
    close_disc_size: int, optional
        size in pixels of the disc used to morphologically close the
        interpolated surface
    plot: bool, optional
        whether a plot should be generated (default: ``False``)

    Returns
    -------
    jtmodules.generate_volume_image.Output
    '''

    n_slices = image.shape[-1]
    logger.debug('input image has size %d in last dimension', n_slices)

    logger.debug('mask beads inside cell')
    beads_outside_cell = np.copy(image)
    for iz in range(n_slices):
        beads_outside_cell[mask > 0, iz] = 0

    logger.debug('search for 3D position of beads outside cell')
    slide = np.argmax(beads_outside_cell, axis=2)
    slide[slide > np.percentile(slide[mask == 0], 20)] = 0

    logger.debug('determine surface of slide')
    slide_coordinates = array_to_coordinate_list(slide)
    bottom_surface = fit_plane(subsample_coordinate_list(
        slide_coordinates, 2000)
    )

    logger.debug('detect_beads in 2D')
    mip = np.max(image, axis=-1)
    try:
        # TODO: use LOG filter???
        beads, beads_centroids = detect_blobs(
            image=mip, mask=np.invert(mask > 0), threshold=threshold,
            min_area=bead_size
        )
    except Exception:
        logger.warn('detect_blobs failed, returning empty volume image')
        volume_image = np.zeros(shape=mask.shape, dtype=image.dtype)
        figure = str()
        return Output(volume_image, figure)

    n_beads = np.count_nonzero(beads_centroids)
    logger.info('found %d beads on cells', n_beads)

    if n_beads == 0:
        logger.warn('empty volume image')
        volume_image = np.zeros(shape=mask.shape, dtype=image.dtype)
    else:
        logger.debug('locate beads in 3D')
        beads_coords_3D = locate_in_3D(
            image=image, mask=beads_centroids,
            bin_size=superpixel_size
        )

        logger.info('interpolate cell surface')
        volume_image = interpolate_surface(
            coords=beads_coords_3D,
            output_shape=np.shape(image[:, :, 1]),
            method='linear'
        )

        volume_image = volume_image.astype(image.dtype)

        if (close_surface is True):
            import mahotas as mh
            logger.info('morphological closing of cell surface')
            volume_image = mh.close(volume_image,
                                    Bc=mh.disk(close_disc_size))
        volume_image[mask == 0] = 0

    if plot:
        logger.debug('convert bottom surface plane to image for plotting')
        bottom_surface_image = np.zeros(slide.shape, dtype=np.uint8)
        for ix in range(slide.shape[0]):
            for iy in range(slide.shape[1]):
                bottom_surface_image[ix, iy] = plane(
                    ix, iy, bottom_surface.x)

        logger.info('create plot')
        from jtlib import plotting
        plots = [
            plotting.create_intensity_image_plot(
                mip, 'ul', clip=True
            ),
            plotting.create_intensity_image_plot(
                bottom_surface_image, 'll', clip=True
            ),
            plotting.create_intensity_image_plot(
                volume_image, 'ur', clip=True
            )
        ]
        figure = plotting.create_figure(
            plots, title='Convert stack to volume image'
        )
    else:
        figure = str()

    return Output(volume_image, figure)
Example #28
def main(primary_label_image, intensity_image, contrast_threshold,
        min_threshold=None, max_threshold=None, plot=False):
    '''Detects secondary objects in an image by expanding the primary objects
    encoded in `primary_label_image`. The outlines of secondary objects are
    determined based on the watershed transform of `intensity_image` using the
    primary objects in `primary_label_image` as seeds.

    Parameters
    ----------
    primary_label_image: numpy.ndarray[numpy.int32]
        2D labeled array encoding primary objects, which serve as seeds for
        watershed transform
    intensity_image: numpy.ndarray[numpy.uint8 or numpy.uint16]
        2D grayscale array that serves as gradient for watershed transform;
        optimally this image is enhanced with a low-pass filter
    contrast_threshold: int
        contrast threshold for automatic separation of foreground from
        background based on locally adaptive thresholding (when ``0``, the
        threshold defaults to manual thresholding with `min_threshold`)
    min_threshold: int, optional
        minimal foreground value; pixels below `min_threshold` are considered
        background
    max_threshold: int, optional
        maximal foreground value; pixels above `max_threshold` are considered
        foreground
    plot: bool, optional
        whether a plot should be generated

    Returns
    -------
    jtmodules.segment_secondary.Output

    Note
    ----
    Setting `min_threshold` and `max_threshold` to the same value reduces
    to manual thresholding.
    '''
    if np.any(primary_label_image == 0):
        has_background = True
    else:
        has_background = False

    if not has_background:
        secondary_label_image = primary_label_image
    else:
        # A simple, fixed threshold doesn't work for SE stains. Therefore, we
        # use adaptive thresholding to determine background regions,
        # i.e. regions in the intensity_image that should not be covered by
        # secondary objects.
        n_objects = len(np.unique(primary_label_image[1:]))
        logger.info(
            'primary label image has %d objects',
            n_objects - 1
        )
        # SB: Added a catch for images with no primary objects
        # note that background is an 'object'
        if n_objects > 1:
            # TODO: consider using contrast_treshold as input parameter
            background_mask = mh.thresholding.bernsen(
                intensity_image, 5, contrast_threshold
            )
            if min_threshold is not None:
                logger.info(
                    'set lower threshold level to %d', min_threshold
                )
                background_mask[intensity_image < min_threshold] = True

            if max_threshold is not None:
                logger.info(
                    'set upper threshold level to %d', max_threshold
                )
                background_mask[intensity_image > max_threshold] = False
            # background_mask = mh.morph.open(background_mask)
            background_label_image = mh.label(background_mask)[0]
            background_label_image[background_mask] += n_objects

            logger.info('detect secondary objects via watershed transform')
            secondary_label_image = expand_objects_watershed(
                primary_label_image, background_label_image, intensity_image
            )
        else:
            logger.info('skipping secondary segmentation')
            secondary_label_image = np.zeros(
                primary_label_image.shape, dtype=np.int32
            )

    n_objects = len(np.unique(secondary_label_image)[1:])
    logger.info('identified %d objects', n_objects)

    if plot:
        from jtlib import plotting
        colorscale = plotting.create_colorscale(
            'Spectral', n=n_objects, permute=True, add_background=True
        )
        outlines = mh.morph.dilate(mh.labeled.bwperim(secondary_label_image > 0))
        plots = [
            plotting.create_mask_image_plot(
                primary_label_image, 'ul', colorscale=colorscale
                ),
            plotting.create_mask_image_plot(
                secondary_label_image, 'ur', colorscale=colorscale
            ),
            plotting.create_intensity_overlay_image_plot(
                intensity_image, outlines, 'll'
            )
        ]
        figure = plotting.create_figure(plots, title='secondary objects')
    else:
        figure = str()

    return Output(secondary_label_image, figure)
Example #29
def main(image, clipping_mask, plot=False):
    '''Clips a labeled image using another image as a mask, such that
    intersecting pixels/voxels are set to background.

    Parameters
    ----------
    image: numpy.ndarray
        image that should be clipped
    clipping_mask: numpy.ndarray[numpy.int32 or numpy.bool]
        image that should be used as clipping mask
    plot: bool, optional
        whether a plot should be generated (default: ``False``)

    Returns
    -------
    jtmodules.clip_objects.Output

    Raises
    ------
    ValueError
        when `image` and `clipping_mask` don't have the same dimensions
    '''
    if image.shape != clipping_mask.shape:
        raise ValueError(
            '"image" and "clipping_mask" must have the same dimensions'
        )
    clipping_mask = clipping_mask > 0

    clipped_image = image.copy()
    clipped_image[clipping_mask] = 0

    if plot:
        from jtlib import plotting
        if str(image.dtype).startswith('uint'):
            plots = [
                plotting.create_intensity_image_plot(
                    image, 'ul', clip=True
                ),
                plotting.create_mask_image_plot(
                    clipping_mask, 'ur'
                ),
                plotting.create_intensity_image_plot(
                    clipped_image, 'll', clip=True
                )
            ]
        else:
            n_objects = len(np.unique(image)[1:])
            colorscale = plotting.create_colorscale(
                'Spectral', n=n_objects, permute=True, add_background=True
            )
            plots = [
                plotting.create_mask_image_plot(
                    image, 'ul', colorscale=colorscale
                ),
                plotting.create_mask_image_plot(
                    clipping_mask, 'ur'
                ),
                plotting.create_mask_image_plot(
                    clipped_image, 'll', colorscale=colorscale
                )
            ]
        figure = plotting.create_figure(plots, title='clipped image')
    else:
        figure = str()

    return Output(clipped_image, figure)
Exemplo n.º 30
0
def main(image, filter_name, filter_size, plot=False):
    '''Smooths (blurs) `image`.

    Parameters
    ----------
    image: numpy.ndarray
        grayscale image that should be smoothed
    filter_name: str
        name of the filter kernel that should be applied
        (options: ``{"avarage", "gaussian", "median", "bilateral"}``)
    filter_size: int
        size of the kernel
    plot: bool, optional
        whether a plot should be generated (default: ``False``)

    Returns
    -------
    jtmodules.smooth.Output[Union[numpy.ndarray, str]]

    Raises
    ------
    ValueError
        when `filter_name` is not
        ``"avarage"``, ``"gaussian"``, ``"median"`` or ``"bilateral"``
    '''
    se = np.ones((filter_size, filter_size))
    if filter_name == 'average':
        logger.info('apply "average" filter')
        smoothed_image = mh.mean_filter(image, se)
    elif filter_name == 'gaussian':
        logger.info('apply "gaussian" filter')
        smoothed_image = mh.gaussian_filter(image, filter_size)
    elif filter_name == 'median':
        logger.info('apply "median" filter')
        smoothed_image = mh.median_filter(image, se)
    elif filter_name == 'bilateral':
        smoothed_image = cv2.bilateralFilter(
            image.astype(np.float32), d=0,
            sigmaColor=filter_size, sigmaSpace=filter_size
        ).astype(image.dtype)
    else:
        raise ValueError(
            'Arugment "filter_name" can be one of the following:\n'
            '"average", "gaussian", "median" or "bilateral"'
        )
    smoothed_image = smoothed_image.astype(image.dtype)

    if plot:
        logger.info('create plot')
        from jtlib import plotting
        clip_value = np.percentile(image, 99.99)
        data = [
            plotting.create_intensity_image_plot(
                image, 'ul', clip=True, clip_value=clip_value
            ),
            plotting.create_intensity_image_plot(
                smoothed_image, 'ur', clip=True, clip_value=clip_value
            ),
        ]
        figure = plotting.create_figure(
            data,
            title='Smoothed with "{0}" filter (kernel size: {1})'.format(
                filter_name, filter_size
            )
        )
    else:
        figure = str()

    return Output(smoothed_image, figure)
Exemplo n.º 31
0
def main(image, correction_factor=1, min_threshold=None, max_threshold=None,
        plot=False):
    '''Thresholds an image by applying an automatically determined global
    threshold level using
    `Otsu's method <https://en.wikipedia.org/wiki/Otsu%27s_method>`_.

    Additional parameters allow correction of the calculated threshold
    level or restricting it to a defined range. This may be useful to prevent
    extreme levels in case the `image` contains artifacts. Setting
    `min_threshold` and `max_threshold` to the same value results in a
    manual thresholding.

    Parameters
    ----------
    image: numpy.ndarray[numpy.uint8 or numpy.uint16]
        grayscale image that should be thresholded
    correction_factor: int, optional
        value by which the calculated threshold level will be multiplied
        (default: ``1``)
    min_threshold: int, optional
        minimal threshold level (default: ``numpy.min(image)``)
    max_threshold: int, optional
        maximal threshold level (default: ``numpy.max(image)``)
    plot: bool, optional
        whether a plot should be generated (default: ``False``)

    Returns
    -------
    jtmodules.threshold_otsu.Output[Union[numpy.ndarray, str]]
    '''
    if max_threshold is None:
        max_threshold = np.max(image)
    logger.debug('set maximal threshold: %d', max_threshold)

    if min_threshold is None:
        min_threshold = np.min(image)
    logger.debug('set minimal threshold: %d', min_threshold)
    logger.debug('set threshold correction factor: %.2f', correction_factor)

    threshold = mh.otsu(image)
    logger.info('calculated threshold level: %d', threshold)

    corr_threshold = threshold * correction_factor
    logger.info('corrected threshold level: %d', corr_threshold)

    if corr_threshold > max_threshold:
        logger.info('set threshold level to maximum: %d', max_threshold)
        corr_threshold = max_threshold
    elif corr_threshold < min_threshold:
        logger.info('set threshold level to minimum: %d', min_threshold)
        corr_threshold = min_threshold

    logger.info('threshold image at %d', corr_threshold)
    mask = image > corr_threshold

    if plot:
        logger.info('create plot')
        from jtlib import plotting
        outlines = mh.morph.dilate(mh.labeled.bwperim(mask))
        plots = [
            plotting.create_intensity_overlay_image_plot(
                image, outlines, 'ul'
            ),
            plotting.create_mask_image_plot(mask, 'ur')
        ]
        figure = plotting.create_figure(
            plots, title='thresholded at %s' % corr_threshold
        )
    else:
        figure = str()

    return Output(mask, figure)
Exemplo n.º 32
0
def main(mask, intensity_image, min_area, max_area,
        min_cut_area, max_circularity, max_convexity,
        plot=False, selection_test_mode=False):
    '''Detects clumps in `mask` given criteria provided by the user
    and cuts them along the borders of watershed regions, which are determined
    based on the distance transform of `mask`.

    Parameters
    ----------
    mask: numpy.ndarray[Union[numpy.int32, numpy.bool]]
        2D binary or labeled image encoding potential clumps
    intensity_image: numpy.ndarray[numpy.uint8 or numpy.uint16]
        2D grayscale image with intensity values of the objects that should
        be detected
    min_area: int
        minimal area an object must have to be considered a clump
    max_area: int
        maximal area an object can have to be considered a clump
    min_cut_area: int
        minimal area an object must have
        (useful to prevent cuts that would result in too small objects)
    max_circularity: float
        maximal circularity an object can have to be considered a clump
    max_convexity: float
        maximal convexity an object can have to be considered a clump
    plot: bool, optional
        whether a plot should be generated
    selection_test_mode: bool, optional
        whether, instead of the normal plot, heatmaps should be generated that
        display values of the selection criteria *area*, *circularity* and
        *convexity* for each individual object in `mask` as well as
        the selected "clumps" based on the criteria provided by the user

    Returns
    -------
    jtmodules.separate_clumps.Output
    '''

    separated_mask = separate_clumped_objects(
        mask, min_cut_area, min_area, max_area,
        max_circularity, max_convexity
    )

    if plot:
        from jtlib import plotting

        # Determine which of the initial objects were considered clumps; this
        # mask is needed by both plot variants below.
        clumps_mask = np.zeros(mask.shape, bool)
        initial_objects_label_image, n_initial_objects = mh.label(mask > 0)
        for i in range(1, n_initial_objects + 1):
            index = initial_objects_label_image == i
            if len(np.unique(separated_mask[index])) > 1:
                clumps_mask[index] = True
        if selection_test_mode:
            logger.info('create plot for selection test mode')
            labeled_mask, n_objects = mh.label(mask)
            f = Morphology(labeled_mask)
            values = f.extract()
            area_img = create_feature_image(
                values['Morphology_Area'].values, labeled_mask
            )
            convexity_img = create_feature_image(
                values['Morphology_Convexity'].values, labeled_mask
            )
            circularity_img = create_feature_image(
                values['Morphology_Circularity'].values, labeled_mask
            )
            area_colorscale = plotting.create_colorscale(
                'Greens', n_objects,
                add_background=True, background_color='white'
            )
            circularity_colorscale = plotting.create_colorscale(
                'Blues', n_objects,
                add_background=True, background_color='white'
            )
            convexity_colorscale = plotting.create_colorscale(
                'Reds', n_objects,
                add_background=True, background_color='white'
            )
            plots = [
                plotting.create_float_image_plot(
                    area_img, 'ul', colorscale=area_colorscale
                ),
                plotting.create_float_image_plot(
                    convexity_img, 'ur', colorscale=convexity_colorscale
                ),
                plotting.create_float_image_plot(
                    circularity_img, 'll', colorscale=circularity_colorscale
                ),
                plotting.create_mask_image_plot(
                    clumps_mask, 'lr'
                ),
            ]
            figure = plotting.create_figure(
                plots,
                title=(
                    'Selection criteria: "area" (green), "convexity" (red) '
                    'and "circularity" (blue)'
                )
            )
        else:
            logger.info('create plot')

            cut_mask = (mask > 0) & (separated_mask == 0)

            n_objects = len(np.unique(separated_mask[separated_mask > 0]))
            colorscale = plotting.create_colorscale(
                'Spectral', n=n_objects, permute=True, add_background=True
            )
            outlines = mh.morph.dilate(mh.labeled.bwperim(separated_mask > 0))
            cutlines = mh.morph.dilate(mh.labeled.bwperim(cut_mask))
            plots = [
                plotting.create_mask_image_plot(
                    separated_mask, 'ul', colorscale=colorscale
                ),
                plotting.create_intensity_overlay_image_plot(
                    intensity_image, outlines, 'ur'
                ),
                plotting.create_mask_overlay_image_plot(
                    clumps_mask, cutlines, 'll'
                )
            ]
            figure = plotting.create_figure(
                plots, title='separated clumps'
            )
    else:
        figure = str()

    return Output(separated_mask, figure)
Exemplo n.º 33
0
def main(primary_label_image, intensity_image, contrast_threshold,
        min_threshold=None, max_threshold=None, plot=False):
    '''Detects secondary objects in an image by expanding the primary objects
    encoded in `primary_label_image`. The outlines of secondary objects are
    determined based on the watershed transform of `intensity_image` using the
    primary objects in `primary_label_image` as seeds.

    Parameters
    ----------
    primary_label_image: numpy.ndarray[numpy.int32]
        2D labeled array encoding primary objects, which serve as seeds for
        watershed transform
    intensity_image: numpy.ndarray[numpy.uint8 or numpy.uint16]
        2D grayscale array that serves as gradient for watershed transform;
        optimally this image is enhanced with a low-pass filter
    contrast_threshold: int
        contrast threshold for automatic separation of foreground from
        background based on locally adaptive thresholding (when ``0``,
        thresholding falls back to the manual `min_threshold`)
    min_threshold: int, optional
        minimal foreground value; pixels below `min_threshold` are considered
        background
    max_threshold: int, optional
        maximal foreground value; pixels above `max_threshold` are considered
        foreground
    plot: bool, optional
        whether a plot should be generated

    Returns
    -------
    jtmodules.segment_secondary.Output

    Note
    ----
    Setting `min_threshold` and `max_threshold` to the same value reduces
    to manual thresholding.
    '''
    if np.any(primary_label_image == 0):
        has_background = True
    else:
        has_background = False

    if not has_background:
        secondary_label_image = primary_label_image
    else:
        # A simple, fixed threshold doesn't work for SE stains. Therefore, we
        # use adaptive thresholding to determine background regions,
        # i.e. regions in the intensity_image that should not be covered by
        # secondary objects.
        n_objects = len(np.unique(primary_label_image))
        logger.info(
            'primary label image has %d objects',
            n_objects - 1
        )
        # SB: Added a catch for images with no primary objects
        # note that background is an 'object'
        if n_objects > 1:
            # TODO: consider using contrast_threshold as input parameter
            background_mask = mh.thresholding.bernsen(
                intensity_image, 5, contrast_threshold
            )
            if min_threshold is not None:
                logger.info(
                    'set lower threshold level to %d', min_threshold
                )
                background_mask[intensity_image < min_threshold] = True

            if max_threshold is not None:
                logger.info(
                    'set upper threshold level to %d', max_threshold
                )
                background_mask[intensity_image > max_threshold] = False
            # background_mask = mh.morph.open(background_mask)
            background_label_image = mh.label(background_mask)[0]
            background_label_image[background_mask] += n_objects

            logger.info('detect secondary objects via watershed transform')
            secondary_label_image = expand_objects_watershed(
                primary_label_image, background_label_image, intensity_image
            )
        else:
            logger.info('skipping secondary segmentation')
            secondary_label_image = np.zeros(
                primary_label_image.shape, dtype=np.int32
            )

    n_objects = len(np.unique(secondary_label_image)[1:])
    logger.info('identified %d objects', n_objects)

    if plot:
        from jtlib import plotting
        colorscale = plotting.create_colorscale(
            'Spectral', n=n_objects, permute=True, add_background=True
        )
        outlines = mh.morph.dilate(mh.labeled.bwperim(secondary_label_image > 0))
        plots = [
            plotting.create_mask_image_plot(
                primary_label_image, 'ul', colorscale=colorscale
            ),
            plotting.create_mask_image_plot(
                secondary_label_image, 'ur', colorscale=colorscale
            ),
            plotting.create_intensity_overlay_image_plot(
                intensity_image, outlines, 'll'
            )
        ]
        figure = plotting.create_figure(plots, title='secondary objects')
    else:
        figure = str()

    return Output(secondary_label_image, figure)
Exemplo n.º 34
0
def main(image, mask, threshold=25,
         mean_size=6, min_size=10,
         filter_type='log_2d',
         minimum_bead_intensity=150,
         z_step=0.333, pixel_size=0.1625,
         alpha=0, plot=False):
    '''Converts an image stack with a labelled cell surface into a cell
    `volume` image.

    Parameters
    ----------
    image: numpy.ndarray[Union[numpy.uint8, numpy.uint16]]
        grayscale image in which beads should be detected (3D)
    mask: numpy.ndarray[Union[numpy.int32, numpy.bool]]
        binary or labeled image of cell segmentation (2D)
    threshold: int, optional
        intensity of bead in filtered image (default: ``25``)
    mean_size: int, optional
        mean size of bead (default: ``6``)
    min_size: int, optional
        minimal number of connected voxels per bead (default: ``10``)
    filter_type: str, optional
        filter used to emphasise the beads in 3D
        (options: ``log_2d`` (default) or ``log_3d``)
    minimum_bead_intensity: int, optional
        minimum intensity in the original image of an identified bead
        centre; used to filter out low-intensity beads (default: ``150``)
    z_step: float, optional
        distance between consecutive z-planes (um) (default: ``0.333``)
    pixel_size: float, optional
        size of pixel (um) (default: ``0.1625``)
    alpha: float, optional
        value of parameter for 3D alpha shape calculation
        (default: ``0``, no vertex filtering performed)
    plot: bool, optional
        whether a plot should be generated (default: ``False``)

    Returns
    -------
    jtmodules.generate_volume_image.Output
    '''

    # Check that there are cells identified in image
    if np.max(mask) > 0:
        volume_image_calculated = True

        n_slices = image.shape[-1]
        logger.debug('input image has z-dimension %d', n_slices)

        # Remove high intensity pixels
        detect_image = image.copy()
        p = np.percentile(detect_image, 99.9)
        detect_image[detect_image > p] = p

        # Perform LoG filtering in 3D to emphasise beads
        if filter_type == 'log_2d':
            logger.info('using stacked 2D LoG filter to detect beads')
            f = -1 * log_2d(size=mean_size, sigma=float(mean_size - 1) / 3)
            filt = np.stack([f for _ in range(mean_size)], axis=2)

        elif filter_type == 'log_3d':
            logger.info('using 3D LoG filter to detect beads')
            filt = -1 * log_3d(mean_size, (float(mean_size - 1) / 3,
                                           float(mean_size - 1) / 3,
                                           4 * float(mean_size - 1) / 3))
        else:
            logger.info('using unfiltered image to detect beads')

        if filter_type == 'log_2d' or filter_type == 'log_3d':
            logger.debug('convolve image with filter kernel')
            detect_image = mh.convolve(detect_image.astype(float), filt)
            detect_image[detect_image < 0] = 0

        logger.debug('threshold beads')
        labeled_beads, n_labels = mh.label(detect_image > threshold)
        logger.info('detected %d beads', n_labels)

        logger.debug('remove small beads')
        sizes = mh.labeled.labeled_size(labeled_beads)
        too_small = np.where(sizes < min_size)
        labeled_beads = mh.labeled.remove_regions(labeled_beads, too_small)
        mh.labeled.relabel(labeled_beads, inplace=True)
        logger.info(
            '%d beads remain after removing small beads', np.max(labeled_beads)
        )

        logger.debug('localise beads in 3D')
        localised_beads = localise_bead_maxima_3D(
            image, labeled_beads, minimum_bead_intensity
        )

        logger.debug('mask beads inside cells')
        # NOTE: localised_beads.coordinate_image is used only for beads
        # outside cells and can therefore be modified here. For beads
        # inside cells, localised_beads.coordinates are used instead.
        # expand mask to ensure slide-beads are well away from cells
        slide = localised_beads.coordinate_image
        expand_mask = mh.dilate(
            A=mask > 0,
            Bc=np.ones([25,25], bool)
        )
        slide[expand_mask] = 0

        logger.debug('determine coordinates of slide surface')
        try:
            bottom_surface = slide_surface_params(slide)
        except InvalidSlideError:
            logger.error('slide surface calculation is invalid; '
                         'returning empty volume image')
            volume_image = np.zeros(shape=image[:,:,0].shape,
                                    dtype=image.dtype)
            figure = str()
            return Output(volume_image, figure)

        logger.debug('subtract slide surface to get absolute bead coordinates')
        bead_coords_abs = []
        for i in range(len(localised_beads.coordinates)):
            bead_height = (
                localised_beads.coordinates[i][2] -
                plane(localised_beads.coordinates[i][0],
                      localised_beads.coordinates[i][1],
                      bottom_surface.x)
            )
            if bead_height > 0:
                bead_coords_abs.append(
                    (localised_beads.coordinates[i][0],
                     localised_beads.coordinates[i][1],
                     bead_height)
                )

        logger.debug('convert absolute bead coordinates to image')
        coord_image_abs = coordinate_list_to_array(
            bead_coords_abs, shape=image[:,:,0].shape, dtype=np.float32
        )

        filtered_coords_global = filter_vertices_per_cell_alpha_shape(
            coord_image_abs=coord_image_abs,
            mask=mask,
            alpha=alpha,
            z_step=z_step,
            pixel_size=pixel_size
        )

        logger.info('interpolate cell surface')
        volume_image = interpolate_surface(
            coords=np.asarray(filtered_coords_global, dtype=np.uint16),
            output_shape=np.shape(image[:,:,0]),
            method='linear'
        )

        volume_image = volume_image.astype(image.dtype)

        logger.debug('set regions outside mask to zero')
        volume_image[mask == 0] = 0

    else:
        logger.warning(
            'no objects in input mask, skipping cell volume calculation.'
        )
        volume_image_calculated = False
        volume_image = np.zeros(shape=image[:,:,0].shape, dtype=image.dtype)

    if (plot and volume_image_calculated):
        logger.debug('convert bottom surface plane to image for plotting')
        dt = np.dtype(float)
        bottom_surface_image = np.zeros(slide.shape, dtype=dt)
        for ix in range(slide.shape[0]):
            for iy in range(slide.shape[1]):
                bottom_surface_image[ix, iy] = plane(
                    ix, iy, bottom_surface.x)
        logger.info('create plot')
        from jtlib import plotting
        plots = [
            plotting.create_intensity_image_plot(
                np.max(image, axis=-1), 'ul', clip=True
            ),
            plotting.create_float_image_plot(
                bottom_surface_image, 'll', clip=True
            ),
            plotting.create_intensity_image_plot(
                volume_image, 'ur', clip=True
            )

        ]
        figure = plotting.create_figure(
            plots, title='Convert stack to volume image'
        )
    else:
        figure = str()

    return Output(volume_image, figure)
Exemplo n.º 35
0
def main(mask,
         intensity_image,
         min_area,
         max_area,
         min_cut_area,
         max_circularity,
         max_convexity,
         plot=False,
         selection_test_mode=False,
         selection_test_show_remaining=False,
         trimming=True):
    '''Detects clumps in `mask` given criteria provided by the user
    and cuts them along the borders of watershed regions, which are determined
    based on the distance transform of `mask`.

    Parameters
    ----------
    mask: numpy.ndarray[Union[numpy.int32, numpy.bool]]
        2D binary or labeled image encoding potential clumps
    intensity_image: numpy.ndarray[numpy.uint8 or numpy.uint16]
        2D grayscale image with intensity values of the objects that should
        be detected
    min_area: int
        minimal area an object must have to be considered a clump
    max_area: int
        maximal area an object can have to be considered a clump
    min_cut_area: int
        minimal area an object must have
        (useful to prevent cuts that would result in too small objects)
    max_circularity: float
        maximal circularity an object can have to be considered a clump
    max_convexity: float
        maximal convexity an object can have to be considered a clump
    plot: bool, optional
        whether a plot should be generated
    selection_test_mode: bool, optional
        whether, instead of the normal plot, heatmaps should be generated that
        display values of the selection criteria *area*, *circularity* and
        *convexity* for each individual object in `mask` as well as
        the selected "clumps" based on the criteria provided by the user
    selection_test_show_remaining: bool, optional
        whether the selection test plot should be made on the remaining image
        after the cuts were performed (helps to see why some objects were not
        cut, especially if there are complicated clumps that require multiple
        cuts). Defaults to false, thus showing the values in the original image
    trimming: bool, optional
        some cuts may create a tiny third object; if ``True``, tertiary
        objects smaller than the trimming threshold (10 pixels) are removed
        (default: ``True``)

    Returns
    -------
    jtmodules.separate_clumps.Output
    '''

    separated_label_image = separate_clumped_objects(mask,
                                                     min_cut_area,
                                                     min_area,
                                                     max_area,
                                                     max_circularity,
                                                     max_convexity,
                                                     allow_trimming=trimming)

    if plot:
        from jtlib import plotting

        clumps_mask = np.zeros(mask.shape, bool)
        initial_objects_label_image, n_initial_objects = mh.label(mask > 0)
        for n in range(1, n_initial_objects + 1):
            obj = (initial_objects_label_image == n)
            if len(np.unique(separated_label_image[obj])) > 1:
                clumps_mask[obj] = True

        cut_mask = (mask > 0) & (separated_label_image == 0)
        cutlines = mh.morph.dilate(mh.labeled.bwperim(cut_mask))

        if selection_test_mode:
            logger.info('create plot for selection test mode')

            # Check if selection_test_show_remaining is active
            # If so, show values on processed image, not original
            if selection_test_show_remaining:
                labeled_mask, n_objects = mh.label(separated_label_image > 0)
                logger.info('Selection test mode plot with processed image')
            else:
                labeled_mask, n_objects = mh.label(mask)
            f = Morphology(labeled_mask)
            values = f.extract()
            area_img = create_feature_image(values['Morphology_Area'].values,
                                            labeled_mask)
            convexity_img = create_feature_image(
                values['Morphology_Convexity'].values, labeled_mask)
            circularity_img = create_feature_image(
                values['Morphology_Circularity'].values, labeled_mask)
            plots = [
                plotting.create_float_image_plot(area_img, 'ul'),
                plotting.create_float_image_plot(convexity_img, 'ur'),
                plotting.create_float_image_plot(circularity_img, 'll'),
                plotting.create_mask_overlay_image_plot(
                    clumps_mask, cutlines, 'lr'),
            ]
            figure = plotting.create_figure(
                plots,
                title=('Selection criteria:'
                       ' "area" (top left),'
                       ' "convexity" (top-right),'
                       ' and "circularity" (bottom-left);'
                       ' cuts made (bottom right).'))
        else:
            logger.info('create plot')

            n_objects = len(
                np.unique(separated_label_image[separated_label_image > 0]))
            colorscale = plotting.create_colorscale('Spectral',
                                                    n=n_objects,
                                                    permute=True,
                                                    add_background=True)
            outlines = mh.morph.dilate(
                mh.labeled.bwperim(separated_label_image > 0))
            plots = [
                plotting.create_mask_image_plot(separated_label_image,
                                                'ul',
                                                colorscale=colorscale),
                plotting.create_intensity_overlay_image_plot(
                    intensity_image, outlines, 'ur'),
                plotting.create_mask_overlay_image_plot(
                    clumps_mask, cutlines, 'll')
            ]
            figure = plotting.create_figure(plots, title='separated clumps')
    else:
        figure = str()

    return Output(separated_label_image, figure)
Exemplo n.º 36
0
def main(image,
         mask,
         threshold=1,
         min_area=3,
         mean_area=5,
         max_area=1000,
         clip_percentile=99.999,
         plot=False):
    '''Detects blobs in `image` using an implementation of
    `SExtractor <http://www.astromatic.net/software/sextractor>`_ [1].
    The `image` is first convolved with a Laplacian of Gaussian filter of size
    `mean_area` to enhance blob-like structures. The enhanced image is
    then thresholded at `threshold` level and connected pixel components are
    subsequently deblended.

    Parameters
    ----------
    image: numpy.ndarray[Union[numpy.uint8, numpy.uint16]]
        grayscale image in which blobs should be detected
    mask: numpy.ndarray[Union[numpy.int32, numpy.bool]]
        binary or labeled image that specifies pixel regions of interest
        in which blobs should be detected
    threshold: int, optional
        threshold level for pixel values in the convolved image
        (default: ``1``)
    min_area: int, optional
        minimal size a blob is allowed to have (default: ``3``)
    mean_area: int, optional
        estimated average size of a blob (default: ``5``)
    max_area: int, optional
        maximal size a blob is allowed to have to be subject to deblending;
        no attempt will be made to deblend blobs larger than `max_area`
        (default: ``1000``)
    clip_percentile: float, optional
        clip intensity values in `image` above the given percentile; this may
        help in attenuating artifacts
    plot: bool, optional
        whether a plot should be generated (default: ``False``)

    Returns
    -------
    jtmodules.detect_blobs.Output[Union[numpy.ndarray, str]]

    References
    ----------
    .. [1] Bertin, E. & Arnouts, S. 1996: SExtractor: Software for source
    extraction, Astronomy & Astrophysics Supplement 317, 393
    '''

    logger.info('detect blobs above threshold {0}'.format(threshold))

    detect_image = image.copy()

    p = np.percentile(image, clip_percentile)
    detect_image[image > p] = p

    # Enhance the image for blob detection by convolving it with a LOG filter
    f = -1 * log_2d(size=mean_area, sigma=float(mean_area - 1) / 3)
    detect_image = mh.convolve(detect_image.astype(float), f)
    detect_image[detect_image < 0] = 0

    # Mask regions of too big blobs
    pre_blobs = mh.label(detect_image > threshold)[0]
    bad_blobs, n_bad = mh.labeled.filter_labeled(pre_blobs, min_size=max_area)
    logger.info(
        'remove {0} blobs because they are bigger than {1} pixels'.format(
            n_bad, max_area))
    detect_mask = np.invert(mask > 0)
    detect_mask[bad_blobs > 0] = True
    detect_image[bad_blobs > 0] = 0

    logger.info('deblend blobs')
    blobs, centroids = detect_blobs(image=detect_image,
                                    mask=detect_mask,
                                    threshold=threshold,
                                    min_area=min_area)

    n = len(np.unique(blobs[blobs > 0]))

    logger.info('{0} blobs detected'.format(n))

    if plot:
        logger.info('create plot')
        from jtlib import plotting

        colorscale = plotting.create_colorscale('Spectral',
                                                n=n,
                                                permute=True,
                                                add_background=True)
        plots = [
            plotting.create_float_image_plot(detect_image, 'ul', clip=True),
            plotting.create_mask_image_plot(blobs, 'ur', colorscale=colorscale)
        ]
        figure = plotting.create_figure(
            plots,
            title=('detected #{0} blobs above threshold {1}'
                   ' in LOG filtered image'.format(n, threshold)))
    else:
        figure = str()

    return Output(centroids, blobs, figure)
Exemplo n.º 37
0
def main(image, method, kernel_size, constant=0,
        min_threshold=None, max_threshold=None, plot=False):
    '''Thresholds an image with a locally adaptive threshold method.

    Parameters
    ----------
    image: numpy.ndarray
        grayscale image that should be thresholded
    method: str
        thresholding method (options: ``{"crosscorr", "niblack"}``)
    kernel_size: int
        size of the neighbourhood region that's used to calculate the threshold
        value at each pixel position (must be an odd number)
    constant: Union[float, int], optional
        depends on `method`; in case of ``"crosscorr"`` method the constant
        is subtracted from the computed weighted sum per neighbourhood region
        and in case of ``"niblack"`` the constant is multiplied by the
        standard deviation and this term is then subtracted from the mean
        computed per neighbourhood region
    min_threshold: int, optional
        minimal threshold level (default: ``numpy.min(image)``)
    max_threshold: int, optional
        maximal threshold level (default: ``numpy.max(image)``)
    plot: bool, optional
        whether a plot should be generated (default: ``False``)

    Returns
    -------
    jtmodules.threshold_adaptive.Output

    Raises
    ------
    ValueError
        when `kernel_size` is not an odd number or when `method` is not valid

    Note
    ----
    Typically requires prior filtering to reduce noise in the image.

    References
    ----------
    .. [1] Niblack, W. 1986: An introduction to Digital Image Processing, Prentice-Hall.
    '''
    if kernel_size % 2 == 0:
        raise ValueError('Argument "kernel_size" must be an odd integer.')
    logger.debug('set kernel size: %d', kernel_size)

    if max_threshold is None:
        max_threshold = np.max(image)
    logger.debug('set maximal threshold: %d', max_threshold)

    if min_threshold is None:
        min_threshold = np.min(image)
    logger.debug('set minimal threshold: %d', min_threshold)

    logger.debug('map image intensities to 8-bit range')
    image_8bit = rescale_to_8bit(image, upper=99.99)

    logger.info('threshold image')
    if method == 'crosscorr':
        thresh_image = cv2.adaptiveThreshold(
            image_8bit, maxValue=255,
            adaptiveMethod=cv2.ADAPTIVE_THRESH_GAUSSIAN_C,
            thresholdType=cv2.THRESH_BINARY,
            blockSize=kernel_size, C=int(constant)
        )
    elif method == 'niblack':
        thresh_image = cv2.ximgproc.niBlackThreshold(
            image_8bit, maxValue=255, type=cv2.THRESH_BINARY,
            blockSize=kernel_size, delta=constant
        )
    else:
        raise ValueError(
            'Arugment "method" can be one of the following:\n'
            '"crosscorr" or "niblack"'
        )
    # OpenCV treats masks as unsigned integer and not as boolean
    thresh_image = thresh_image > 0

    # Manually fine tune automatic thresholding result
    thresh_image[image < min_threshold] = False
    thresh_image[image > max_threshold] = True

    if plot:
        logger.info('create plot')
        from jtlib import plotting
        outlines = mh.morph.dilate(mh.labeled.bwperim(thresh_image))
        plots = [
            plotting.create_intensity_overlay_image_plot(
                image, outlines, 'ul'
            ),
            plotting.create_mask_image_plot(thresh_image, 'ur')
        ]
        figure = plotting.create_figure(
            plots,
            title='thresholded adaptively with kernel size: %d' % kernel_size
        )
    else:
        figure = str()

    return Output(thresh_image, figure)
Exemplo n.º 38
0
def main(image, mask, threshold=1, min_area=3, mean_area=5, max_area=1000,
        clip_percentile=99.999, plot=False):
    '''Detects blobs in `image` using an implementation of
    `SExtractor <http://www.astromatic.net/software/sextractor>`_ [1].
    The `image` is first convolved with a Laplacian of Gaussian filter of size
    `mean_area` to enhance blob-like structures. The enhanced image is
    then thresholded at `threshold` level and connected pixel components are
    subsequently deblended.

    Parameters
    ----------
    image: numpy.ndarray[Union[numpy.uint8, numpy.uint16]]
        grayscale image in which blobs should be detected
    mask: numpy.ndarray[Union[numpy.int32, numpy.bool]]
        binary or labeled image that specifies pixel regions of interest
        in which blobs should be detected
    threshold: int, optional
        threshold level for pixel values in the convolved image
        (default: ``1``)
    min_area: int, optional
        minimal size a blob is allowed to have (default: ``3``)
    mean_area: int, optional
        estimated average size of a blob (default: ``5``)
    max_area: int, optional
        maximal size a blob is allowed to have to be subject to deblending;
        no attempt will be made to deblend blobs larger than `max_area`
        (default: ``1000``)
    clip_percentile: float, optional
        clip intensity values in `image` above the given percentile; this may
        help in attenuating artifacts
    plot: bool, optional
        whether a plot should be generated (default: ``False``)

    Returns
    -------
    jtmodules.detect_blobs.Output[Union[numpy.ndarray, str]]

    References
    ----------
    .. [1] Bertin, E. & Arnouts, S. 1996: SExtractor: Software for source
    extraction, Astronomy & Astrophysics Supplement 317, 393
    '''

    logger.info('detect blobs above threshold {0}'.format(threshold))

    detect_image = image.copy()

    p = np.percentile(image, clip_percentile)
    detect_image[image > p] = p

    # Enhance the image for blob detection by convolving it with a LOG filter
    f = -1 * log_2d(size=mean_area, sigma=float(mean_area - 1)/3)
    detect_image = mh.convolve(detect_image.astype(float), f)
    detect_image[detect_image < 0] = 0

    # Mask regions of too big blobs
    pre_blobs = mh.label(detect_image > threshold)[0]
    bad_blobs, n_bad = mh.labeled.filter_labeled(pre_blobs, min_size=max_area)
    logger.info(
        'remove {0} blobs because they are bigger than {1} pixels'.format(
            n_bad, max_area
        )
    )
    detect_mask = np.invert(mask > 0)
    detect_mask[bad_blobs > 0] = True
    detect_image[bad_blobs > 0] = 0

    logger.info('deblend blobs')
    blobs, centroids = detect_blobs(
        image=detect_image, mask=detect_mask, threshold=threshold,
        min_area=min_area
    )

    n = len(np.unique(blobs[blobs>0]))

    logger.info('{0} blobs detected'.format(n))

    if plot:
        logger.info('create plot')
        from jtlib import plotting

        colorscale = plotting.create_colorscale(
            'Spectral', n=n, permute=True, add_background=True
        )
        plots = [
            plotting.create_float_image_plot(
                detect_image, 'ul', clip=True
            ),
            plotting.create_mask_image_plot(
                blobs, 'ur', colorscale=colorscale
            )
        ]
        figure = plotting.create_figure(
            plots,
            title=(
                'detected #{0} blobs above threshold {1}'
                ' in LOG filtered image'.format(n, threshold)
            )
        )
    else:
        figure = str()

    return Output(centroids, blobs, figure)
Exemplo n.º 39
0
def main(image_1, image_2, weight_1, weight_2, plot=False):
    '''Combines `image_1` with `image_2`.

    Parameters
    ----------
    image_1: numpy.ndarray[numpy.uint8 or numpy.uint16]
        2D unsigned integer array
    image_2: numpy.ndarray[numpy.uint8 or numpy.uint16]
        2D unsigned integer array
    weight_1: int
        weight for `image_1`
    weight_2: int
        weight for `image_2`
    plot: bool, optional
        whether a plot should be generated (default: ``False``)

    Returns
    -------
    jtmodules.combine_channels.Output

    Raises
    ------
    TypeError
        when `weight_1` or `weight_2` are not integers
    ValueError
        when `weight_1` or `weight_2` are not positive
    ValueError
        when `image_1` and `image_2` don't have the same dimensions
        or data type, or when they don't have unsigned integer type
    '''
    if not isinstance(weight_1, int):
        raise TypeError('Weight #1 must have integer type.')
    if not isinstance(weight_2, int):
        raise TypeError('Weight #2 must have integer type.')
    if weight_1 < 1:
        raise ValueError('Weight #1 must be a positive integer.')
    if weight_2 < 1:
        raise ValueError('Weight #2 must be a positive integer.')
    logger.info('weight for first image: %d', weight_1)
    logger.info('weight for second image: %d', weight_2)

    if image_1.shape != image_2.shape:
        raise ValueError('The two images must have identical dimensions.')
    if image_1.dtype != image_2.dtype:
        raise ValueError('The two images must have identical data type.')

    if image_1.dtype == np.uint8:
        max_val = 2**8 - 1
    elif image_1.dtype == np.uint16:
        max_val = 2**16 - 1
    else:
        raise ValueError('The two images must have unsigned integer type.')

    logger.info('cast images to type float for arithmetic')
    img_1 = mh.stretch(image_1, 0, 1, float)
    img_2 = mh.stretch(image_2, 0, 1, float)
    logger.info('combine images using the provided weights')
    combined_image = img_1 * weight_1 + img_2 * weight_2
    logger.info('cast combined image back to correct data type')
    combined_image = mh.stretch(combined_image, 0, max_val, image_1.dtype)

    if plot:
        from jtlib import plotting
        plots = [
            plotting.create_intensity_image_plot(image_1, 'ul'),
            plotting.create_intensity_image_plot(image_2, 'ur'),
            plotting.create_intensity_image_plot(combined_image, 'll')
        ]
        figure = plotting.create_figure(plots, title='combined image')
    else:
        figure = str()

    return Output(combined_image, figure)