Example #1
def test_invert_bool():
    dtype = 'bool'
    image = np.zeros((3, 3), dtype=dtype)
    image[1, :] = dtype_limits(image)[1]
    expected = np.zeros((3, 3), dtype=dtype) + dtype_limits(image)[1]
    expected[1, :] = 0
    result = invert(image)
    assert_array_equal(expected, result)
Example #2
    def histogram(self,
                  image,
                  nbins=2**16,
                  source_range='image',
                  normalize=False):
        sh = image.shape
        if len(sh) == 3 and sh[-1] < 4:
            print("This might be a color image. The histogram will be "
                  "computed on the flattened image. You can instead "
                  "apply this function to each color channel.")

        image = image.flatten()
        # For integer types, histogramming with bincount is more efficient.
        if np.issubdtype(image.dtype, np.integer):
            hist, bin_centers = _bincount_histogram(image, source_range)
        else:
            if source_range == 'image':
                hist_range = None
            elif source_range == 'dtype':
                hist_range = skimage.dtype_limits(image, clip_negative=False)
            else:
                raise ValueError('Wrong value for the `source_range` argument')
            hist, bin_edges = np.histogram(image, bins=nbins, range=hist_range)
            bin_centers = (bin_edges[:-1] + bin_edges[1:]) / 2.

        if normalize:
            hist = hist / np.sum(hist)
        return hist, bin_centers
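The method above mirrors the public skimage.exposure.histogram function. A minimal usage sketch against that public API (assuming scikit-image is installed), rather than the class method itself:

from skimage import data, exposure

# Integer images take the bincount path; float images fall back to np.histogram.
image = data.camera()                                 # uint8 sample image
hist, bin_centers = exposure.histogram(image, nbins=256)
assert hist.sum() == image.size                       # every pixel is counted exactly once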
Example #3
def invert(
        img: np.ndarray
) -> np.ndarray:
    if img.dtype == "bool":
        return ~img
    else:
        return dtype_limits(img, clip_negative=False)[1] - img
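A small usage sketch for the simplified invert above, assuming that function and dtype_limits (from skimage.util) are in scope: boolean input is flipped elementwise, any other dtype is subtracted from its dtype maximum.

import numpy as np

# assumes invert() defined above is in scope
mask = np.array([[True, False]])
print(invert(mask))           # [[False  True]]

gray = np.array([[0, 100, 255]], dtype=np.uint8)
print(invert(gray))           # [[255 155   0]] -- 255 (uint8 maximum) minus each pixel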
Example #4
 def attach(self, image_viewer):
     '''
     Override the attaching of the plugin to the ImageViewer. This utilizes
     nearly identical implementation to https://github.com/scikit-image/
     scikit-image/blob/master/skimage/viewer/plugins/canny.py, but changes
     the limits for parameter selection.
     '''
     image = image_viewer.image
     imin, imax = skimage.dtype_limits(image, clip_negative=False)
     itype = 'float' if np.issubdtype(image.dtype, np.floating) else 'int'
     self.add_widget(Slider('sigma', 0, 2, update_on='release'))
     self.add_widget(
         Slider('low_threshold',
                imin,
                imax,
                value_type=itype,
                update_on='release'))
     self.add_widget(
         Slider('high_threshold',
                imin,
                imax,
                value_type=itype,
                update_on='release'))
     self.add_widget(ComboBox('color', self.color_names, ptype='plugin'))
     # Call parent method at end b/c it calls `filter_image`, which needs
     # the values specified by the widgets. Alternatively, move call to
     # parent method to beginning and add a call to `self.filter_image()`
     super(CannyPlugin, self).attach(image_viewer)
Example #5
def to_unsigned_int(arr: np.ndarray) -> np.ndarray:
    """
    Offset the array to get the lowest value at 0 if there is any negative.
    """
    if arr.dtype not in (np.uint8, np.uint16):
        arr_min, arr_max = dtype_limits(arr, clip_negative=False)
        arr, _ = _offset_array(arr, arr_min, arr_max)
    return arr
Example #6
def threshold_sauvola(image, window_size=15, k=0.2, r=None):
    """Applies Sauvola local threshold to an array. Sauvola is a
    modification of Niblack technique.

    In the original method a threshold T is calculated for every pixel
    in the image using the following formula:

    T = m(x,y) * (1 + k * ((s(x,y) / R) - 1))

    where m(x,y) and s(x,y) are the mean and standard deviation of
    pixel (x,y) neighborhood defined by a rectangular window with size w
    times w centered around the pixel. k is a configurable parameter
    that weights the effect of standard deviation.
    R is the maximum standard deviation of a greyscale image.

    Parameters
    ----------
    image: (N, M) ndarray
        Grayscale input image.
    window_size : int, optional
        Odd size of pixel neighborhood window (e.g. 3, 5, 7...).
    k : float, optional
        Value of the positive parameter k.
    r : float, optional
        Value of R, the dynamic range of standard deviation.
        If None, set to the half of the image dtype range.

    Returns
    -------
    threshold : (N, M) ndarray
        Threshold mask. All pixels with an intensity higher than
        this value are assumed to be foreground.

    Notes
    -----
    This algorithm is originally designed for text recognition.

    References
    ----------
    .. [1] J. Sauvola and M. Pietikainen, "Adaptive document image
           binarization," Pattern Recognition 33(2),
           pp. 225-236, 2000.
           DOI:10.1016/S0031-3203(99)00055-2

    Examples
    --------
    >>> from skimage import data
    >>> image = data.page()
    >>> binary_sauvola = threshold_sauvola(image,
    ...                                    window_size=15, k=0.2)
    """
    if r is None:
        imin, imax = dtype_limits(image, clip_negative=False)
        r = 0.5 * (imax - imin)
    m, s = _mean_std(image, window_size)
    return m * (1 + k * ((s / r) - 1))
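The return value is a per-pixel threshold surface, not a binary image; binarization is a comparison against it. A short sketch using the public skimage.filters.threshold_sauvola, which implements the same formula:

from skimage import data
from skimage.filters import threshold_sauvola

image = data.page()                                   # grayscale text image
thresh = threshold_sauvola(image, window_size=25, k=0.2)
binary = image > thresh                               # foreground mask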
Example #7
def process_sub_image(img, sigma=1.):
    if img.ndim == 3:
        img = rgb2gray(img)
    edges = filter.canny(img)
    #show_sub_image(edges)
    
    low_threshold = 0.1 * dtype_limits(img)[1]
    high_threshold = 0.2 * dtype_limits(img)[1]
    mask = np.ones(img.shape, dtype=bool)
    fsmooth = lambda x: gaussian_filter(x, sigma, mode='constant')
    smoothed = smooth_with_function_and_mask(img, fsmooth, mask)
    jsobel = ndi.sobel(smoothed, axis=1)
    isobel = ndi.sobel(smoothed, axis=0)
    
    thresh = filter.threshold_otsu(img)
    binary = img > thresh
    #show_sub_image(binary)
    return connected_component_labeling(binary)
Example #8
def test_invert_bool():
    dtype = 'bool'
    image = np.zeros((3, 3), dtype=dtype)
    upper_dtype_limit = dtype_limits(image, clip_negative=False)[1]
    image[1, :] = upper_dtype_limit
    expected = np.zeros((3, 3), dtype=dtype) + upper_dtype_limit
    expected[1, :] = 0
    result = invert(image)
    assert_array_equal(expected, result)
Example #9
def test_invert_bool():
    dtype = 'bool'
    image = np.zeros((3, 3), dtype=dtype)
    upper_dtype_limit = dtype_limits(image, clip_negative=False)[1]
    image[1, :] = upper_dtype_limit
    expected = np.zeros((3, 3), dtype=dtype) + upper_dtype_limit
    expected[1, :] = 0
    result = invert(image)
    assert_array_equal(expected, result)
Example #10
def threshold_sauvola(image, window_size=15, k=0.2, r=None):
    """Applies Sauvola local threshold to an array. Sauvola is a
    modification of Niblack technique.

    In the original method a threshold T is calculated for every pixel
    in the image using the following formula:

    T = m(x,y) * (1 + k * ((s(x,y) / R) - 1))

    where m(x,y) and s(x,y) are the mean and standard deviation of
    pixel (x,y) neighborhood defined by a rectangular window with size w
    times w centered around the pixel. k is a configurable parameter
    that weights the effect of standard deviation.
    R is the maximum standard deviation of a greyscale image.

    Parameters
    ----------
    image: (N, M) ndarray
        Grayscale input image.
    window_size : int, optional
        Odd size of pixel neighborhood window (e.g. 3, 5, 7...).
    k : float, optional
        Value of the positive parameter k.
    r : float, optional
        Value of R, the dynamic range of standard deviation.
        If None, set to the half of the image dtype range.

    Returns
    -------
    threshold : (N, M) ndarray
        Threshold mask. All pixels with an intensity higher than
        this value are assumed to be foreground.

    Notes
    -----
    This algorithm is originally designed for text recognition.

    References
    ----------
    .. [1] J. Sauvola and M. Pietikainen, "Adaptive document image
           binarization," Pattern Recognition 33(2),
           pp. 225-236, 2000.
           DOI:10.1016/S0031-3203(99)00055-2

    Examples
    --------
    >>> from skimage import data
    >>> image = data.page()
    >>> binary_sauvola = threshold_sauvola(image,
    ...                                    window_size=15, k=0.2)
    """
    if r is None:
        imin, imax = dtype_limits(image, clip_negative=False)
        r = 0.5 * (imax - imin)
    m, s = _mean_std(image, window_size)
    return m * (1 + k * ((s / r) - 1))
Example #11
def process_image(inFile):

    with skimage.external.tifffile.TiffFile(inFile) as pic:

        pic_array = pic.asarray()
        out_array = pic.asarray()
        #copy dimensions
        global WHITE
        WHITE = skimage.dtype_limits(pic_array, True)[1]
        regions = Compartmentalize(pic_array, 32)

        basicEdge(pic_array, out_array, regions)  # out_array is modified
        skimage.external.tifffile.imsave(
            inFile.replace('.tif', '_edgeBasic.tif'), out_array)
        #skimage.external.tifffile.imsave(inFile.replace('.tif', '_edgeSobel.tif'), skimage.img_as_uint(skimage.filters.sobel(pic_array)))

        regions.setNoiseCompartments(out_array, 0.95)

        enhanceEdges(pic_array, out_array, regions)
        skimage.external.tifffile.imsave(
            inFile.replace('.tif', '_edgeEnhance.tif'), out_array)

        noise_handler = Noise(out_array, iterations=3, binary=True)
        noise_handler.reduce()

        skimage.external.tifffile.imsave(inFile.replace('.tif', '_Binary.tif'),
                                         out_array)

        print("***made binary")

        boundary = findBoundaryPoints(out_array)

        bound = pic.asarray()
        for b in boundary:
            bound[b[0]][b[1]] = 0

    # test = internalBorderTest(pic_array, out_array, boundary)
        skimage.external.tifffile.imsave(inFile.replace('.tif', '_Bound.tif'),
                                         bound)

        Cluster.pic = pic_array
        clusters = makeClusters(out_array, boundary)
        i = -1
        for c in clusters:
            i += 1
            try:
                #c.showCusps(7)
                c.getTrueCusps(7)
            except AssertionError:
                print(i, 'AssertionError')
                pass
            finally:
                c.pruneCusps()
                c.propagateInternalBoundaries()
        skimage.external.tifffile.imsave(
            inFile.replace('.tif', '_BinaryEdged.tif'), out_array)
Example #12
def _subtract_constant_clip(img, const_value):
    """Subtract constant from image while handling underflow issues."""
    min_dtype, max_dtype = dtype_limits(img, clip_negative=False)

    if const_value > (max_dtype - min_dtype):
        raise ValueError("The subtracted constant is not compatible"
                         "with the image data type.")

    result = img - const_value
    result[img < (const_value + min_dtype)] = min_dtype
    return result
Example #13
def _add_constant_clip(img, const_value):
    """Add constant to the image while handling overflow issues gracefully."""
    min_dtype, max_dtype = dtype_limits(img, clip_negative=False)

    if const_value > (max_dtype - min_dtype):
        raise ValueError("The added constant is not compatible"
                         "with the image data type.")

    result = img + const_value
    result[img > max_dtype - const_value] = max_dtype
    return result
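A quick check of the two clipping helpers above on a uint8 image (assuming both functions and dtype_limits are in scope): values that would overflow or underflow saturate at the dtype bounds instead of wrapping around.

import numpy as np

img = np.array([10, 200, 250], dtype=np.uint8)
print(_add_constant_clip(img, 10))        # [ 20 210 255]  (250 + 10 saturates at 255)
print(_subtract_constant_clip(img, 20))   # [  0 180 230]  (10 - 20 saturates at 0)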
Example #14
def test_invert_float64_unsigned():
    dtype = 'float64'
    image = np.zeros((3, 3), dtype=dtype)
    lower_dtype_limit, upper_dtype_limit = \
        dtype_limits(image, clip_negative=True)
    image[2, :] = upper_dtype_limit
    expected = np.zeros((3, 3), dtype=dtype)
    expected[0, :] = upper_dtype_limit
    expected[1, :] = upper_dtype_limit
    result = invert(image)
    assert_array_equal(expected, result)
Example #15
 def attach(self, image_viewer):
     image = image_viewer.image
     imin, imax = skimage.dtype_limits(image)
     itype = "float" if np.issubdtype(image.dtype, float) else "int"
     self.add_widget(Slider("sigma", 0, 5, update_on="release"))
     self.add_widget(Slider("low threshold", imin, imax, value_type=itype, update_on="release"))
     self.add_widget(Slider("high threshold", imin, imax, value_type=itype, update_on="release"))
     self.add_widget(ComboBox("color", self.color_names, ptype="plugin"))
     # Call parent method at end b/c it calls `filter_image`, which needs
     # the values specified by the widgets. Alternatively, move call to
     # parent method to beginning and add a call to `self.filter_image()`
     super(CannyPlugin, self).attach(image_viewer)
Example #16
def test_invert_int8():
    dtype = 'int8'
    image = np.zeros((3, 3), dtype=dtype)
    lower_dtype_limit, upper_dtype_limit = \
        dtype_limits(image, clip_negative=False)
    image[1, :] = lower_dtype_limit
    image[2, :] = upper_dtype_limit
    expected = np.zeros((3, 3), dtype=dtype)
    expected[2, :] = lower_dtype_limit
    expected[1, :] = upper_dtype_limit
    expected[0, :] = -1
    result = invert(image)
    assert_array_equal(expected, result)
Example #17
 def attach(self, image_viewer):
     image = image_viewer.image
     imin, imax = skimage.dtype_limits(image)
     itype = 'float' if np.issubdtype(image.dtype, float) else 'int'
     self.add_widget(Slider('sigma', 0, 5, update_on='release'))
     self.add_widget(Slider('low threshold', imin, imax, value_type=itype,
                     update_on='release'))
     self.add_widget(Slider('high threshold', imin, imax, value_type=itype,
                     update_on='release'))
     self.add_widget(ComboBox('color', self.color_names, ptype='plugin'))
     # Call parent method at end b/c it calls `filter_image`, which needs
     # the values specified by the widgets. Alternatively, move call to
     # parent method to beginning and add a call to `self.filter_image()`
     super(CannyPlugin,self).attach(image_viewer)
Example #18
 def attach(self, image_viewer):
     image = image_viewer.image
     imin, imax = skimage.dtype_limits(image, clip_negative=False)
     itype = 'float' if np.issubdtype(image.dtype, float) else 'int'
     self.add_widget(Slider('sigma', 0, 5, update_on='release'))
     self.add_widget(Slider('low threshold', imin, imax, value_type=itype,
                     update_on='release'))
     self.add_widget(Slider('high threshold', imin, imax, value_type=itype,
                     update_on='release'))
     self.add_widget(ComboBox('color', self.color_names, ptype='plugin'))
     # Call parent method at end b/c it calls `filter_image`, which needs
     # the values specified by the widgets. Alternatively, move call to
     # parent method to beginning and add a call to `self.filter_image()`
     super(CannyPlugin,self).attach(image_viewer)
Example #19
def adjust_saturation(rgb_img, factor):
    """
    Adjust the saturation of an RGB image

    Args:
        rgb_img: RGB image data array
        factor: Multiplicative scaling factor to be applied to saturation

    Returns:
        adjusted_img: RGB image with adjusted saturation
    """
    hsv_img = skimage.color.rgb2hsv(rgb_img)
    imin, imax = skimage.dtype_limits(hsv_img)
    hsv_img[:,:,1] = np.clip(hsv_img[:,:,1] * factor, imin, imax)
    return skimage.color.hsv2rgb(hsv_img)
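A hedged usage sketch for adjust_saturation above, assuming it is in scope, the installed scikit-image still exposes skimage.dtype_limits at the top level, and the input is a float RGB image in [0, 1]:

import numpy as np

rng = np.random.default_rng(0)
rgb = rng.random((4, 4, 3))                    # float RGB image in [0, 1]
muted = adjust_saturation(rgb, 0.5)            # halve the saturation
gray_like = adjust_saturation(rgb, 0.0)        # zero saturation collapses colors to gray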
Example #20
def adjust_brightness(image, mask, gamma):
	"""
	This function uses gamma correction to adjust the brightness
	of an object in the input image.

	For more information, please refer to https://en.wikipedia.org/wiki/Gamma_correction

	This function does not include the gain parameter.

	Input:
		image: a 3-d numpy representation of the image
		mask: 2-d np array of the mask covering the object
		gamma: a positive parameter to control the change of brightness of the object;
			   gamma > 1 will increase the brightness and gamma < 1 will decrease
			   brightness; an int

	Output:
		output: the image after the brightness of the object is adjusted
	"""

	assert image.shape[:2] == mask.shape and gamma > 0

	## to increase the number of channel of the mask to three so that we can apply the masks
	## to image
	masks = np.stack([mask, mask, mask], axis = -1)

	scale = float(dtype_limits(image, True)[1] - dtype_limits(image, True)[0])

	output = np.where(masks == 1,
						(image / scale) ** (1 / gamma) * scale,
						image)

	## to make sure the pixel intensity is within the range of uint8
	output = np.clip(output, 0, 255).astype(np.uint8)

	return output
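A usage sketch for the gamma-based adjust_brightness above (assuming it and dtype_limits are in scope): the mask selects which pixels are brightened, all other pixels pass through unchanged.

import numpy as np

rng = np.random.default_rng(0)
image = rng.integers(0, 256, size=(4, 4, 3), dtype=np.uint8)
mask = np.zeros((4, 4), dtype=np.uint8)
mask[1:3, 1:3] = 1                             # brighten only the central 2x2 patch
brighter = adjust_brightness(image, mask, gamma=2)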
Example #21
def adjust_saturation(rgb_img, factor):
    """
    Adjust the saturation of an RGB image

    Args:
        rgb_img: RGB image data array
        factor: Multiplicative scaling factor to be applied to saturation

    Returns:
        adjusted_img: RGB image with adjusted saturation
    """
    hsv_img = skimage.color.rgb2hsv(rgb_img)
    imin, imax = skimage.dtype_limits(hsv_img)
    hsv_img[:,:,1] = np.clip(hsv_img[:,:,1] * factor, imin, imax)
    return skimage.color.hsv2rgb(hsv_img)
Example #22
def adjust_brightness(rgb_img, delta):
    """
    Adjust the brightness of an RGB image

    Args:
        rgb_img: RGB image data array
        delta: Additive (normalized) gain factor applied to each pixel

    Returns:
        adjusted_img: RGB image with adjusted saturation
    """
    imin, imax = skimage.dtype_limits(rgb_img)
    # Convert delta into the range of the image data
    delta = rgb_img.dtype.type((imax - imin) * delta)

    return np.clip(rgb_img + delta, imin, imax)
Example #23
def adjust_brightness(rgb_img, delta):
    """
    Adjust the brightness of an RGB image

    Args:
        rgb_img: RGB image data array
        delta: Additive (normalized) gain factor applied to each pixel

    Returns:
        adjusted_img: RGB image with adjusted saturation
    """
    imin, imax = skimage.dtype_limits(rgb_img)
    # Convert delta into the range of the image data
    delta = rgb_img.dtype.type((imax - imin) * delta)

    return np.clip(rgb_img + delta, imin, imax)
Example #24
 def apply(self, image, evaluation):
     'ColorNegate[image_Image]'
     pixels = image.pixels
     anchor = numpy.ndarray(pixels.shape, dtype=pixels.dtype)
     anchor.fill(skimage.dtype_limits(pixels)[1])
     return Image(anchor - pixels, image.color_space)
Example #25
def threshold_multiotsu(image, nclass=3, nbins=255):
    """Generates multiple thresholds for an input image. Based on the
    Multi-Otsu approach by Liao, Chen and Chung.

    Parameters
    ----------
    image : (N, M) ndarray
        Grayscale input image.
    nclass : int, optional
        Number of classes to be thresholded, i.e. the number of resulting
        regions. Accepts an integer from 2 to 5. Default is 3.
    nbins : int, optional
        Number of bins used to calculate the histogram. Default is 255.

    Returns
    -------
    idx_thresh : (nclass) array
        Array containing the threshold values for the desired classes.
    max_sigma : float
        Maximum sigma value achieved on the classes.

    References
    ----------
    .. [1] Liao, P-S., Chen, T-S. and Chung, P-C., "A fast algorithm for
    multilevel thresholding", Journal of Information Science and
    Engineering 17 (5): 713-727, 2001. Available at:
    http://www.iis.sinica.edu.tw/page/jise/2001/200109_01.html
    .. [2] Tosa, Y., "Multi-Otsu Threshold", a java plugin for ImageJ.
    Available at:
    http://imagej.net/plugins/download/Multi_OtsuThreshold.java

    Examples
    --------
    >>> from skimage import data
    >>> image = data.camera()
    >>> thresh = threshold_multiotsu(image)
    >>> region1 = image <= thresh[0]
    >>> region2 = (image > thresh[0]) & (image <= thresh[1])
    >>> region3 = image > thresh[1]
    """
    if image.shape[-1] in (3, 4):
        raise TypeError("The input image seems to be RGB (shape: {0}. Please"
                        "use a grayscale image.".format(image.shape))

    if image.min() == image.max():
        raise TypeError("The input image seems to have only one color: {0}."
                        "Please use a grayscale image.".format(image.min()))

    # check if nclass is between 2 and 5.
    if nclass not in np.array((2, 3, 4, 5)):
        raise ValueError("Please choose a number of classes between "
                         "2 and 5.")

    # receiving minimum and maximum values for the image type.
    type_min, type_max = dtype_limits(image)
    # calculating the histogram and the probability of each gray level.
    hist, _ = np.histogram(image.ravel(),
                           bins=nbins,
                           range=(type_min, type_max))
    prob = hist / image.size

    max_sigma = 0
    momP, momS, var_btwcls = [np.zeros((nbins, nbins)) for n in range(3)]

    # building the lookup tables.
    # step 1: calculating the diagonal.
    for u in range(1, nbins):
        momP[u, u] = prob[u]
        momS[u, u] = u * prob[u]

    # step 2: calculating the first row.
    for u in range(1, nbins - 1):
        momP[1, u + 1] = momP[1, u] + prob[u + 1]
        momS[1, u + 1] = momS[1, u] + (u + 1) * prob[u + 1]

    # step 3: calculating the other rows recursively.
    for u in range(2, nbins):
        for v in range(u + 1, nbins):
            momP[u, v] = momP[1, v] - momP[1, u - 1]
            momS[u, v] = momS[1, v] - momS[1, u - 1]

    # step 4: calculating the between class variance.
    for u in range(1, nbins):
        for v in range(u + 1, nbins):
            if (momP[u, v] != 0):
                var_btwcls[u, v] = momS[u, v]**2 / momP[u, v]
            else:
                var_btwcls[u, v] = 0

    # finding max threshold candidates, depending on nclass.
    # number of thresholds is equal to number of classes - 1.
    if nclass == 2:
        for idx in range(1, nbins - nclass):
            part_sigma = var_btwcls[1, idx] + var_btwcls[idx + 1, nbins - 1]
            if max_sigma < part_sigma:
                aux_thresh = idx
                max_sigma = part_sigma

    elif nclass == 3:
        for idx1 in range(1, nbins - nclass):
            for idx2 in range(idx1 + 1, nbins - nclass + 1):
                part_sigma = var_btwcls[1, idx1] + \
                            var_btwcls[idx1+1, idx2] + \
                            var_btwcls[idx2+1, nbins-1]

                if max_sigma < part_sigma:
                    aux_thresh = idx1, idx2
                    max_sigma = part_sigma

    elif nclass == 4:
        for idx1 in range(1, nbins - nclass):
            for idx2 in range(idx1 + 1, nbins - nclass + 1):
                for idx3 in range(idx2 + 1, nbins - nclass + 2):
                    part_sigma = var_btwcls[1, idx1] + \
                                var_btwcls[idx1+1, idx2] + \
                                var_btwcls[idx2+1, idx3] + \
                                var_btwcls[idx3+1, nbins-1]

                    if max_sigma < part_sigma:
                        aux_thresh = idx1, idx2, idx3
                        max_sigma = part_sigma

    elif nclass == 5:
        for idx1 in range(1, nbins - nclass):
            for idx2 in range(idx1 + 1, nbins - nclass + 1):
                for idx3 in range(idx2 + 1, nbins - nclass + 2):
                    for idx4 in range(idx3 + 1, nbins - nclass + 3):
                        part_sigma = var_btwcls[1, idx1] + \
                            var_btwcls[idx1+1, idx2] + \
                            var_btwcls[idx2+1, idx3] + \
                            var_btwcls[idx3+1, idx4] + \
                            var_btwcls[idx4+1, nbins-1]

                        if max_sigma < part_sigma:
                            aux_thresh = idx1, idx2, idx3, idx4
                            max_sigma = part_sigma

    # correcting values according to minimum and maximum values.
    idx_thresh = np.asarray(aux_thresh) * (type_max - type_min) / nbins

    return idx_thresh, max_sigma
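The public skimage.filters.threshold_multiotsu implements the same Liao et al. approach but returns only the threshold values (no max_sigma). A sketch that labels every pixel with its class:

import numpy as np
from skimage import data
from skimage.filters import threshold_multiotsu

image = data.camera()
thresholds = threshold_multiotsu(image, classes=3)
regions = np.digitize(image, bins=thresholds)      # per-pixel class labels 0, 1, 2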
Example #26
def contrast_ratio(array):
    ## Returns the ratio of the difference between the 1st and 99th percentile
    ## pixel values to the full range of the array's dtype
    dlimits = dtype_limits(array, clip_negative=False)
    limits = np.percentile(array, [1, 99])
    return (limits[1] - limits[0]) / (dlimits[1] - dlimits[0])
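Assuming contrast_ratio above is in scope: a constant image gives a ratio of 0, while an image spanning the full dtype range gives a ratio close to 1 (the 1st/99th percentiles trim the extremes slightly).

import numpy as np

flat = np.full((8, 8), 128, dtype=np.uint8)
spread = np.linspace(0, 255, 64).astype(np.uint8).reshape(8, 8)
print(contrast_ratio(flat))      # 0.0
print(contrast_ratio(spread))    # roughly 0.98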
Example #27
def canny(image, sigma=1., low_threshold=None, high_threshold=None, mask=None):
    """Edge filter an image using the Canny algorithm.

    Parameters
    -----------
    image : 2D array
        Greyscale input image to detect edges on; can be of any dtype.
    sigma : float
        Standard deviation of the Gaussian filter.
    low_threshold : float
        Lower bound for hysteresis thresholding (linking edges).
        If None, low_threshold is set to 10% of dtype's max.
    high_threshold : float
        Upper bound for hysteresis thresholding (linking edges).
        If None, high_threshold is set to 20% of dtype's max.
    mask : array, dtype=bool, optional
        Mask to limit the application of Canny to a certain area.

    Returns
    -------
    output : 2D array (image)
        The binary edge map.

    See also
    --------
    skimage.sobel

    Notes
    -----
    The steps of the algorithm are as follows:

    * Smooth the image using a Gaussian with ``sigma`` width.

    * Apply the horizontal and vertical Sobel operators to get the gradients
      within the image. The edge strength is the norm of the gradient.

    * Thin potential edges to 1-pixel wide curves. First, find the normal
      to the edge at each point. This is done by looking at the
      signs and the relative magnitude of the X-Sobel and Y-Sobel
      to sort the points into 4 categories: horizontal, vertical,
      diagonal and antidiagonal. Then look in the normal and reverse
      directions to see if the values in either of those directions are
      greater than the point in question. Use interpolation to get a mix of
      points instead of picking the one that's the closest to the normal.

    * Perform a hysteresis thresholding: first label all points above the
      high threshold as edges. Then recursively label any point above the
      low threshold that is 8-connected to a labeled point as an edge.

    References
    -----------
    Canny, J., A Computational Approach To Edge Detection, IEEE Trans.
    Pattern Analysis and Machine Intelligence, 8:679-714, 1986

    William Green's Canny tutorial
    http://dasl.mem.drexel.edu/alumni/bGreen/www.pages.drexel.edu/_weg22/can_tut.html

    Examples
    --------
    >>> from skimage import filter
    >>> # Generate noisy image of a square
    >>> im = np.zeros((256, 256))
    >>> im[64:-64, 64:-64] = 1
    >>> im += 0.2 * np.random.random(im.shape)
    >>> # First trial with the Canny filter, with the default smoothing
    >>> edges1 = filter.canny(im)
    >>> # Increase the smoothing for better results
    >>> edges2 = filter.canny(im, sigma=3)
    """

    #
    # The steps involved:
    #
    # * Smooth using the Gaussian with sigma above.
    #
    # * Apply the horizontal and vertical Sobel operators to get the gradients
    #   within the image. The edge strength is the sum of the magnitudes
    #   of the gradients in each direction.
    #
    # * Find the normal to the edge at each point using the arctangent of the
    #   ratio of the Y sobel over the X sobel - pragmatically, we can
    #   look at the signs of X and Y and the relative magnitude of X vs Y
    #   to sort the points into 4 categories: horizontal, vertical,
    #   diagonal and antidiagonal.
    #
    # * Look in the normal and reverse directions to see if the values
    #   in either of those directions are greater than the point in question.
    #   Use interpolation to get a mix of points instead of picking the one
    #   that's the closest to the normal.
    #
    # * Label all points above the high threshold as edges.
    # * Recursively label any point above the low threshold that is 8-connected
    #   to a labeled point as an edge.
    #
    # Regarding masks, any point touching a masked point will have a gradient
    # that is "infected" by the masked point, so it's enough to erode the
    # mask by one and then mask the output. We also mask out the border points
    # because who knows what lies beyond the edge of the image?
    #

    if image.ndim != 2:
        raise TypeError("The input 'image' must be a two-dimensional array.")

    if low_threshold is None:
        low_threshold = 0.1 * dtype_limits(image)[1]

    if high_threshold is None:
        high_threshold = 0.2 * dtype_limits(image)[1]

    if mask is None:
        mask = np.ones(image.shape, dtype=bool)
    fsmooth = lambda x: gaussian_filter(x, sigma, mode='constant')
    smoothed = smooth_with_function_and_mask(image, fsmooth, mask)
    jsobel = ndi.sobel(smoothed, axis=1)
    isobel = ndi.sobel(smoothed, axis=0)
    abs_isobel = np.abs(isobel)
    abs_jsobel = np.abs(jsobel)
    magnitude = np.hypot(isobel, jsobel)

    #
    # Make the eroded mask. Setting the border value to zero will wipe
    # out the image edges for us.
    #
    s = generate_binary_structure(2, 2)
    eroded_mask = binary_erosion(mask, s, border_value=0)
    eroded_mask = eroded_mask & (magnitude > 0)
    #
    #--------- Find local maxima --------------
    #
    # Assign each point to have a normal of 0-45 degrees, 45-90 degrees,
    # 90-135 degrees and 135-180 degrees.
    #
    local_maxima = np.zeros(image.shape, bool)
    #----- 0 to 45 degrees ------
    pts_plus = (isobel >= 0) & (jsobel >= 0) & (abs_isobel >= abs_jsobel)
    pts_minus = (isobel <= 0) & (jsobel <= 0) & (abs_isobel >= abs_jsobel)
    pts = pts_plus | pts_minus
    pts = eroded_mask & pts
    # Get the magnitudes shifted left to make a matrix of the points to the
    # right of pts. Similarly, shift left and down to get the points to the
    # top right of pts.
    c1 = magnitude[1:, :][pts[:-1, :]]
    c2 = magnitude[1:, 1:][pts[:-1, :-1]]
    m = magnitude[pts]
    w = abs_jsobel[pts] / abs_isobel[pts]
    c_plus = c2 * w + c1 * (1 - w) <= m
    c1 = magnitude[:-1, :][pts[1:, :]]
    c2 = magnitude[:-1, :-1][pts[1:, 1:]]
    c_minus = c2 * w + c1 * (1 - w) <= m
    local_maxima[pts] = c_plus & c_minus
    #----- 45 to 90 degrees ------
    # Mix diagonal and vertical
    #
    pts_plus = (isobel >= 0) & (jsobel >= 0) & (abs_isobel <= abs_jsobel)
    pts_minus = (isobel <= 0) & (jsobel <= 0) & (abs_isobel <= abs_jsobel)
    pts = pts_plus | pts_minus
    pts = eroded_mask & pts
    c1 = magnitude[:, 1:][pts[:, :-1]]
    c2 = magnitude[1:, 1:][pts[:-1, :-1]]
    m = magnitude[pts]
    w = abs_isobel[pts] / abs_jsobel[pts]
    c_plus = c2 * w + c1 * (1 - w) <= m
    c1 = magnitude[:, :-1][pts[:, 1:]]
    c2 = magnitude[:-1, :-1][pts[1:, 1:]]
    c_minus = c2 * w + c1 * (1 - w) <= m
    local_maxima[pts] = c_plus & c_minus
    #----- 90 to 135 degrees ------
    # Mix anti-diagonal and vertical
    #
    pts_plus = (isobel <= 0) & (jsobel >= 0) & (abs_isobel <= abs_jsobel)
    pts_minus = (isobel >= 0) & (jsobel <= 0) & (abs_isobel <= abs_jsobel)
    pts = pts_plus | pts_minus
    pts = eroded_mask & pts
    c1a = magnitude[:, 1:][pts[:, :-1]]
    c2a = magnitude[:-1, 1:][pts[1:, :-1]]
    m = magnitude[pts]
    w = abs_isobel[pts] / abs_jsobel[pts]
    c_plus = c2a * w + c1a * (1.0 - w) <= m
    c1 = magnitude[:, :-1][pts[:, 1:]]
    c2 = magnitude[1:, :-1][pts[:-1, 1:]]
    c_minus = c2 * w + c1 * (1.0 - w) <= m
    local_maxima[pts] = c_plus & c_minus
    #----- 135 to 180 degrees ------
    # Mix anti-diagonal and anti-horizontal
    #
    pts_plus = (isobel <= 0) & (jsobel >= 0) & (abs_isobel >= abs_jsobel)
    pts_minus = (isobel >= 0) & (jsobel <= 0) & (abs_isobel >= abs_jsobel)
    pts = pts_plus | pts_minus
    pts = eroded_mask & pts
    c1 = magnitude[:-1, :][pts[1:, :]]
    c2 = magnitude[:-1, 1:][pts[1:, :-1]]
    m = magnitude[pts]
    w = abs_jsobel[pts] / abs_isobel[pts]
    c_plus = c2 * w + c1 * (1 - w) <= m
    c1 = magnitude[1:, :][pts[:-1, :]]
    c2 = magnitude[1:, :-1][pts[:-1, 1:]]
    c_minus = c2 * w + c1 * (1 - w) <= m
    local_maxima[pts] = c_plus & c_minus
    #
    #---- Create two masks at the two thresholds.
    #
    high_mask = local_maxima & (magnitude >= high_threshold)
    low_mask = local_maxima & (magnitude >= low_threshold)
    #
    # Segment the low-mask, then only keep low-segments that have
    # some high_mask component in them
    #
    strel = np.ones((3, 3), bool)
    labels, count = label(low_mask, strel)
    if count == 0:
        return low_mask

    sums = (np.array(ndi.sum(high_mask, labels,
                             np.arange(count, dtype=np.int32) + 1),
                     copy=False, ndmin=1))
    good_label = np.zeros((count + 1,), bool)
    good_label[1:] = sums > 0
    output_mask = good_label[labels]
    return output_mask
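In current scikit-image the same filter lives at skimage.feature.canny (the skimage.filter module used in the docstring example above has since been removed). A minimal sketch with the modern import path:

import numpy as np
from skimage import feature

rng = np.random.default_rng(0)
im = np.zeros((128, 128))
im[32:-32, 32:-32] = 1
im += 0.2 * rng.random(im.shape)
edges = feature.canny(im, sigma=3)         # boolean edge map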
Example #28
def file_writer(filename, signal, **kwds):
    """
    Write signal to blockfile.

    Parameters
    ----------
    file : str
        Filename of the file to write to
    signal : instance of hyperspy Signal2D
        The signal to save.
    endianess : str
        '<' (default) or '>' determining how the bits are written to the file
    intensity_scaling : str or 2-Tuple of float/int
        If the signal datatype is not uint8 this argument provides intensity
        linear scaling strategies. If 'dtype', the entire dtype range is mapped
        to 0-255, if 'minmax' the range between the minimum and maximum intensity is
        mapped to 0-255, if 'crop' the range between 0-255 is conserved without
        overflow, if a tuple of values, the values within this range are mapped
        to 0-255. If None (default) no rescaling is performed and overflow is
        permitted.
    navigator_signal : str or Signal2D
        A blo file also saves a virtual bright field image for navigation.
        This option determines what kind of data is stored for this image.
        The default option "navigator" uses the navigator image if it was
        previously calculated, else it is calculated here which can take
        some time for large datasets. Alternatively, a Signal2D of the right
        shape may also be provided. If set to None, a zero array is stored
        in the file.
    """
    endianess = kwds.pop("endianess", "<")
    scale_strategy = kwds.pop("intensity_scaling", None)
    vbf_strategy = kwds.pop("navigator_signal", "navigator")
    show_progressbar = kwds.pop("show_progressbar", None)
    if scale_strategy is None:
        # to distinguish from the tuple case
        if signal.data.dtype != "u1":
            warnings.warn(
                "Data does not have uint8 dtype: values outside the "
                "range 0-255 may result in overflow. To avoid this "
                "use the 'intensity_scaling' keyword argument.",
                UserWarning,
            )
    elif scale_strategy == "dtype":
        original_scale = dtype_limits(signal.data)
        if original_scale[1] == 1.0:
            raise ValueError("Signals with float dtype can not use 'dtype'")
    elif scale_strategy == "minmax":
        minimum = signal.data.min()
        maximum = signal.data.max()
        if signal._lazy:
            minimum, maximum = dask.compute(minimum, maximum)
        original_scale = (minimum, maximum)
    elif scale_strategy == "crop":
        original_scale = (0, 255)
    else:
        # we leave the error checking for incorrect tuples to skimage
        original_scale = scale_strategy

    header, note = get_header_from_signal(signal, endianess=endianess)
    with open(filename, "wb") as f:
        # Write header
        header.tofile(f)
        # Write header note field:
        if len(note) > int(header["Data_offset_1"]) - f.tell():
            note = note[:int(header["Data_offset_1"]) - f.tell() - len(note)]
        f.write(note.encode())
        # Zero pad until next data block
        zero_pad = int(header["Data_offset_1"]) - f.tell()
        np.zeros((zero_pad, ), np.byte).tofile(f)
        # Write virtual bright field
        if vbf_strategy is None:
            vbf = np.zeros((signal.data.shape[0], signal.data.shape[1]))
        elif isinstance(vbf_strategy, str) and (vbf_strategy == "navigator"):
            if signal.navigator is not None:
                vbf = signal.navigator.data
            else:
                if signal._lazy:
                    signal.compute_navigator()
                    vbf = signal.navigator.data
                else:
                    # TODO workaround for non-lazy datasets
                    sigints = signal.axes_manager.signal_indices_in_array[:2]
                    vbf = signal.mean(axis=sigints).data
        else:
            vbf = vbf_strategy.data
            # check that the shape is ok
            if vbf.shape != signal.data.shape[:-2]:
                raise ValueError("Size of the provided VBF does not match the "
                                 "navigation dimensions of the dataset.")
        if scale_strategy is not None:
            vbf = rescale_intensity(vbf,
                                    in_range=original_scale,
                                    out_range=np.uint8)
        vbf = vbf.astype(endianess + "u1")
        vbf.tofile(f)
        # Zero pad until next data block
        if f.tell() > int(header["Data_offset_2"]):
            raise ValueError("Signal navigation size does not match "
                             "data dimensions.")
        zero_pad = int(header["Data_offset_2"]) - f.tell()
        np.zeros((zero_pad, ), np.byte).tofile(f)
        file_location = f.tell()

    if scale_strategy is not None:
        signal = signal.map(
            rescale_intensity,
            in_range=original_scale,
            out_range=np.uint8,
            inplace=False,
        )
    array_data = signal.data.astype(endianess + "u1")
    # Write full data stack:
    # We need to pad each image with magic 'AA55', then a u32 serial
    pixels = array_data.shape[-2:]
    records = array_data.shape[:-2]
    record_dtype = [
        ("MAGIC", endianess + "u2"),
        ("ID", endianess + "u4"),
        ("IMG", endianess + "u1", pixels),
    ]
    magics = np.full(records, 0x55AA, dtype=endianess + "u2")
    ids = np.arange(np.prod(records),
                    dtype=endianess + "u4").reshape(records)
    file_memmap = np.memmap(filename,
                            dtype=record_dtype,
                            mode="r+",
                            offset=file_location,
                            shape=records)
    file_memmap["MAGIC"] = magics
    file_memmap["ID"] = ids
    if signal._lazy:
        if show_progressbar is None:
            show_progressbar = preferences.General.show_progressbar
        cm = ProgressBar if show_progressbar else dummy_context_manager
        with cm():
            signal.data.store(file_memmap["IMG"])
    else:
        file_memmap["IMG"] = signal.data
    file_memmap.flush()
Example #29
 def extract_image(self, image):
     return mahotas.features.zernike_moments(
         skimage.dtype_limits(image)[1] - image,
         radius=self.radius, degree=self.polynomial)
Example #30
def extract_region_props(img_path,
                         section_dataset_id,
                         probes,
                         ish_minval=70
                         ) -> list:
    """Segment neuron cell bodies via thresholding.

    Accepts images from the Allen Brain Institute (ISH or FISH) and segments
    fluorescently labeled neuron cell bodies. Segmentation is accomplished by
    computing a label matrix on the thresholded image (via Otsu's method).

    Args:
        img_path (str): full path to the image.
        section_dataset_id (int): The experiment ID specified by the
            Allen Brain Institute.
        probes (list): list of strings, specifying the RNA target of the
            ISH or FISH stain
        ish_minval (int): applies to ISH images only. Any value below
            this will be ignored by the thresholding algorithm.
            Default value is 70.

    Returns:
        rprops (list): each element is a dictionary of region properties
            as defined by scikit-image's regionprops function
    """
    # get the section dataset imaging params
    params = get_imaging_params(section_dataset_id)

    # user must specify probe(s) (i.e., color channels) to analyze
    # if only one probe is specified, turn it into a list
    if type(probes) != list and type(probes) == str:
        probes = [probes]

    probe_ch = [
        params['red_channel'].lower() in probes,
        params['green_channel'].lower() in probes,
        params['blue_channel'].lower() in probes,
    ]

    # open the image
    img = skio.imread(img_path)

    if params['is_FISH']:
        n_ch_correct = sum(probe_ch) > 0 and sum(probe_ch) <= 3
        assert n_ch_correct, "Did not identify the correct number of channels"
        img = np.array(img[:, :, probe_ch]).max(axis=2)  # max project

        # measure threshold
        thresh = threshold_otsu(img, nbins=256)

    elif params['is_ISH']:
        img = dtype_limits(img)[1] - img  # invert
        assert sum(probe_ch) == 3, "Not all ISH color channels identical"
        img = np.max(img, axis=2)  # max project inverted image

        # measure threshold
        thresh = threshold_otsu(img[img > ish_minval], nbins=256)

    else:
        raise ValueError('Image is neither FISH nor ISH')

    # apply the threshold to the image, which is now just a 2D matrix
    bw = img > thresh

    # label image regions with an integer. Each region gets a unique integer
    label_image = label(bw)
    rprops = regionprops(label_image)

    return rprops
Example #31
def canny(image, sigma=1., low_threshold=None, high_threshold=None, mask=None):
    """Edge filter an image using the Canny algorithm.

    Parameters
    -----------
    image : 2D array
        Greyscale input image to detect edges on; can be of any dtype.
    sigma : float
        Standard deviation of the Gaussian filter.
    low_threshold : float
        Lower bound for hysteresis thresholding (linking edges).
        If None, low_threshold is set to 10% of dtype's max.
    high_threshold : float
        Upper bound for hysteresis thresholding (linking edges).
        If None, high_threshold is set to 20% of dtype's max.
    mask : array, dtype=bool, optional
        Mask to limit the application of Canny to a certain area.

    Returns
    -------
    output : 2D array (image)
        The binary edge map.

    See also
    --------
    skimage.sobel

    Notes
    -----
    The steps of the algorithm are as follows:

    * Smooth the image using a Gaussian with ``sigma`` width.

    * Apply the horizontal and vertical Sobel operators to get the gradients
      within the image. The edge strength is the norm of the gradient.

    * Thin potential edges to 1-pixel wide curves. First, find the normal
      to the edge at each point. This is done by looking at the
      signs and the relative magnitude of the X-Sobel and Y-Sobel
      to sort the points into 4 categories: horizontal, vertical,
      diagonal and antidiagonal. Then look in the normal and reverse
      directions to see if the values in either of those directions are
      greater than the point in question. Use interpolation to get a mix of
      points instead of picking the one that's the closest to the normal.

    * Perform a hysteresis thresholding: first label all points above the
      high threshold as edges. Then recursively label any point above the
      low threshold that is 8-connected to a labeled point as an edge.

    References
    -----------
    Canny, J., A Computational Approach To Edge Detection, IEEE Trans.
    Pattern Analysis and Machine Intelligence, 8:679-714, 1986

    William Green's Canny tutorial
    http://dasl.mem.drexel.edu/alumni/bGreen/www.pages.drexel.edu/_weg22/can_tut.html

    Examples
    --------
    >>> from skimage import filter
    >>> # Generate noisy image of a square
    >>> im = np.zeros((256, 256))
    >>> im[64:-64, 64:-64] = 1
    >>> im += 0.2 * np.random.rand(*im.shape)
    >>> # First trial with the Canny filter, with the default smoothing
    >>> edges1 = filter.canny(im)
    >>> # Increase the smoothing for better results
    >>> edges2 = filter.canny(im, sigma=3)
    """

    #
    # The steps involved:
    #
    # * Smooth using the Gaussian with sigma above.
    #
    # * Apply the horizontal and vertical Sobel operators to get the gradients
    #   within the image. The edge strength is the sum of the magnitudes
    #   of the gradients in each direction.
    #
    # * Find the normal to the edge at each point using the arctangent of the
    #   ratio of the Y sobel over the X sobel - pragmatically, we can
    #   look at the signs of X and Y and the relative magnitude of X vs Y
    #   to sort the points into 4 categories: horizontal, vertical,
    #   diagonal and antidiagonal.
    #
    # * Look in the normal and reverse directions to see if the values
    #   in either of those directions are greater than the point in question.
    #   Use interpolation to get a mix of points instead of picking the one
    #   that's the closest to the normal.
    #
    # * Label all points above the high threshold as edges.
    # * Recursively label any point above the low threshold that is 8-connected
    #   to a labeled point as an edge.
    #
    # Regarding masks, any point touching a masked point will have a gradient
    # that is "infected" by the masked point, so it's enough to erode the
    # mask by one and then mask the output. We also mask out the border points
    # because who knows what lies beyond the edge of the image?
    #

    if image.ndim != 2:
        raise TypeError("The input 'image' must be a two-dimensional array.")

    if low_threshold is None:
        low_threshold = 0.1 * dtype_limits(image)[1]

    if high_threshold is None:
        high_threshold = 0.2 * dtype_limits(image)[1]

    if mask is None:
        mask = np.ones(image.shape, dtype=bool)
    fsmooth = lambda x: gaussian_filter(x, sigma, mode='constant')
    smoothed = smooth_with_function_and_mask(image, fsmooth, mask)
    jsobel = ndi.sobel(smoothed, axis=1)
    isobel = ndi.sobel(smoothed, axis=0)
    abs_isobel = np.abs(isobel)
    abs_jsobel = np.abs(jsobel)
    magnitude = np.hypot(isobel, jsobel)

    #
    # Make the eroded mask. Setting the border value to zero will wipe
    # out the image edges for us.
    #
    s = generate_binary_structure(2, 2)
    eroded_mask = binary_erosion(mask, s, border_value=0)
    eroded_mask = eroded_mask & (magnitude > 0)
    #
    #--------- Find local maxima --------------
    #
    # Assign each point to have a normal of 0-45 degrees, 45-90 degrees,
    # 90-135 degrees and 135-180 degrees.
    #
    local_maxima = np.zeros(image.shape, bool)
    #----- 0 to 45 degrees ------
    pts_plus = (isobel >= 0) & (jsobel >= 0) & (abs_isobel >= abs_jsobel)
    pts_minus = (isobel <= 0) & (jsobel <= 0) & (abs_isobel >= abs_jsobel)
    pts = pts_plus | pts_minus
    pts = eroded_mask & pts
    # Get the magnitudes shifted left to make a matrix of the points to the
    # right of pts. Similarly, shift left and down to get the points to the
    # top right of pts.
    c1 = magnitude[1:, :][pts[:-1, :]]
    c2 = magnitude[1:, 1:][pts[:-1, :-1]]
    m = magnitude[pts]
    w = abs_jsobel[pts] / abs_isobel[pts]
    c_plus = c2 * w + c1 * (1 - w) <= m
    c1 = magnitude[:-1, :][pts[1:, :]]
    c2 = magnitude[:-1, :-1][pts[1:, 1:]]
    c_minus = c2 * w + c1 * (1 - w) <= m
    local_maxima[pts] = c_plus & c_minus
    #----- 45 to 90 degrees ------
    # Mix diagonal and vertical
    #
    pts_plus = (isobel >= 0) & (jsobel >= 0) & (abs_isobel <= abs_jsobel)
    pts_minus = (isobel <= 0) & (jsobel <= 0) & (abs_isobel <= abs_jsobel)
    pts = pts_plus | pts_minus
    pts = eroded_mask & pts
    c1 = magnitude[:, 1:][pts[:, :-1]]
    c2 = magnitude[1:, 1:][pts[:-1, :-1]]
    m = magnitude[pts]
    w = abs_isobel[pts] / abs_jsobel[pts]
    c_plus = c2 * w + c1 * (1 - w) <= m
    c1 = magnitude[:, :-1][pts[:, 1:]]
    c2 = magnitude[:-1, :-1][pts[1:, 1:]]
    c_minus = c2 * w + c1 * (1 - w) <= m
    local_maxima[pts] = c_plus & c_minus
    #----- 90 to 135 degrees ------
    # Mix anti-diagonal and vertical
    #
    pts_plus = (isobel <= 0) & (jsobel >= 0) & (abs_isobel <= abs_jsobel)
    pts_minus = (isobel >= 0) & (jsobel <= 0) & (abs_isobel <= abs_jsobel)
    pts = pts_plus | pts_minus
    pts = eroded_mask & pts
    c1a = magnitude[:, 1:][pts[:, :-1]]
    c2a = magnitude[:-1, 1:][pts[1:, :-1]]
    m = magnitude[pts]
    w = abs_isobel[pts] / abs_jsobel[pts]
    c_plus = c2a * w + c1a * (1.0 - w) <= m
    c1 = magnitude[:, :-1][pts[:, 1:]]
    c2 = magnitude[1:, :-1][pts[:-1, 1:]]
    c_minus = c2 * w + c1 * (1.0 - w) <= m
    local_maxima[pts] = c_plus & c_minus
    #----- 135 to 180 degrees ------
    # Mix anti-diagonal and anti-horizontal
    #
    pts_plus = (isobel <= 0) & (jsobel >= 0) & (abs_isobel >= abs_jsobel)
    pts_minus = (isobel >= 0) & (jsobel <= 0) & (abs_isobel >= abs_jsobel)
    pts = pts_plus | pts_minus
    pts = eroded_mask & pts
    c1 = magnitude[:-1, :][pts[1:, :]]
    c2 = magnitude[:-1, 1:][pts[1:, :-1]]
    m = magnitude[pts]
    w = abs_jsobel[pts] / abs_isobel[pts]
    c_plus = c2 * w + c1 * (1 - w) <= m
    c1 = magnitude[1:, :][pts[:-1, :]]
    c2 = magnitude[1:, :-1][pts[:-1, 1:]]
    c_minus = c2 * w + c1 * (1 - w) <= m
    local_maxima[pts] = c_plus & c_minus
    #
    #---- Create two masks at the two thresholds.
    #
    high_mask = local_maxima & (magnitude >= high_threshold)
    low_mask = local_maxima & (magnitude >= low_threshold)
    #
    # Segment the low-mask, then only keep low-segments that have
    # some high_mask component in them
    #
    strel = np.ones((3, 3), bool)
    labels, count = label(low_mask, strel)
    if count == 0:
        return low_mask

    sums = (np.array(ndi.sum(high_mask, labels,
                             np.arange(count, dtype=np.int32) + 1),
                     copy=False,
                     ndmin=1))
    good_label = np.zeros((count + 1, ), bool)
    good_label[1:] = sums > 0
    output_mask = good_label[labels]
    return output_mask
Example #32
def contrast(image):
    dlimits = dtype_limits(image, clip_negative=False)
    limits = np.percentile(image, [1,99])
    ratio = (limits[1] - limits[0]) / (dlimits[1] - dlimits[0])
    return ratio