Example #1
def test_empty_selem():
    # check that min, max and mean returns zeros if structuring element is
    # empty

    image = np.zeros((5, 5), dtype=np.uint16)
    out = np.zeros_like(image)
    mask = np.ones_like(image, dtype=np.uint8)
    res = np.zeros_like(image)
    image[2, 2] = 255
    image[2, 3] = 128
    image[1, 2] = 16

    elem = np.array([[0, 0, 0], [0, 0, 0]], dtype=np.uint8)

    rank.mean(image=image, selem=elem, out=out, mask=mask,
              shift_x=0, shift_y=0)
    assert_equal(res, out)
    rank.geometric_mean(image=image, selem=elem, out=out, mask=mask,
                        shift_x=0, shift_y=0)
    assert_equal(res, out)
    rank.minimum(image=image, selem=elem, out=out, mask=mask,
                 shift_x=0, shift_y=0)
    assert_equal(res, out)
    rank.maximum(image=image, selem=elem, out=out, mask=mask,
                 shift_x=0, shift_y=0)
    assert_equal(res, out)
Example #2
def test_structuring_element8():
    # check the output for a custom structuring element

    r = np.array([[0, 0, 0, 0, 0, 0],
                  [0, 0, 0, 0, 0, 0],
                  [0, 0, 255, 0, 0, 0],
                  [0, 0, 255, 255, 255, 0],
                  [0, 0, 0, 255, 255, 0],
                  [0, 0, 0, 0, 0, 0]])

    # 8-bit
    image = np.zeros((6, 6), dtype=np.uint8)
    image[2, 2] = 255
    elem = np.asarray([[1, 1, 0], [1, 1, 1], [0, 0, 1]], dtype=np.uint8)
    out = np.empty_like(image)
    mask = np.ones(image.shape, dtype=np.uint8)

    rank.maximum(image=image, selem=elem, out=out, mask=mask,
                 shift_x=1, shift_y=1)
    assert_equal(r, out)

    # 16-bit
    image = np.zeros((6, 6), dtype=np.uint16)
    image[2, 2] = 255
    out = np.empty_like(image)

    rank.maximum(image=image, selem=elem, out=out, mask=mask,
                 shift_x=1, shift_y=1)
    assert_equal(r, out)
Example #3
def test_smallest_selem16():
    # check that min, max and mean returns identity if structuring element
    # contains only central pixel

    image = np.zeros((5, 5), dtype=np.uint16)
    out = np.zeros_like(image)
    mask = np.ones_like(image, dtype=np.uint8)
    image[2, 2] = 255
    image[2, 3] = 128
    image[1, 2] = 16

    elem = np.array([[1]], dtype=np.uint8)
    rank.mean(image=image,
              selem=elem,
              out=out,
              mask=mask,
              shift_x=0,
              shift_y=0)
    assert_equal(image, out)
    rank.minimum(image=image,
                 selem=elem,
                 out=out,
                 mask=mask,
                 shift_x=0,
                 shift_y=0)
    assert_equal(image, out)
    rank.maximum(image=image,
                 selem=elem,
                 out=out,
                 mask=mask,
                 shift_x=0,
                 shift_y=0)
    assert_equal(image, out)
Example #4
def test_empty_selem():
    # check that min, max and mean returns zeros if structuring element is
    # empty

    image = np.zeros((5, 5), dtype=np.uint16)
    out = np.zeros_like(image)
    mask = np.ones_like(image, dtype=np.uint8)
    res = np.zeros_like(image)
    image[2, 2] = 255
    image[2, 3] = 128
    image[1, 2] = 16

    elem = np.array([[0, 0, 0], [0, 0, 0]], dtype=np.uint8)

    rank.mean(image=image,
              selem=elem,
              out=out,
              mask=mask,
              shift_x=0,
              shift_y=0)
    assert_equal(res, out)
    rank.minimum(image=image,
                 selem=elem,
                 out=out,
                 mask=mask,
                 shift_x=0,
                 shift_y=0)
    assert_equal(res, out)
    rank.maximum(image=image,
                 selem=elem,
                 out=out,
                 mask=mask,
                 shift_x=0,
                 shift_y=0)
    assert_equal(res, out)
Example #5
    def test_output_same_dtype(self):
        image = (np.random.rand(100, 100) * 256).astype(np.uint8)
        out = np.empty_like(image)
        mask = np.ones(image.shape, dtype=np.uint8)
        elem = np.ones((3, 3), dtype=np.uint8)
        rank.maximum(image=image, selem=elem, out=out, mask=mask)
        assert_equal(image.dtype, out.dtype)
Example #6
def test_structuring_element8():
    # check the output for a custom structuring element

    r = np.array([[0, 0, 0, 0, 0, 0], [0, 0, 0, 0, 0, 0], [0, 0, 255, 0, 0, 0],
                  [0, 0, 255, 255, 255, 0], [0, 0, 0, 255, 255, 0],
                  [0, 0, 0, 0, 0, 0]])

    # 8-bit
    image = np.zeros((6, 6), dtype=np.uint8)
    image[2, 2] = 255
    elem = np.asarray([[1, 1, 0], [1, 1, 1], [0, 0, 1]], dtype=np.uint8)
    out = np.empty_like(image)
    mask = np.ones(image.shape, dtype=np.uint8)

    rank.maximum(image=image,
                 selem=elem,
                 out=out,
                 mask=mask,
                 shift_x=1,
                 shift_y=1)
    assert_equal(r, out)

    # 16-bit
    image = np.zeros((6, 6), dtype=np.uint16)
    image[2, 2] = 255
    out = np.empty_like(image)

    rank.maximum(image=image,
                 selem=elem,
                 out=out,
                 mask=mask,
                 shift_x=1,
                 shift_y=1)
    assert_equal(r, out)
Example #7
def test_pass_on_bitdepth():
    # should pass because data bitdepth is not too high for the function

    image = np.ones((100, 100), dtype=np.uint16) * 2**11
    elem = np.ones((3, 3), dtype=np.uint8)
    out = np.empty_like(image)
    mask = np.ones(image.shape, dtype=np.uint8)
    with expected_warnings(["Bitdepth of"]):
        rank.maximum(image=image, selem=elem, out=out, mask=mask)
Example #8
    def test_pass_on_bitdepth(self):
        # should pass because data bitdepth is not too high for the function

        image = np.full((100, 100), 2**11, dtype=np.uint16)
        elem = np.ones((3, 3), dtype=np.uint8)
        out = np.empty_like(image)
        mask = np.ones(image.shape, dtype=np.uint8)
        with expected_warnings(["Bad rank filter performance"]):
            rank.maximum(image=image, footprint=elem, out=out, mask=mask)
Example #9
def test_pass_on_bitdepth():
    # should pass because data bitdepth is not too high for the function

    image = np.ones((100, 100), dtype=np.uint16) * 2 ** 11
    elem = np.ones((3, 3), dtype=np.uint8)
    out = np.empty_like(image)
    mask = np.ones(image.shape, dtype=np.uint8)
    with expected_warnings(["Bitdepth of"]):
        rank.maximum(image=image, selem=elem, out=out, mask=mask)
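
Examples #7-#9 exercise the bit-depth check under two generations of the rank-filter API: older releases take `selem=` and warn with a message starting "Bitdepth of", while newer releases take `footprint=` and warn with "Bad rank filter performance". A minimal, version-agnostic sketch that only assumes some warning is emitted for the 2**11-valued uint16 image used above:

import warnings

import numpy as np
from skimage.filters import rank

image = np.full((100, 100), 2 ** 11, dtype=np.uint16)  # needs 12 significant bits
footprint = np.ones((3, 3), dtype=np.uint8)

with warnings.catch_warnings(record=True) as caught:
    warnings.simplefilter("always")
    rank.maximum(image, footprint)  # positional args work on both old and new APIs

# Older releases warn "Bitdepth of ...", newer ones "Bad rank filter performance ..."
print([str(w.message) for w in caught])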
Example #10
def test_compare_with_grey_dilation():
    # compare the result of maximum filter with dilate

    image = (np.random.rand(100, 100) * 256).astype(np.uint8)
    out = np.empty_like(image)
    mask = np.ones(image.shape, dtype=np.uint8)

    for r in range(3, 20, 2):
        elem = np.ones((r, r), dtype=np.uint8)
        rank.maximum(image=image, selem=elem, out=out, mask=mask)
        cm = grey.dilation(image=image, selem=elem)
        assert_equal(out, cm)
Example #12
def test_percentile_max():
    # check that percentile p0 = 1 is identical to local max
    img = data.camera()
    img16 = img.astype(np.uint16)
    selem = disk(15)
    # check for 8bit
    img_p0 = rank.percentile(img, selem=selem, p0=1.)
    img_max = rank.maximum(img, selem=selem)
    assert_equal(img_p0, img_max)
    # check for 16bit
    img_p0 = rank.percentile(img16, selem=selem, p0=1.)
    img_max = rank.maximum(img16, selem=selem)
    assert_equal(img_p0, img_max)
Example #14
    def calculate(self, image: np.ndarray, mask: np.ndarray,
                  **kwargs) -> np.ndarray:
        # imshow(image)
        # plt.show()
        stream = kwargs["stream"]
        progress = kwargs["progress"]
        lap = maximum(minimum(image, disk(8)), disk(5))
        progress.progress(30)
        # imshow(lap)
        # plt.show()
        stream[1].append(lap)
        stream[0].image(stream[1], width=300)
        res = meijering(lap)
        progress.progress(60)
        # imshow(res)
        # plt.show()
        stream[1].append(res)
        stream[0].image(stream[1], width=300)
        for i, l in enumerate(res):
            for j, v in enumerate(l):
                if v >= 0.15:
                    res[i, j] = 1.0
                else:
                    res[i, j] = 0.0
            progress.progress(60 + int(40 * ((i + 1) / len(res))))
        # imshow(res)
        # plt.show()
        # stream[1].append(res)
        # stream[0].image(stream[1], width=300)
        return res
Example #15
def maximum_filter(image, kernel_shape, kernel_size):
    """Apply a maximum filter to a 2-d image.

    Parameters
    ----------
    image : np.ndarray, np.uint
        Image with shape (y, x).
    kernel_shape : str
        Shape of the kernel used to compute the filter ('diamond', 'disk',
        'rectangle' or 'square').
    kernel_size : int or Tuple(int)
        The size of the kernel. For the rectangle we expect two integers
        (width, height).

    Returns
    -------
    image_filtered : np.ndarray, np.uint
        Filtered 2-d image with shape (y, x).

    """
    # check parameters
    check_array(image, ndim=2, dtype=[np.uint8, np.uint16])
    check_parameter(kernel_shape=str, kernel_size=(int, tuple, list))

    # get kernel
    kernel = _define_kernel(shape=kernel_shape,
                            size=kernel_size,
                            dtype=image.dtype)

    # apply filter
    image_filtered = rank.maximum(image, kernel)

    return image_filtered
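
The docstring above documents a thin wrapper around `skimage.filters.rank.maximum`; `check_array`, `check_parameter` and `_define_kernel` are helpers from the wrapper's own package. A minimal sketch of the underlying call, assuming a plain square kernel built with `skimage.morphology.square`:

import numpy as np
from skimage.filters import rank
from skimage.morphology import square

image = (np.random.rand(128, 128) * 255).astype(np.uint8)

# A flat 5x5 kernel of ones; rank.maximum replaces each pixel by the local maximum.
image_filtered = rank.maximum(image, square(5))

# As Example #5 checks, rank filters preserve the input dtype.
assert image_filtered.dtype == image.dtype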
Example #16
    def __otsu_method(
            image: Union[np.ndarray, Iterable, np.uint8]) -> np.ndarray:
        selem = disk(20)
        t_otsu = otsu(image, selem=selem)
        print(t_otsu)
        imshow(t_otsu)
        # th_motsu = threshold_multiotsu(image, classes=2)
        # im = np.digitize(image, bins=th_motsu)
        # imshow(im)
        plt.show()
        test = (image * 255 + 15) <= t_otsu
        result = np.zeros(image.shape, dtype=np.uint8)
        for i, l in enumerate(test):
            for j, v in enumerate(l):
                if v:
                    result[i, j] = 255
        imshow(result)
        plt.show()
        # result = gaussian(gaussian(result, 7), 7)
        result = gaussian(result, 7)
        imshow(result)
        plt.show()
        result = minimum(maximum(result, disk(5)), disk(12))
        imshow(result)
        plt.show()
        result = gaussian(result, 3)
        imshow(result)
        plt.show()

        # return self.__ending(gaussian(self.__ending(result), 7))
        # return self.__ending(result)
        return result
Example #17
def test_smallest_selem16():
    # check that min, max and mean returns identity if structuring element
    # contains only central pixel

    image = np.zeros((5, 5), dtype=np.uint16)
    out = np.zeros_like(image)
    mask = np.ones_like(image, dtype=np.uint8)
    image[2, 2] = 255
    image[2, 3] = 128
    image[1, 2] = 16

    elem = np.array([[1]], dtype=np.uint8)
    rank.mean(image=image, selem=elem, out=out, mask=mask, shift_x=0, shift_y=0)
    assert_equal(image, out)
    rank.minimum(image=image, selem=elem, out=out, mask=mask, shift_x=0, shift_y=0)
    assert_equal(image, out)
    rank.maximum(image=image, selem=elem, out=out, mask=mask, shift_x=0, shift_y=0)
    assert_equal(image, out)
Example #18
def test_16bit():
    image = np.zeros((21, 21), dtype=np.uint16)
    selem = np.ones((3, 3), dtype=np.uint8)

    for bitdepth in range(17):
        value = 2**bitdepth - 1
        image[10, 10] = value
        assert rank.minimum(image, selem)[10, 10] == 0
        assert rank.maximum(image, selem)[10, 10] == value
        assert rank.mean(image, selem)[10, 10] == int(value / selem.size)
Example #19
def test_16bit():
    image = np.zeros((21, 21), dtype=np.uint16)
    selem = np.ones((3, 3), dtype=np.uint8)

    for bitdepth in range(17):
        value = 2 ** bitdepth - 1
        image[10, 10] = value
        assert rank.minimum(image, selem)[10, 10] == 0
        assert rank.maximum(image, selem)[10, 10] == value
        assert rank.mean(image, selem)[10, 10] == int(value / selem.size)
Example #20
        def check_all():
            selem = morphology.disk(1)
            refs = np.load(
                os.path.join(skimage.data_dir, "rank_filter_tests.npz"))

            assert_equal(refs["autolevel"], rank.autolevel(self.image, selem))
            assert_equal(refs["autolevel_percentile"],
                         rank.autolevel_percentile(self.image, selem))
            assert_equal(refs["bottomhat"], rank.bottomhat(self.image, selem))
            assert_equal(refs["equalize"], rank.equalize(self.image, selem))
            assert_equal(refs["gradient"], rank.gradient(self.image, selem))
            assert_equal(refs["gradient_percentile"],
                         rank.gradient_percentile(self.image, selem))
            assert_equal(refs["maximum"], rank.maximum(self.image, selem))
            assert_equal(refs["mean"], rank.mean(self.image, selem))
            assert_equal(refs["geometric_mean"],
                         rank.geometric_mean(self.image, selem)),
            assert_equal(refs["mean_percentile"],
                         rank.mean_percentile(self.image, selem))
            assert_equal(refs["mean_bilateral"],
                         rank.mean_bilateral(self.image, selem))
            assert_equal(refs["subtract_mean"],
                         rank.subtract_mean(self.image, selem))
            assert_equal(refs["subtract_mean_percentile"],
                         rank.subtract_mean_percentile(self.image, selem))
            assert_equal(refs["median"], rank.median(self.image, selem))
            assert_equal(refs["minimum"], rank.minimum(self.image, selem))
            assert_equal(refs["modal"], rank.modal(self.image, selem))
            assert_equal(refs["enhance_contrast"],
                         rank.enhance_contrast(self.image, selem))
            assert_equal(refs["enhance_contrast_percentile"],
                         rank.enhance_contrast_percentile(self.image, selem))
            assert_equal(refs["pop"], rank.pop(self.image, selem))
            assert_equal(refs["pop_percentile"],
                         rank.pop_percentile(self.image, selem))
            assert_equal(refs["pop_bilateral"],
                         rank.pop_bilateral(self.image, selem))
            assert_equal(refs["sum"], rank.sum(self.image, selem))
            assert_equal(refs["sum_bilateral"],
                         rank.sum_bilateral(self.image, selem))
            assert_equal(refs["sum_percentile"],
                         rank.sum_percentile(self.image, selem))
            assert_equal(refs["threshold"], rank.threshold(self.image, selem))
            assert_equal(refs["threshold_percentile"],
                         rank.threshold_percentile(self.image, selem))
            assert_equal(refs["tophat"], rank.tophat(self.image, selem))
            assert_equal(refs["noise_filter"],
                         rank.noise_filter(self.image, selem))
            assert_equal(refs["entropy"], rank.entropy(self.image, selem))
            assert_equal(refs["otsu"], rank.otsu(self.image, selem))
            assert_equal(refs["percentile"],
                         rank.percentile(self.image, selem))
            assert_equal(refs["windowed_histogram"],
                         rank.windowed_histogram(self.image, selem))
Example #21
    def test_trivial_footprint16(self):
        # check that min, max and mean returns identity if footprint
        # contains only central pixel

        image = np.zeros((5, 5), dtype=np.uint16)
        out = np.zeros_like(image)
        mask = np.ones_like(image, dtype=np.uint8)
        image[2, 2] = 255
        image[2, 3] = 128
        image[1, 2] = 16

        elem = np.array([[0, 0, 0], [0, 1, 0], [0, 0, 0]], dtype=np.uint8)
        rank.mean(image=image,
                  footprint=elem,
                  out=out,
                  mask=mask,
                  shift_x=0,
                  shift_y=0)
        assert_equal(image, out)
        rank.geometric_mean(image=image,
                            footprint=elem,
                            out=out,
                            mask=mask,
                            shift_x=0,
                            shift_y=0)
        assert_equal(image, out)
        rank.minimum(image=image,
                     footprint=elem,
                     out=out,
                     mask=mask,
                     shift_x=0,
                     shift_y=0)
        assert_equal(image, out)
        rank.maximum(image=image,
                     footprint=elem,
                     out=out,
                     mask=mask,
                     shift_x=0,
                     shift_y=0)
        assert_equal(image, out)
Example #22
def test_16bit():
    image = np.zeros((21, 21), dtype=np.uint16)
    selem = np.ones((3, 3), dtype=np.uint8)

    for bitdepth in range(17):
        value = 2**bitdepth - 1
        image[10, 10] = value
        if bitdepth > 11:
            expected = ['Bitdepth of %s' % (bitdepth - 1)]
        else:
            expected = []
        with expected_warnings(expected):
            assert rank.minimum(image, selem)[10, 10] == 0
            assert rank.maximum(image, selem)[10, 10] == value
            assert rank.mean(image, selem)[10, 10] == int(value / selem.size)
Example #23
def shanghai_chunk_code_processing(img, row_range, column_range):
    """对于上海分割验证码的处理

    :param img: 上海验证码分割后的图片
    :param row_range: 分割的横坐标范围
    :param column_range: 分割的纵坐标范围
    :return: 二值化后的分割验证码图片
    """
    img = img[column_range[0]:column_range[1], row_range[0]:row_range[1]]
    img = sfr.enhance_contrast(img, morphology.selem.disk(1))
    img = sfr.maximum(img, morphology.selem.rectangle(3, 2))
    img = morphology.dilation(img, morphology.selem.rectangle(2, 1))
    thresh = filters.threshold_otsu(img)
    img = (img >= thresh) * 1.0
    return img
Example #24
def test_16bit():
    image = np.zeros((21, 21), dtype=np.uint16)
    selem = np.ones((3, 3), dtype=np.uint8)

    for bitdepth in range(17):
        value = 2 ** bitdepth - 1
        image[10, 10] = value
        if bitdepth > 11:
            expected = ['Bitdepth of %s' % (bitdepth - 1)]
        else:
            expected = []
        with expected_warnings(expected):
            assert rank.minimum(image, selem)[10, 10] == 0
            assert rank.maximum(image, selem)[10, 10] == value
            assert rank.mean(image, selem)[10, 10] == int(value / selem.size)
Example #25
def hebei_chunk_code_processing(img, row_range, column_range):
    """对于河北分割验证码的处理

    :param img: 河北验证码分割后的图片
    :param row_range: 分割的横坐标范围
    :param column_range: 分割的纵坐标范围
    :return: 二值化后的分割验证码图片
    """
    img = img[column_range[0]:column_range[1], row_range[0]:row_range[1]]
    img = sfr.enhance_contrast(img, morphology.selem.disk(1))
    img = sfr.maximum(img, morphology.selem.rectangle(3, 2))
    img = morphology.dilation(img, morphology.selem.rectangle(2, 1))
    # thresh = filters.threshold_otsu(img)
    # img = (img >= thresh) * 1.0
    img = morphology.remove_small_objects(img, 5)
    return img
Example #26
def basic_pen_mask(image, pen_size_threshold, pen_mask_expansion):
    # NOTE: assumes an RGB_RED_CHANNEL constant analogous to the green/blue
    # ones; a green pen mark should read greener than red, whereas comparing
    # the green channel against itself is always False.
    green_mask = np.bitwise_and(
        image[:, :, RGB_GREEN_CHANNEL] > image[:, :, RGB_RED_CHANNEL],
        image[:, :, RGB_GREEN_CHANNEL] - image[:, :, RGB_RED_CHANNEL] >
        MIN_COLOR_DIFFERENCE)

    blue_mask = np.bitwise_and(
        image[:, :, RGB_BLUE_CHANNEL] > image[:, :, RGB_GREEN_CHANNEL],
        image[:, :, RGB_BLUE_CHANNEL] - image[:, :, RGB_GREEN_CHANNEL] >
        MIN_COLOR_DIFFERENCE)

    masked_pen = np.bitwise_or(green_mask, blue_mask)
    new_mask_image = remove_small_objects(masked_pen, pen_size_threshold)

    return maximum(np.where(new_mask_image, 1, 0),
                   disk(pen_mask_expansion)).astype(bool)
Example #27
    def test_16bit(self):
        image = np.zeros((21, 21), dtype=np.uint16)
        footprint = np.ones((3, 3), dtype=np.uint8)

        for bitdepth in range(17):
            value = 2**bitdepth - 1
            image[10, 10] = value
            if bitdepth >= 11:
                expected = ['Bad rank filter performance']
            else:
                expected = []
            with expected_warnings(expected):
                assert rank.minimum(image, footprint)[10, 10] == 0
                assert rank.maximum(image, footprint)[10, 10] == value
                mean_val = rank.mean(image, footprint)[10, 10]
                assert mean_val == int(value / footprint.size)
Example #28
def getseg(imagearray, settings, imgtype,
           preview_mode):  # Segments input images
    automatic, threshold, smoothing, minsize = settings
    multiplier, absolute_min = bit_depth_update(imagearray)
    imagearray2 = imagearray.copy()
    if automatic != "Manual":
        if imgtype == "region":
            if automatic == "High":
                threshold = threshold_li(
                    imagearray2
                )  # li > otsu for finding threshold when background is low
            elif automatic == "Low":
                threshold = threshold_otsu(imagearray2)
        elif imgtype == "spot":
            absolute_min *= 2
            imgmax = maximum(imagearray2 // multiplier, disk(10))
            if automatic == "High":
                threshold = (threshold_li(imgmax) * multiplier
                             )  # Generate otsu threshold for peaks.
            elif automatic == "Low":
                threshold = (threshold_otsu(imgmax) * multiplier)
        if absolute_min > threshold:
            threshold = absolute_min  # Set a minimum threshold in case an image is blank.
    mask = imagearray2 < threshold
    imagearray2[mask] = 0  # Remove background
    binary = imagearray2 > 0
    binary = remove_small_holes(binary, min_size=1000)  # Clear up any holes
    distance = ndi.distance_transform_edt(
        binary)  # Use smoothed distance transform to find the midpoints.
    blurred = ndi.gaussian_filter(distance, sigma=smoothing)
    local_maxi = peak_local_max(blurred, indices=False)
    markers = ndi.label(local_maxi)[0]  # Apply labels to each peak
    labels = watershed(-distance, markers, mask=binary)  # Watershed segment
    segmentation = clear_border(labels)  # Remove segments touching borders
    segmentation = remove_small_objects(segmentation, min_size=minsize)
    if preview_mode:
        labelled = label2rgb(segmentation,
                             image=imagearray2,
                             bg_label=0,
                             bg_color=(0, 0, 0),
                             kind='overlay')
        labelled = (labelled * 256).astype('uint8')
        return labelled
    labels = np.unique(segmentation)[1:]
    properties = skimage.measure.regionprops(segmentation,
                                             intensity_image=imagearray)
    return segmentation, properties, labels
Example #29
    def _generate_scribble_mask(self, mask):
        """ Generate the skeleton from a mask
        Given an error mask, the medial axis is computed to obtain the
        skeleton of the objects. In order to obtain smoother skeleton and
        remove small objects, an erosion and dilation operations are performed.
        The kernel size used is proportional the squared of the area.

        # Arguments
            mask: Numpy Array. Error mask

        Returns:
            skel: Numpy Array. Skeleton mask
        """
        mask = np.asarray(mask, dtype=np.uint8)
        side = np.sqrt(np.sum(mask > 0))

        mask_ = mask
        # kernel_size = int(self.kernel_size * side)
        kernel_radius = self.kernel_size * side * .5
        kernel_radius = min(kernel_radius, self.max_kernel_radius)
        logging.verbose(
            'Erosion and dilation with kernel radius: {:.1f}'.format(
                kernel_radius), 2)
        compute = True
        while kernel_radius > 1. and compute:
            kernel = disk(kernel_radius)
            mask_ = rank.minimum(mask.copy(), kernel)
            mask_ = rank.maximum(mask_, kernel)
            compute = False
            if mask_.astype(np.bool).sum() == 0:
                compute = True
                prev_kernel_radius = kernel_radius
                kernel_radius *= .9
                logging.verbose(
                    'Reducing kernel radius from {:.1f} '.format(
                        prev_kernel_radius) +
                    'pixels to {:.1f}'.format(kernel_radius), 1)

        mask_ = np.pad(mask_, ((1, 1), (1, 1)),
                       mode='constant',
                       constant_values=False)
        skel = medial_axis(mask_.astype(np.bool))
        skel = skel[1:-1, 1:-1]
        return skel
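
The docstring above describes smoothing the error mask with an erosion followed by a dilation before taking the medial axis. A minimal sketch of that idea with a fixed kernel radius (the method derives the radius from the mask area, and shrinks it when the smoothed mask empties out):

import numpy as np
from skimage.filters import rank
from skimage.morphology import disk, medial_axis

mask = np.zeros((64, 64), dtype=np.uint8)
mask[20:45, 15:50] = 255                      # a toy "error mask"

kernel = disk(3)                              # fixed radius for the sketch
opened = rank.maximum(rank.minimum(mask, kernel), kernel)  # erosion then dilation
skeleton = medial_axis(opened.astype(bool))   # boolean skeleton of the smoothed mask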
Example #30
def feng(image, mask=None,
         window_size=15, window_size2=30,
         a1=0.12, gamma=2, k1=0.25, k2=0.04,
         return_threshold=False, **kwargs):
    """
    Thresholding method as developed by [Feng2004]_.

    .. [Feng2004] Fend & Tan (2004) IEICE Electronics Express
       DOI: `10.1587/elex.1.501 <https://dx.doi.org/10.1587/elex.1.501>`_

    :param image: Input image
    :param mask: Possible mask denoting a ROI
    :param window_size: Window size
    :param window_size2: Second window size
    :param a1: a1 value
    :param gamma: gamma value
    :param k1: k1 value
    :param k2: k2 value
    :param return_threshold: Whether to return a binarization, or the actual threshold values
    :param kwargs: For compatibility
    :return:
    """

    mean, std = mean_and_std(image, window_size)

    mean2, std2 = mean_and_std(image, window_size2)

    M = rank.minimum(image, np.ones((window_size, window_size)))
    # what exactly do they mean? maximum of window?
    Rs = rank.maximum(std2.astype(np.uint8), np.ones((window_size2, window_size2)))

    if numexpr:
        threshold = numexpr.evaluate("""(
        (1 - a1) * mean + (k1 * (std / Rs) ** gamma) * (std/Rs) * (mean - M) + (k2 * (std / Rs) ** gamma) * M
        )""")
    else:
        a2 = k1 * (std / Rs) ** gamma
        a3 = k2 * (std / Rs) ** gamma
        threshold = (1 - a1) * mean + a2 * (std/Rs) * (mean - M) + a3 * M

    if return_threshold:
        return threshold
    else:
        return image < threshold
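
For reference, both branches above (with and without numexpr) evaluate the same Feng threshold,

    T = (1 - a1) * m + k1 * (s / Rs) ** gamma * (s / Rs) * (m - M) + k2 * (s / Rs) ** gamma * M

where m and s are the local mean and standard deviation over the first window, M is the local minimum of the image over the first window (rank.minimum), and Rs is the local maximum, over the second window, of the second-window standard deviation (rank.maximum).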
Example #31
    def merge(self, ori_img, mask_img, fil_light=True):
        img = ori_img.copy()
        img_s = cv2.split(img)
        if not self.if_set_border:
            print("merge base circle")
            # mask of the Hough-circle region
            border = np.fromfunction(self.func2, mask_img.shape)
        else:
            print("merge base border")
            self.border_mask = cv2.fillConvexPoly(self.border_mask,
                                                  self.border_cnt, 0)
            cv2.imwrite("out.jpg", self.border_mask)
            mask = cv2.resize(self.border_mask,
                              (ori_img.shape[1], ori_img.shape[0]),
                              cv2.INTER_NEAREST)
            # mask of the manually selected region
            border = np.where(mask == 0, True, False)
        # compute the total area
        self.all_area = np.sum(border == True)
        # overlay the detection-result mask
        border = np.where(mask_img > 0, border, False)

        # merge in the local fine-texture mask (if fine texture covers more
        # than 30% of a neighbourhood, treat the whole region as impurity)
        bad = sfr.mean(border, disk(15))
        border = np.where(bad > 255 * 0.30, True, border)  # originally 0.25
        border = np.where(bad < 255 * 0.08, False, border)  # originally 0.08

        if fil_light:  # overlay the light-spot (glare) filtering mask
            light = sfr.maximum(self.orig_gray, disk(15))
            # light_mean = sfr.mean(self.orig_gray, disk(15))
            # light=np.where(light_mean>255*0.9,light,self.orig_gray)
            border = np.where(light < 230, border, False)

        mask = np.where(border, mask_img, 0)
        img_s[0] = np.where(border, 255, img_s[0])
        img_s[1] = np.where(border, 0, img_s[1])
        img_s[2] = np.where(border, 0, img_s[2])
        img = cv2.merge(img_s)
        # compute the impurity area
        self.dirt_area = np.sum(border == True)
        return img, mask
Example #32
def check_all():
    np.random.seed(0)
    image = np.random.rand(25, 25)
    selem = morphology.disk(1)
    refs = np.load(os.path.join(skimage.data_dir, "rank_filter_tests.npz"))

    assert_equal(refs["autolevel"], rank.autolevel(image, selem))
    assert_equal(refs["autolevel_percentile"], rank.autolevel_percentile(image, selem))
    assert_equal(refs["bottomhat"], rank.bottomhat(image, selem))
    assert_equal(refs["equalize"], rank.equalize(image, selem))
    assert_equal(refs["gradient"], rank.gradient(image, selem))
    assert_equal(refs["gradient_percentile"], rank.gradient_percentile(image, selem))
    assert_equal(refs["maximum"], rank.maximum(image, selem))
    assert_equal(refs["mean"], rank.mean(image, selem))
    assert_equal(refs["mean_percentile"], rank.mean_percentile(image, selem))
    assert_equal(refs["mean_bilateral"], rank.mean_bilateral(image, selem))
    assert_equal(refs["subtract_mean"], rank.subtract_mean(image, selem))
    assert_equal(refs["subtract_mean_percentile"], rank.subtract_mean_percentile(image, selem))
    assert_equal(refs["median"], rank.median(image, selem))
    assert_equal(refs["minimum"], rank.minimum(image, selem))
    assert_equal(refs["modal"], rank.modal(image, selem))
    assert_equal(refs["enhance_contrast"], rank.enhance_contrast(image, selem))
    assert_equal(refs["enhance_contrast_percentile"], rank.enhance_contrast_percentile(image, selem))
    assert_equal(refs["pop"], rank.pop(image, selem))
    assert_equal(refs["pop_percentile"], rank.pop_percentile(image, selem))
    assert_equal(refs["pop_bilateral"], rank.pop_bilateral(image, selem))
    assert_equal(refs["sum"], rank.sum(image, selem))
    assert_equal(refs["sum_bilateral"], rank.sum_bilateral(image, selem))
    assert_equal(refs["sum_percentile"], rank.sum_percentile(image, selem))
    assert_equal(refs["threshold"], rank.threshold(image, selem))
    assert_equal(refs["threshold_percentile"], rank.threshold_percentile(image, selem))
    assert_equal(refs["tophat"], rank.tophat(image, selem))
    assert_equal(refs["noise_filter"], rank.noise_filter(image, selem))
    assert_equal(refs["entropy"], rank.entropy(image, selem))
    assert_equal(refs["otsu"], rank.otsu(image, selem))
    assert_equal(refs["percentile"], rank.percentile(image, selem))
    assert_equal(refs["windowed_histogram"], rank.windowed_histogram(image, selem))
Example #33
    def TestSample(self):  # Run pump, take samples and analyse
        global currentPhoto
        self.im1Count["text"] = "-"  # Reset text fields
        self.im2Count["text"] = "-"
        self.im3Count["text"] = "-"

        self.im1Average["text"] = "-"
        self.im2Average["text"] = "-"
        self.im3Average["text"] = "-"

        self.imFinalCount["text"] = "-"
        self.imFinalAverage["text"] = "-"
        self.sizeAct["text"] = "-"
        self.Confidence["text"] = "-"
        self.ConfDisp["bg"] = "grey"

        ##'''
        global camera
        camera.stop_preview()  # Quit preview if open

        ###########################     Run pump and take Pictures       ###############################

        self.pump_On()  # Turn on pump
        self.update_idletasks()  # Refresh Gui

        for x in range(0, 25):  # Wait 25 seconds
            self.labelCurrentAction["text"] = "Pumping Liquid - %d" % (25 - x)
            self.update_idletasks()
            time.sleep(1)
        self.pump_Off()  # Turn off pump

        for x in range(1, 4):  # Take 3 images
            self.pump_Off()
            self.labelCurrentAction["text"] = "Powder Settle Time"
            self.update_idletasks()
            time.sleep(2)
            self.labelCurrentAction["text"] = "Capturing Image %d" % x
            camera.hflip = True  # Flip camera orientation appropriately
            camera.vflip = True
            camera.capture("/home/pi/PythonTempFolder/OrigPic" + str(x) + ".jpg")  # Save image to default directory

            self.update_idletasks()
            time.sleep(2)

            if x < 3:
                self.pump_On()  # Turn on pump
                for y in range(0, 6):  # Wait 6 seconds
                    self.labelCurrentAction["text"] = "Pumping Liquid - %d" % (6 - y)
                    self.update_idletasks()
                    time.sleep(1)

                self.pump_Off()  # Turn off pump
        ##'''
        ################################################################################################

        ###########################              Analyse Pictures        ###############################
        for x in range(1, 4):
            self.labelCurrentAction["text"] = "Loading image as greyscale - im %d" % x
            self.update_idletasks()

            image1 = io.imread(
                "/home/pi/PythonTempFolder/OrigPic" + str(x) + ".jpg", as_grey=True
            )  # Load image as greyscale

            ##
            ##image1 = io.imread('/home/pi/SDP Project/PowderTests/PPIM169041/169041Pic' + str(x) + '.jpg', as_grey=True)   ##Comment Out
            ##

            self.labelCurrentAction["text"] = "Cropping"  # Crop image
            self.update_idletasks()
            fromFile = np.asarray(image1, dtype=np.float32)
            orig = fromFile[0:1080, 420:1500]
            currentPhoto = orig
            self.showCurrent()
            self.update_idletasks()
            time.sleep(2)

            self.labelCurrentAction["text"] = "Applying minimum filter"  # Apply minimum filter
            self.update_idletasks()
            image2 = minimum(orig, disk(6))
            currentPhoto = image2
            self.t.destroy()
            self.update_idletasks()
            self.showCurrent()
            self.update_idletasks()

            self.labelCurrentAction["text"] = "Applying mean filter"  # Apply mean filter
            self.update_idletasks()
            image3 = mean(image2, disk(22))
            currentPhoto = image3
            self.t.destroy()
            self.update_idletasks()
            self.showCurrent()
            self.update_idletasks()

            self.labelCurrentAction["text"] = "Applying maximum filter"  # Apply maximum filter
            self.update_idletasks()
            image4 = maximum(image3, disk(6))
            currentPhoto = image4
            self.t.destroy()
            self.update_idletasks()
            self.showCurrent()
            self.update_idletasks()
            time.sleep(2)

            self.labelCurrentAction["text"] = "Normalising"  # Subtract filtered image from original
            self.update_idletasks()
            new = np.asarray(image4, dtype=np.float32)
            new[0:, 0:] = new[0:, 0:] / 255

            sub = np.subtract(orig, new)
            sub[0:, 0:] += 128 / 255  # Scale appropriately

            imFinal = sub
            currentPhoto = sub
            self.t.destroy()
            self.update_idletasks()
            self.showCurrent()
            self.update_idletasks()
            time.sleep(1)

            self.labelCurrentAction["text"] = "Thresholding (Otsu)"  # Get Otsu threshold value from image
            self.update_idletasks()
            thresh = threshold_otsu(imFinal)  ##Threshold
            print("T - " + str(thresh))

            intensity = float(self.entryIntensity.get())  # Get manual threshold value from text field

            self.labelCurrentAction[
                "text"
            ] = "Creating Binary Image"  # Create binary image from threshold value (changed to manual - ignore otsu)
            self.update_idletasks()
            binary = sub <= intensity  # 0.095 #(thresh+0.2)
            scipy.misc.imsave(
                "/home/pi/PythonTempFolder/binary" + str(x) + ".jpg", binary
            )  # Save binary image to default directory
            currentPhoto = binary
            self.t.destroy()
            self.update_idletasks()
            self.showCurrent()
            self.update_idletasks()

            labels = label(binary)
            self.labelCurrentAction["text"] = "Analysing Particles"
            self.update_idletasks()

            counter = 0
            areaCount = 0
            Tmin = int(self.entryTmin.get())  # Get size thresholds from text input
            Tmax = int(self.entryTmax.get())
            ################################################################################################

            # Tmin = 10
            # Tmax = 300

            for region in regionprops(labels):  # Iterate through particles in the binary image
                if region.area <= Tmax and region.area >= Tmin:
                    counter = counter + 1  # Count number of particles found
                    areaCount = areaCount + region.area  # Sum area of all particles

            average = areaCount / counter  # Calculate average area
            if x == 1:
                self.im1Count["text"] = counter
                self.im1Average["text"] = round(average, 5)  # Display average image 1
                counter1 = counter
                average1 = average

            if x == 2:
                self.im2Count["text"] = counter
                self.im2Average["text"] = round(average, 5)  # Display average image 2
                counter2 = counter
                average2 = average

            if x == 3:
                self.im3Count["text"] = counter
                self.im3Average["text"] = round(average, 5)  # Display average image 3
                counter3 = counter
                average3 = average

            print(counter)
            average = areaCount / counter
            # print(average)

            self.t.destroy()
            self.update_idletasks()

        finalCount = (counter1 + counter2 + counter3) / 3  # Calculate final count all images
        finalAverage = (average1 + average2 + average3) / 3  # Calculate final average all images

        self.imFinalCount["text"] = finalCount
        self.imFinalAverage["text"] = round(finalAverage, 3)
        microns = (math.sqrt((finalAverage * 113.0989232) / 3.14159265359)) * 2  # Size approximation

        self.sizeAct["text"] = "~ " + str(round(microns, 3)) + " microns"

        maxCount = max(counter1, counter2, counter3)
        Conf = float(finalCount) / float(maxCount)
        self.Confidence["text"] = str(round(Conf, 3)) + " %"
        print(finalCount)
        # print(maxCount)
        print(Conf)

        self.ConfDisp["bg"] = "red"  # Change confidence colours
        if Conf >= 0.84:
            self.ConfDisp["bg"] = "yellow"
        if Conf >= 0.93:
            self.ConfDisp["bg"] = "green"

        self.labelCurrentAction["text"] = "Complete!"
        self.update_idletasks()
        time.sleep(2)
        self.labelCurrentAction["text"] = "idle"
        self.update_idletasks()
Example #34
onlyfiles = [ f for f in listdir(mypath) if isfile(join(mypath,f)) ]


for file in onlyfiles[0:len(onlyfiles)]:
    try:
        im = data.imread(mypath+file)

        imGray = rgb2gray(im)

    #io.imshow(imGray)
    #io.show()

        imSegmented = imGray > 0.05

        imSegmented = maximum(imSegmented, disk(10))

    #io.imshow(imSegmented)
    #io.show()

        label_image = label(imSegmented)

        maxArea = 1

        dimensionsX, dimensionsY, z = im.shape

        minY, minX, z = im.shape
        maxX = 0
        maxY = 0
        for region in regionprops(label_image):
            minr, minc, maxr, maxc = region.bbox
Example #35
# Local maximum and local minimum are the base operators for gray-level
# morphology.
#
# .. note::
#
#     `skimage.dilate` and `skimage.erode` are equivalent filters (see below
#     for comparison).
#
# Here is an example of the classical morphological gray-level filters:
# opening, closing and morphological gradient.

from skimage.filters.rank import maximum, minimum, gradient

noisy_image = img_as_ubyte(data.camera())

closing = maximum(minimum(noisy_image, disk(5)), disk(5))
opening = minimum(maximum(noisy_image, disk(5)), disk(5))
grad = gradient(noisy_image, disk(5))

# display results
fig, ax = plt.subplots(2, 2, figsize=[10, 7], sharex=True, sharey=True)
ax1, ax2, ax3, ax4 = ax.ravel()

ax1.imshow(noisy_image, cmap=plt.cm.gray)
ax1.set_title('Original')

ax2.imshow(closing, cmap=plt.cm.gray)
ax2.set_title('Gray-level closing')

ax3.imshow(opening, cmap=plt.cm.gray)
ax3.set_title('Gray-level opening')
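
The note above says local maximum and minimum are the grey-level dilation and erosion; Example #10 tests this for `rank.maximum` against `grey.dilation`. A minimal sketch of the equivalence for a flat disk footprint, using the current `skimage.morphology` names:

import numpy as np
from skimage import data
from skimage.filters.rank import maximum, minimum
from skimage.morphology import dilation, erosion, disk
from skimage.util import img_as_ubyte

img = img_as_ubyte(data.camera())
footprint = disk(5)

# With a flat footprint, the local maximum is a grey-level dilation and the
# local minimum is a grey-level erosion.
assert np.array_equal(maximum(img, footprint), dilation(img, footprint))
assert np.array_equal(minimum(img, footprint), erosion(img, footprint))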
Example #36
    def test_input_boolean_dtype(self):
        image = (np.random.rand(100, 100) * 256).astype(bool)
        elem = np.ones((3, 3), dtype=bool)
        with testing.raises(ValueError):
            rank.maximum(image=image, selem=elem)
Example #37
File: main.py  Project: Andrey-2310/DPSI
    return max([width / height, height / width])


color_map = {0: 50, 1: 100, 2: 150, 3: 200, 4: 250}


def change_colours_for_clusters(labeled_img, k_means_labels,
                                amount_of_objects):
    for i_object in range(1, amount_of_objects + 1):
        np.place(labeled_img, labeled_img == i_object,
                 color_map[k_means_labels[i_object - 1]])


img = img_as_float(imread('P0001460.jpg'))
binary_img = rank.maximum(rank.minimum(image_to_binary(rgb2grey(img)),
                                       selem=disk(2)),
                          selem=disk(2))

show_image(img)
show_image(binary_img)

labeled_array, num_features = ndimage.label(binary_img)

properties = regionprops(labeled_array, coordinates='xy')

k_means_list = list(
    map(
        lambda x: [
            x.area, x.perimeter, x.perimeter**2 / x.area,
            calculate_elongation(x.bbox), x.orientation, x.extent
        ], properties))
Example #38
            failcount += 1
        if failcount > 200:
            break  # if fail many times in row, probably done now

# resize dimensions by factor of 2
orig_imgs = imgs
imgs = [downscale_local_mean(img, (2, 2, 1)).astype(np.uint8) for img in imgs]

redness_imgs = [
    get_redness_image(img,
                      rgb_coeffs=[1.0, 2.0, 0.5],
                      rgb_offset=16,
                      threshold_offset=0.2) for img in imgs
]

redness_imgs = [maximum(redness_img, disk(2)) for redness_img in redness_imgs]

red_cropped_imgs = [img.copy() for img in imgs]
notred_cropped_imgs = [img.copy() for img in imgs]
for redimg, notredimg, redness_img in \
        zip(red_cropped_imgs, notred_cropped_imgs, redness_imgs):
    red_indices = \
        np.outer(redness_img > RED_THRESHOLD,  # note: more lenient after sqrt
                 np.array([True, True, True])).reshape(*redness_img.shape, 3)
    notred_indices = np.logical_not(red_indices)
    redimg[notred_indices] = 0
    notredimg[red_indices] = 0

img_feature_vectors = [
    feature_vector_fcn(*imgset) for imgset in zip(imgs, redness_imgs)
]
Example #39
def max_filter(img, n):
    img = sfr.maximum(img, disk(n))
    return img_as_ubyte(img)
Example #40
def cr_max(image, selem):
    return maximum(image=image, selem=selem)
Example #41
def compute_local_features(retinal_image):
    red_channel = retinal_image.preprocessed_image[:, :, 0]
    green_channel = retinal_image.preprocessed_image[:, :, 1]
    blue_channel = retinal_image.preprocessed_image[:, :, 2]
    hue_channel = color.rgb2hsv(retinal_image.preprocessed_image)[:, :, 0]
    saturation_channel = color.rgb2hsv(retinal_image.preprocessed_image)[:, :,
                                                                         1]
    value_channel = color.rgb2hsv(retinal_image.preprocessed_image)[:, :, 2]

    #mean- large
    mean_red_intensity_large = np.zeros(
        (retinal_image.labels.shape[0], retinal_image.labels.shape[1]))
    mean_blue_intensity_large = np.zeros(
        (retinal_image.labels.shape[0], retinal_image.labels.shape[1]))
    mean_green_intensity_large = np.zeros(
        (retinal_image.labels.shape[0], retinal_image.labels.shape[1]))
    mean_hue_large = np.zeros(
        (retinal_image.labels.shape[0], retinal_image.labels.shape[1]))
    mean_saturation_large = np.zeros(
        (retinal_image.labels.shape[0], retinal_image.labels.shape[1]))
    mean_value_large = np.zeros(
        (retinal_image.labels.shape[0], retinal_image.labels.shape[1]))
    #mean- small
    mean_red_intensity = np.zeros(
        (retinal_image.labels.shape[0], retinal_image.labels.shape[1]))
    mean_green_intensity = np.zeros(
        (retinal_image.labels.shape[0], retinal_image.labels.shape[1]))
    mean_blue_intensity = np.zeros(
        (retinal_image.labels.shape[0], retinal_image.labels.shape[1]))
    mean_hue = np.zeros(
        (retinal_image.labels.shape[0], retinal_image.labels.shape[1]))
    mean_saturation = np.zeros(
        (retinal_image.labels.shape[0], retinal_image.labels.shape[1]))
    mean_value = np.zeros(
        (retinal_image.labels.shape[0], retinal_image.labels.shape[1]))
    #minimum- large
    minimum_red_intensity_large = np.zeros(
        (retinal_image.labels.shape[0], retinal_image.labels.shape[1]))
    minimum_green_intensity_large = np.zeros(
        (retinal_image.labels.shape[0], retinal_image.labels.shape[1]))
    minimum_blue_intensity_large = np.zeros(
        (retinal_image.labels.shape[0], retinal_image.labels.shape[1]))
    minimum_hue_large = np.zeros(
        (retinal_image.labels.shape[0], retinal_image.labels.shape[1]))
    minimum_saturation_large = np.zeros(
        (retinal_image.labels.shape[0], retinal_image.labels.shape[1]))
    minimum_value_large = np.zeros(
        (retinal_image.labels.shape[0], retinal_image.labels.shape[1]))
    #minimum- small
    minimum_red_intensity = np.zeros(
        (retinal_image.labels.shape[0], retinal_image.labels.shape[1]))
    minimum_green_intensity = np.zeros(
        (retinal_image.labels.shape[0], retinal_image.labels.shape[1]))
    minimum_blue_intensity = np.zeros(
        (retinal_image.labels.shape[0], retinal_image.labels.shape[1]))
    minimum_hue = np.zeros(
        (retinal_image.labels.shape[0], retinal_image.labels.shape[1]))
    minimum_saturation = np.zeros(
        (retinal_image.labels.shape[0], retinal_image.labels.shape[1]))
    minimum_value = np.zeros(
        (retinal_image.labels.shape[0], retinal_image.labels.shape[1]))
    #maximum- large
    maximum_red_intensity_large = np.zeros(
        (retinal_image.labels.shape[0], retinal_image.labels.shape[1]))
    maximum_green_intensity_large = np.zeros(
        (retinal_image.labels.shape[0], retinal_image.labels.shape[1]))
    maximum_blue_intensity_large = np.zeros(
        (retinal_image.labels.shape[0], retinal_image.labels.shape[1]))
    maximum_hue_large = np.zeros(
        (retinal_image.labels.shape[0], retinal_image.labels.shape[1]))
    maximum_saturation_large = np.zeros(
        (retinal_image.labels.shape[0], retinal_image.labels.shape[1]))
    maximum_value_large = np.zeros(
        (retinal_image.labels.shape[0], retinal_image.labels.shape[1]))
    #maximum- small
    maximum_red_intensity = np.zeros(
        (retinal_image.labels.shape[0], retinal_image.labels.shape[1]))
    maximum_green_intensity = np.zeros(
        (retinal_image.labels.shape[0], retinal_image.labels.shape[1]))
    maximum_blue_intensity = np.zeros(
        (retinal_image.labels.shape[0], retinal_image.labels.shape[1]))
    maximum_hue = np.zeros(
        (retinal_image.labels.shape[0], retinal_image.labels.shape[1]))
    maximum_saturation = np.zeros(
        (retinal_image.labels.shape[0], retinal_image.labels.shape[1]))
    maximum_value = np.zeros(
        (retinal_image.labels.shape[0], retinal_image.labels.shape[1]))
    # std- large
    mean_red_intensity_large1 = np.zeros(
        (retinal_image.labels.shape[0], retinal_image.labels.shape[1]))
    mean_red_intensity_large_potency = np.zeros(
        (retinal_image.labels.shape[0], retinal_image.labels.shape[1]))
    mean_green_intensity_large1 = np.zeros(
        (retinal_image.labels.shape[0], retinal_image.labels.shape[1]))
    mean_green_intensity_large_potency = np.zeros(
        (retinal_image.labels.shape[0], retinal_image.labels.shape[1]))
    mean_blue_intensity_large1 = np.zeros(
        (retinal_image.labels.shape[0], retinal_image.labels.shape[1]))
    mean_blue_intensity_large_potency = np.zeros(
        (retinal_image.labels.shape[0], retinal_image.labels.shape[1]))
    mean_hue_large1 = np.zeros(
        (retinal_image.labels.shape[0], retinal_image.labels.shape[1]))
    mean_hue_large_potency = np.zeros(
        (retinal_image.labels.shape[0], retinal_image.labels.shape[1]))
    mean_saturation_large1 = np.zeros(
        (retinal_image.labels.shape[0], retinal_image.labels.shape[1]))
    mean_saturation_large_potency = np.zeros(
        (retinal_image.labels.shape[0], retinal_image.labels.shape[1]))
    mean_value_large1 = np.zeros(
        (retinal_image.labels.shape[0], retinal_image.labels.shape[1]))
    mean_value_large_potency = np.zeros(
        (retinal_image.labels.shape[0], retinal_image.labels.shape[1]))
    # std- small
    mean_red_intensity_1 = np.zeros(
        (retinal_image.labels.shape[0], retinal_image.labels.shape[1]))
    mean_red_intensity_potency = np.zeros(
        (retinal_image.labels.shape[0], retinal_image.labels.shape[1]))
    mean_green_intensity_1 = np.zeros(
        (retinal_image.labels.shape[0], retinal_image.labels.shape[1]))
    mean_green_intensity_potency = np.zeros(
        (retinal_image.labels.shape[0], retinal_image.labels.shape[1]))
    mean_blue_intensity_1 = np.zeros(
        (retinal_image.labels.shape[0], retinal_image.labels.shape[1]))
    mean_blue_intensity_potency = np.zeros(
        (retinal_image.labels.shape[0], retinal_image.labels.shape[1]))
    mean_hue_1 = np.zeros(
        (retinal_image.labels.shape[0], retinal_image.labels.shape[1]))
    mean_hue_potency = np.zeros(
        (retinal_image.labels.shape[0], retinal_image.labels.shape[1]))
    mean_saturation_1 = np.zeros(
        (retinal_image.labels.shape[0], retinal_image.labels.shape[1]))
    mean_saturation_potency = np.zeros(
        (retinal_image.labels.shape[0], retinal_image.labels.shape[1]))
    mean_value_1 = np.zeros(
        (retinal_image.labels.shape[0], retinal_image.labels.shape[1]))
    mean_value_potency = np.zeros(
        (retinal_image.labels.shape[0], retinal_image.labels.shape[1]))

    max_labels = np.amax(retinal_image.labels)
    distanceTransform = ndimage.distance_transform_edt(retinal_image.vessels)
    diameter = distanceTransform * retinal_image.skeletonWithoutCrossings
    meanDiameterInRegion = np.zeros(np.max(retinal_image.labels))

    for i in range(1, max_labels + 1):
        meanDiameterInRegion[i - 1] = np.mean(
            diameter[retinal_image.labels == (i)])
        disk_diameter = meanDiameterInRegion[i - 1]
        #        disk_diameter=2;
        disk_diameter_large = 2 * disk_diameter
        labels_points = (retinal_image.labels == i)
        labels_points_indexes = np.nonzero(labels_points)
        labels_points_indexes = list(labels_points_indexes)
        rows = (labels_points_indexes)[0]
        cols = (labels_points_indexes)[1]
        #labels_points_int=labels_points.astype(int)
        #mean_intensity[rows,cols]=mean(img_rgb[labels_points==True], disk(disk_diameter))
        #mean_intensity[rows,cols]=mean(red_channel[rows,cols], disk(disk_diameter))
        #mean- large
        mean_red_intensity_large_iteration = mean(red_channel,
                                                  disk(disk_diameter_large))
        mean_red_intensity_large[
            rows, cols] = mean_red_intensity_large_iteration[rows, cols]
        mean_green_intensity_large_iteration = mean(green_channel,
                                                    disk(disk_diameter_large))
        mean_green_intensity_large[
            rows, cols] = mean_green_intensity_large_iteration[rows, cols]
        mean_blue_intensity_large_iteration = mean(blue_channel,
                                                   disk(disk_diameter_large))
        mean_blue_intensity_large[
            rows, cols] = mean_blue_intensity_large_iteration[rows, cols]
        mean_hue_large_iteration = mean(hue_channel, disk(disk_diameter_large))
        mean_hue_large[rows, cols] = mean_hue_large_iteration[rows, cols]
        mean_saturation_large_iteration = mean(saturation_channel,
                                               disk(disk_diameter_large))
        mean_saturation_large[rows,
                              cols] = mean_saturation_large_iteration[rows,
                                                                      cols]
        mean_value_large_iteration = mean(value_channel,
                                          disk(disk_diameter_large))
        mean_value_large[rows, cols] = mean_value_large_iteration[rows, cols]
        #mean- small
        mean_red_intensity_iteration = mean(red_channel, disk(disk_diameter))
        mean_red_intensity[rows, cols] = mean_red_intensity_iteration[rows,
                                                                      cols]
        mean_green_intensity_iteration = mean(green_channel,
                                              disk(disk_diameter))
        mean_green_intensity[rows, cols] = mean_green_intensity_iteration[rows,
                                                                          cols]
        mean_blue_intensity_iteration = mean(blue_channel, disk(disk_diameter))
        mean_blue_intensity[rows, cols] = mean_blue_intensity_iteration[rows,
                                                                        cols]
        mean_hue_iteration = mean(hue_channel, disk(disk_diameter))
        mean_hue[rows, cols] = mean_hue_iteration[rows, cols]
        mean_saturation_iteration = mean(saturation_channel,
                                         disk(disk_diameter))
        mean_saturation[rows, cols] = mean_saturation_iteration[rows, cols]
        mean_value_iteration = mean(value_channel, disk(disk_diameter))
        mean_value[rows, cols] = mean_value_iteration[rows, cols]
        #minimum- large
        minimum_red_intensity_iteration = minimum(red_channel,
                                                  disk(disk_diameter))
        minimum_red_intensity[rows,
                              cols] = minimum_red_intensity_iteration[rows,
                                                                      cols]
        minimum_green_intensity_iteration = minimum(green_channel,
                                                    disk(disk_diameter))
        minimum_green_intensity[rows,
                                cols] = minimum_green_intensity_iteration[rows,
                                                                          cols]
        minimum_blue_intensity_iteration = minimum(blue_channel,
                                                   disk(disk_diameter))
        minimum_blue_intensity[rows,
                               cols] = minimum_blue_intensity_iteration[rows,
                                                                        cols]
        minimum_hue_iteration = minimum(hue_channel, disk(disk_diameter))
        minimum_hue[rows, cols] = minimum_hue_iteration[rows, cols]
        minimum_saturation_iteration = minimum(saturation_channel,
                                               disk(disk_diameter))
        minimum_saturation[rows, cols] = minimum_saturation_iteration[rows,
                                                                      cols]
        minimum_value_iteration = minimum(value_channel, disk(disk_diameter))
        minimum_value[rows, cols] = minimum_value_iteration[rows, cols]
        #minimum- small
        minimum_red_intensity_large_iteration = minimum(
            red_channel, disk(disk_diameter_large))
        minimum_red_intensity_large[
            rows, cols] = minimum_red_intensity_large_iteration[rows, cols]
        minimum_green_intensity_large_iteration = minimum(
            green_channel, disk(disk_diameter_large))
        minimum_green_intensity_large[
            rows, cols] = minimum_green_intensity_large_iteration[rows, cols]
        minimum_blue_intensity_large_iteration = minimum(
            blue_channel, disk(disk_diameter_large))
        minimum_blue_intensity_large[
            rows, cols] = minimum_blue_intensity_large_iteration[rows, cols]
        minimum_hue_large_iteration = minimum(hue_channel,
                                              disk(disk_diameter_large))
        minimum_hue_large[rows, cols] = minimum_hue_large_iteration[rows, cols]
        minimum_saturation_large_iteration = minimum(saturation_channel,
                                                     disk(disk_diameter_large))
        minimum_saturation_large[
            rows, cols] = minimum_saturation_large_iteration[rows, cols]
        minimum_value_large_iteration = minimum(value_channel,
                                                disk(disk_diameter_large))
        minimum_value_large[rows, cols] = minimum_value_large_iteration[rows,
                                                                        cols]
        #maximum - large disk
        maximum_red_intensity_large_iteration = maximum(
            red_channel, disk(disk_diameter_large))
        maximum_red_intensity_large[
            rows, cols] = maximum_red_intensity_large_iteration[rows, cols]
        maximum_green_intensity_large_iteration = maximum(
            green_channel, disk(disk_diameter_large))
        maximum_green_intensity_large[
            rows, cols] = maximum_green_intensity_large_iteration[rows, cols]
        maximum_blue_intensity_large_iteration = maximum(
            blue_channel, disk(disk_diameter_large))
        maximum_blue_intensity_large[
            rows, cols] = maximum_blue_intensity_large_iteration[rows, cols]
        maximum_hue_large_iteration = maximum(hue_channel,
                                              disk(disk_diameter_large))
        maximum_hue_large[rows, cols] = maximum_hue_large_iteration[rows, cols]
        maximum_saturation_large_iteration = maximum(saturation_channel,
                                                     disk(disk_diameter_large))
        maximum_saturation_large[
            rows, cols] = maximum_saturation_large_iteration[rows, cols]
        maximum_value_large_iteration = maximum(value_channel,
                                                disk(disk_diameter_large))
        maximum_value_large[rows, cols] = maximum_value_large_iteration[rows,
                                                                        cols]
        #maximum - small disk
        maximum_red_intensity_iteration = maximum(red_channel,
                                                  disk(disk_diameter))
        maximum_red_intensity[rows,
                              cols] = maximum_red_intensity_iteration[rows,
                                                                      cols]
        maximum_green_intensity_iteration = maximum(green_channel,
                                                    disk(disk_diameter))
        maximum_green_intensity[rows,
                                cols] = maximum_green_intensity_iteration[rows,
                                                                          cols]
        maximum_blue_intensity_iteration = maximum(blue_channel,
                                                   disk(disk_diameter))
        maximum_blue_intensity[rows,
                               cols] = maximum_blue_intensity_iteration[rows,
                                                                        cols]
        maximum_hue_iteration = maximum(hue_channel, disk(disk_diameter))
        maximum_hue[rows, cols] = maximum_hue_iteration[rows, cols]
        maximum_saturation_iteration = maximum(saturation_channel,
                                               disk(disk_diameter))
        maximum_saturation[rows, cols] = maximum_saturation_iteration[rows,
                                                                      cols]
        maximum_value_iteration = maximum(value_channel, disk(disk_diameter))
        maximum_value[rows, cols] = maximum_value_iteration[rows, cols]
        #std - large disk
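        # Local standard deviation via Var[X] = E[X^2] - (E[X])^2: the rank
        # mean of the squared channel estimates E[X^2] over the disk, and
        # squaring the rank mean gives (E[X])^2. The subtraction below runs in
        # the opposite order, so np.abs() restores the variance magnitude
        # before the square root. (This assumes the channel dtype can hold the
        # squared values without overflow.)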
        #std red
        mean_red_intensity_large_iteration1 = mean(red_channel**2,
                                                   disk(disk_diameter_large))
        mean_red_intensity_large1[
            rows, cols] = mean_red_intensity_large_iteration1[rows, cols]
        mean_red_intensity_large_potency_iteration = mean(
            red_channel, disk(disk_diameter_large))
        mean_red_intensity_large_potency[
            rows, cols] = mean_red_intensity_large_potency_iteration[rows,
                                                                     cols]**2
        std_red = mean_red_intensity_large_potency - mean_red_intensity_large1
        std_red = np.abs(std_red)
        std_red_final = np.sqrt(std_red)
        #std green
        mean_green_intensity_large_iteration1 = mean(green_channel**2,
                                                     disk(disk_diameter_large))
        mean_green_intensity_large1[
            rows, cols] = mean_green_intensity_large_iteration1[rows, cols]
        mean_green_intensity_large_potency_iteration = mean(
            green_channel, disk(disk_diameter_large))
        mean_green_intensity_large_potency[
            rows, cols] = mean_green_intensity_large_potency_iteration[rows,
                                                                       cols]**2
        std_green = mean_green_intensity_large_potency - mean_green_intensity_large1
        std_green = np.abs(std_green)
        std_green_final = np.sqrt(std_green)
        #std blue
        mean_blue_intensity_large_iteration1 = mean(blue_channel**2,
                                                    disk(disk_diameter_large))
        mean_blue_intensity_large1[
            rows, cols] = mean_blue_intensity_large_iteration1[rows, cols]
        mean_blue_intensity_large_potency_iteration = mean(
            blue_channel, disk(disk_diameter_large))
        mean_blue_intensity_large_potency[
            rows, cols] = mean_blue_intensity_large_potency_iteration[rows,
                                                                      cols]**2
        std_blue = mean_blue_intensity_large_potency - mean_blue_intensity_large1
        std_blue = np.abs(std_blue)
        std_blue_final = np.sqrt(std_blue)
        #std hue
        mean_hue_large_iteration1 = mean(hue_channel**2,
                                         disk(disk_diameter_large))
        mean_hue_large1[rows, cols] = mean_hue_large_iteration1[rows, cols]
        mean_hue_large_potency_iteration = mean(hue_channel,
                                                disk(disk_diameter_large))
        mean_hue_large_potency[rows, cols] = mean_hue_large_potency_iteration[
            rows, cols]**2
        std_hue = mean_hue_large_potency - mean_hue_large1
        std_hue = np.abs(std_hue)
        std_hue_final = np.sqrt(std_hue)
        #std saturation
        mean_saturation_large_iteration1 = mean(saturation_channel**2,
                                                disk(disk_diameter_large))
        mean_saturation_large1[rows,
                               cols] = mean_saturation_large_iteration1[rows,
                                                                        cols]
        mean_saturation_large_potency_iteration = mean(
            saturation_channel, disk(disk_diameter_large))
        mean_saturation_large_potency[
            rows, cols] = mean_saturation_large_potency_iteration[rows,
                                                                  cols]**2
        std_saturation = mean_saturation_large_potency - mean_saturation_large1
        std_saturation = np.abs(std_saturation)
        std_saturation_final = np.sqrt(std_saturation)
        #std value
        mean_value_large_iteration1 = mean(value_channel**2,
                                           disk(disk_diameter_large))
        mean_value_large1[rows, cols] = mean_value_large_iteration1[rows, cols]
        mean_value_large_potency_iteration = mean(value_channel,
                                                  disk(disk_diameter_large))
        mean_value_large_potency[
            rows, cols] = mean_value_large_potency_iteration[rows, cols]**2
        std_value = mean_value_large_potency - mean_value_large1
        std_value = np.abs(std_value)
        std_value_final = np.sqrt(std_value)
        #std - small disk
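        # Same E[X^2] - (E[X])^2 construction as above, now with the small disk.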
        #std red
        mean_red_intensity_iteration1 = mean(red_channel**2,
                                             disk(disk_diameter))
        mean_red_intensity_1[rows, cols] = mean_red_intensity_iteration1[rows,
                                                                         cols]
        mean_red_intensity_potency_iteration = mean(red_channel,
                                                    disk(disk_diameter))
        mean_red_intensity_potency[
            rows, cols] = mean_red_intensity_potency_iteration[rows, cols]**2
        std_red_small = mean_red_intensity_potency - mean_red_intensity_1
        std_red_small = np.abs(std_red_small)
        std_red_final_small = np.sqrt(std_red_small)
        #std green
        mean_green_intensity_iteration1 = mean(green_channel**2,
                                               disk(disk_diameter))
        mean_green_intensity_1[rows,
                               cols] = mean_green_intensity_iteration1[rows,
                                                                       cols]
        mean_green_intensity_potency_iteration = mean(green_channel,
                                                      disk(disk_diameter))
        mean_green_intensity_potency[
            rows, cols] = mean_green_intensity_potency_iteration[rows, cols]**2
        std_green_small = mean_green_intensity_potency - mean_green_intensity_1
        std_green_small = np.abs(std_green_small)
        std_green_final_small = np.sqrt(std_green_small)
        #std blue
        mean_blue_intensity_iteration1 = mean(blue_channel**2,
                                              disk(disk_diameter))
        mean_blue_intensity_1[rows,
                              cols] = mean_blue_intensity_iteration1[rows,
                                                                     cols]
        mean_blue_intensity_potency_iteration = mean(blue_channel,
                                                     disk(disk_diameter))
        mean_blue_intensity_potency[
            rows, cols] = mean_blue_intensity_potency_iteration[rows, cols]**2
        std_blue_small = mean_blue_intensity_potency - mean_blue_intensity_1
        std_blue_small = np.abs(std_blue_small)
        std_blue_final_small = np.sqrt(std_blue_small)
        #std hue
        mean_hue_iteration1 = mean(hue_channel**2, disk(disk_diameter))
        mean_hue_1[rows, cols] = mean_hue_iteration1[rows, cols]
        mean_hue_potency_iteration = mean(hue_channel, disk(disk_diameter))
        mean_hue_potency[rows, cols] = mean_hue_potency_iteration[rows,
                                                                  cols]**2
        std_hue_small = mean_hue_potency - mean_hue_1
        std_hue_small = np.abs(std_hue_small)
        std_hue_final_small = np.sqrt(std_hue_small)
        #std saturation
        mean_saturation_iteration1 = mean(saturation_channel**2,
                                          disk(disk_diameter))
        mean_saturation_1[rows, cols] = mean_saturation_iteration1[rows, cols]
        mean_saturation_potency_iteration = mean(saturation_channel,
                                                 disk(disk_diameter))
        mean_saturation_potency[
            rows, cols] = mean_saturation_potency_iteration[rows, cols]**2
        std_saturation_small = mean_saturation_potency - mean_saturation_1
        std_saturation_small = np.abs(std_saturation_small)
        std_saturation_final_small = np.sqrt(std_saturation_small)
        #std value
        mean_value_iteration1 = mean(value_channel**2, disk(disk_diameter))
        mean_value_1[rows, cols] = mean_value_iteration1[rows, cols]
        mean_value_potency_iteration = mean(value_channel, disk(disk_diameter))
        mean_value_potency[rows, cols] = mean_value_potency_iteration[rows,
                                                                      cols]**2
        std_value_small = mean_value_potency - mean_value_1
        std_value_small = np.abs(std_value_small)
        std_value_final_small = np.sqrt(std_value_small)

        print(i, ':', disk_diameter)
    return (mean_red_intensity_large, mean_green_intensity_large,
            mean_blue_intensity_large, mean_hue_large, mean_saturation_large,
            mean_value_large, mean_red_intensity, mean_green_intensity,
            mean_blue_intensity, mean_hue, mean_saturation, mean_value,
            minimum_red_intensity_large, minimum_green_intensity_large,
            minimum_blue_intensity_large, minimum_hue_large,
            minimum_saturation_large, minimum_value_large,
            minimum_red_intensity, minimum_green_intensity,
            minimum_blue_intensity, minimum_hue, minimum_saturation,
            minimum_value, maximum_red_intensity_large,
            maximum_green_intensity_large, maximum_blue_intensity_large,
            maximum_hue_large, maximum_saturation_large, maximum_value_large,
            maximum_red_intensity, maximum_green_intensity,
            maximum_blue_intensity, maximum_hue, maximum_saturation,
            maximum_value, std_red_final, std_green_final, std_blue_final,
            std_hue_final, std_saturation_final, std_value_final,
            std_red_final_small, std_green_final_small, std_blue_final_small,
            std_hue_final_small, std_saturation_final_small,
            std_value_final_small)
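

# ---------------------------------------------------------------------------
# Not part of the original example: a minimal sketch showing how the same
# per-channel rank-filter features (mean / min / max / local std at two disk
# sizes) could be collected with a loop instead of one variable per statistic.
# The function name, the `channels` dict and the returned feature dict are
# hypothetical; only the rank filters and `disk` come from the example above.
# Channels are assumed to be 8-bit, so their squares fit in uint16 for the
# E[X^2] term.
# ---------------------------------------------------------------------------
import numpy as np
from skimage.filters.rank import maximum, mean, minimum
from skimage.morphology import disk


def rank_features_sketch(channels, small_radius, large_radius):
    """channels: dict mapping a name (e.g. 'red') to a 2-D uint8 array."""
    footprints = {'small': disk(small_radius), 'large': disk(large_radius)}
    features = {}
    for name, channel in channels.items():
        for size, fp in footprints.items():
            features[f'mean_{name}_{size}'] = mean(channel, fp)
            features[f'min_{name}_{size}'] = minimum(channel, fp)
            features[f'max_{name}_{size}'] = maximum(channel, fp)
            # local std via Var[X] = E[X^2] - (E[X])^2, as in the code above
            mean_sq = mean(channel.astype(np.uint16) ** 2, fp).astype(float)
            sq_mean = mean(channel, fp).astype(float) ** 2
            features[f'std_{name}_{size}'] = np.sqrt(np.abs(mean_sq - sq_mean))
    return features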