Example #1
def test_selem_dtypes():

    image = np.zeros((5, 5), dtype=np.uint8)
    out = np.zeros_like(image)
    mask = np.ones_like(image, dtype=np.uint8)
    image[2, 2] = 255
    image[2, 3] = 128
    image[1, 2] = 16

    for dtype in (np.uint8, np.uint16, np.int32, np.int64, np.float32,
                  np.float64):
        elem = np.array([[0, 0, 0], [0, 1, 0], [0, 0, 0]], dtype=dtype)
        rank.mean(image=image,
                  selem=elem,
                  out=out,
                  mask=mask,
                  shift_x=0,
                  shift_y=0)
        assert_equal(image, out)
        rank.mean_percentile(image=image,
                             selem=elem,
                             out=out,
                             mask=mask,
                             shift_x=0,
                             shift_y=0)
        assert_equal(image, out)
Example #2
def test_random_sizes():
    # make sure the size is not a problem

    elem = np.array([[1, 1, 1], [1, 1, 1], [1, 1, 1]], dtype=np.uint8)
    for m, n in np.random.randint(1, 101, size=(10, 2)):
        mask = np.ones((m, n), dtype=np.uint8)

        image8 = np.ones((m, n), dtype=np.uint8)
        out8 = np.empty_like(image8)
        rank.mean(image=image8, selem=elem, mask=mask, out=out8,
                  shift_x=0, shift_y=0)
        assert_equal(image8.shape, out8.shape)
        rank.mean(image=image8, selem=elem, mask=mask, out=out8,
                  shift_x=+1, shift_y=+1)
        assert_equal(image8.shape, out8.shape)

        image16 = np.ones((m, n), dtype=np.uint16)
        out16 = np.empty_like(image8, dtype=np.uint16)
        rank.mean(image=image16, selem=elem, mask=mask, out=out16,
                  shift_x=0, shift_y=0)
        assert_equal(image16.shape, out16.shape)
        rank.mean(image=image16, selem=elem, mask=mask, out=out16,
                  shift_x=+1, shift_y=+1)
        assert_equal(image16.shape, out16.shape)

        rank.mean_percentile(image=image16, mask=mask, out=out16,
                             selem=elem, shift_x=0, shift_y=0, p0=.1, p1=.9)
        assert_equal(image16.shape, out16.shape)
        rank.mean_percentile(image=image16, mask=mask, out=out16,
                             selem=elem, shift_x=+1, shift_y=+1, p0=.1, p1=.9)
        assert_equal(image16.shape, out16.shape)
Example #3
def test_random_sizes():
    # make sure the size is not a problem

    niter = 10
    elem = np.array([[1, 1, 1], [1, 1, 1], [1, 1, 1]], dtype=np.uint8)
    for m, n in np.random.randint(1, 101, size=(10, 2)):
        mask = np.ones((m, n), dtype=np.uint8)

        image8 = np.ones((m, n), dtype=np.uint8)
        out8 = np.empty_like(image8)
        rank.mean(image=image8, selem=elem, mask=mask, out=out8,
                  shift_x=0, shift_y=0)
        assert_equal(image8.shape, out8.shape)
        rank.mean(image=image8, selem=elem, mask=mask, out=out8,
                  shift_x=+1, shift_y=+1)
        assert_equal(image8.shape, out8.shape)

        image16 = np.ones((m, n), dtype=np.uint16)
        out16 = np.empty_like(image8, dtype=np.uint16)
        rank.mean(image=image16, selem=elem, mask=mask, out=out16,
                  shift_x=0, shift_y=0)
        assert_equal(image16.shape, out16.shape)
        rank.mean(image=image16, selem=elem, mask=mask, out=out16,
                  shift_x=+1, shift_y=+1)
        assert_equal(image16.shape, out16.shape)

        rank.mean_percentile(image=image16, mask=mask, out=out16,
                             selem=elem, shift_x=0, shift_y=0, p0=.1, p1=.9)
        assert_equal(image16.shape, out16.shape)
        rank.mean_percentile(image=image16, mask=mask, out=out16,
                             selem=elem, shift_x=+1, shift_y=+1, p0=.1, p1=.9)
        assert_equal(image16.shape, out16.shape)
Example #4
def test_selem_dtypes():

    image = np.zeros((5, 5), dtype=np.uint8)
    out = np.zeros_like(image)
    mask = np.ones_like(image, dtype=np.uint8)
    image[2, 2] = 255
    image[2, 3] = 128
    image[1, 2] = 16

    for dtype in (np.uint8, np.uint16, np.int32, np.int64, np.float32, np.float64):
        elem = np.array([[0, 0, 0], [0, 1, 0], [0, 0, 0]], dtype=dtype)
        rank.mean(image=image, selem=elem, out=out, mask=mask, shift_x=0, shift_y=0)
        assert_equal(image, out)
        rank.mean_percentile(image=image, selem=elem, out=out, mask=mask, shift_x=0, shift_y=0)
        assert_equal(image, out)
Example #5
def test_bitdepth():
    # test the different bit depth for rank16

    elem = np.ones((3, 3), dtype=np.uint8)
    out = np.empty((100, 100), dtype=np.uint16)
    mask = np.ones((100, 100), dtype=np.uint8)

    for i in range(5):
        image = np.ones((100, 100), dtype=np.uint16) * 255 * 2 ** i
        if i > 3:
            expected = ["Bitdepth of"]
        else:
            expected = []
        with expected_warnings(expected):
            rank.mean_percentile(image=image, selem=elem, mask=mask, out=out, shift_x=0, shift_y=0, p0=0.1, p1=0.9)
Example #6
    def test_bitdepth(self):
        # test the different bit depth for rank16

        elem = np.ones((3, 3), dtype=np.uint8)
        out = np.empty((100, 100), dtype=np.uint16)
        mask = np.ones((100, 100), dtype=np.uint8)

        for i in range(5):
            image = np.ones((100, 100), dtype=np.uint16) * 255 * 2 ** i
            if i > 3:
                expected = ["Bitdepth of"]
            else:
                expected = []
            with expected_warnings(expected):
                rank.mean_percentile(image=image, selem=elem, mask=mask,
                                     out=out, shift_x=0, shift_y=0, p0=.1, p1=.9)
Example #7
    def test_bitdepth(self):
        # test the different bit depth for rank16

        elem = np.ones((3, 3), dtype=np.uint8)
        out = np.empty((100, 100), dtype=np.uint16)
        mask = np.ones((100, 100), dtype=np.uint8)

        for i in range(8, 13):
            max_val = 2 ** i - 1
            image = np.full((100, 100), max_val, dtype=np.uint16)
            if i > 10:
                expected = ["Bad rank filter performance"]
            else:
                expected = []
            with expected_warnings(expected):
                rank.mean_percentile(image=image, selem=elem, mask=mask,
                                     out=out, shift_x=0, shift_y=0, p0=.1, p1=.9)
Example #8
    def test_bitdepth(self):
        # test the different bit depth for rank16

        elem = np.ones((3, 3), dtype=np.uint8)
        out = np.empty((100, 100), dtype=np.uint16)
        mask = np.ones((100, 100), dtype=np.uint8)

        for i in range(8, 13):
            max_val = 2 ** i - 1
            image = np.full((100, 100), max_val, dtype=np.uint16)
            if i > 10:
                expected = ["Bad rank filter performance"]
            else:
                expected = []
            with expected_warnings(expected):
                rank.mean_percentile(image=image, selem=elem, mask=mask,
                                     out=out, shift_x=0, shift_y=0, p0=.1, p1=.9)
Example #9
        def check_all():
            selem = morphology.disk(1)
            refs = np.load(
                os.path.join(skimage.data_dir, "rank_filter_tests.npz"))

            assert_equal(refs["autolevel"], rank.autolevel(self.image, selem))
            assert_equal(refs["autolevel_percentile"],
                         rank.autolevel_percentile(self.image, selem))
            assert_equal(refs["bottomhat"], rank.bottomhat(self.image, selem))
            assert_equal(refs["equalize"], rank.equalize(self.image, selem))
            assert_equal(refs["gradient"], rank.gradient(self.image, selem))
            assert_equal(refs["gradient_percentile"],
                         rank.gradient_percentile(self.image, selem))
            assert_equal(refs["maximum"], rank.maximum(self.image, selem))
            assert_equal(refs["mean"], rank.mean(self.image, selem))
            assert_equal(refs["geometric_mean"],
                         rank.geometric_mean(self.image, selem)),
            assert_equal(refs["mean_percentile"],
                         rank.mean_percentile(self.image, selem))
            assert_equal(refs["mean_bilateral"],
                         rank.mean_bilateral(self.image, selem))
            assert_equal(refs["subtract_mean"],
                         rank.subtract_mean(self.image, selem))
            assert_equal(refs["subtract_mean_percentile"],
                         rank.subtract_mean_percentile(self.image, selem))
            assert_equal(refs["median"], rank.median(self.image, selem))
            assert_equal(refs["minimum"], rank.minimum(self.image, selem))
            assert_equal(refs["modal"], rank.modal(self.image, selem))
            assert_equal(refs["enhance_contrast"],
                         rank.enhance_contrast(self.image, selem))
            assert_equal(refs["enhance_contrast_percentile"],
                         rank.enhance_contrast_percentile(self.image, selem))
            assert_equal(refs["pop"], rank.pop(self.image, selem))
            assert_equal(refs["pop_percentile"],
                         rank.pop_percentile(self.image, selem))
            assert_equal(refs["pop_bilateral"],
                         rank.pop_bilateral(self.image, selem))
            assert_equal(refs["sum"], rank.sum(self.image, selem))
            assert_equal(refs["sum_bilateral"],
                         rank.sum_bilateral(self.image, selem))
            assert_equal(refs["sum_percentile"],
                         rank.sum_percentile(self.image, selem))
            assert_equal(refs["threshold"], rank.threshold(self.image, selem))
            assert_equal(refs["threshold_percentile"],
                         rank.threshold_percentile(self.image, selem))
            assert_equal(refs["tophat"], rank.tophat(self.image, selem))
            assert_equal(refs["noise_filter"],
                         rank.noise_filter(self.image, selem))
            assert_equal(refs["entropy"], rank.entropy(self.image, selem))
            assert_equal(refs["otsu"], rank.otsu(self.image, selem))
            assert_equal(refs["percentile"],
                         rank.percentile(self.image, selem))
            assert_equal(refs["windowed_histogram"],
                         rank.windowed_histogram(self.image, selem))
Example #10
def test_bitdepth():
    # test the different bit depth for rank16

    elem = np.ones((3, 3), dtype=np.uint8)
    out = np.empty((100, 100), dtype=np.uint16)
    mask = np.ones((100, 100), dtype=np.uint8)

    for i in range(5):
        image = np.ones((100, 100), dtype=np.uint16) * 255 * 2 ** i
        r = rank.mean_percentile(image=image, selem=elem, mask=mask,
                                 out=out, shift_x=0, shift_y=0, p0=.1, p1=.9)
Example #11
def _apply_meijering_filter(image, sigmas):
    smoothed = rank.mean_percentile(image, disk(5), p0=0.25, p1=0.75)
    filtered = meijering(smoothed, sigmas=sigmas, black_ridges=False)

    # The Meijering filter always evaluates to high values at the image frame;
    # we hence suppress those frame locations (set them to the filtered minimum)
    frame = np.ones_like(filtered, dtype=bool)
    d = 2 * np.max(sigmas) + 1
    frame[d:-d, d:-d] = False
    filtered[frame] = np.min(filtered)

    return filtered
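A minimal, hypothetical usage sketch for the helper above, using scikit-image's bundled camera image; the imports are assumptions, since the original snippet does not show them:

# Imports the helper relies on (assumed; not shown in the snippet above).
import numpy as np
from skimage import data
from skimage.filters import meijering, rank
from skimage.morphology import disk

# Run the ridge filter on a sample grayscale image over a few scales.
ridges = _apply_meijering_filter(data.camera(), sigmas=[1, 2, 3])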
Example #12
def test_bitdepth():
    # test the different bit depth for rank16

    elem = np.ones((3, 3), dtype=np.uint8)
    out = np.empty((100, 100), dtype=np.uint16)
    mask = np.ones((100, 100), dtype=np.uint8)

    for i in range(5):
        image = np.ones((100, 100), dtype=np.uint16) * 255 * 2**i
        r = rank.mean_percentile(image=image,
                                 selem=elem,
                                 mask=mask,
                                 out=out,
                                 shift_x=0,
                                 shift_y=0,
                                 p0=.1,
                                 p1=.9)
Example #13
# Imports assumed for this snippet (they are not shown in the original):
import numpy as np
from scipy.ndimage import maximum_filter, minimum_filter
from skimage.transform import resize
from skimage.filters.rank import mean, mean_percentile

def pyramid_decomposition(image):
    """
     The decomposition starts at the scale of the original image. The image is
     divided into non-overlapping 2x2-pixel squares, and in each square we take
     the minimum, maximum and mean of its 4 pixels. From these values we then
     build three images (of minima, maxima and means), each reduced by a factor
     of 2 horizontally and vertically relative to the original. The procedure is
     repeated, decomposing the resulting images into pyramids down to the level
     whose size is still at least 2 pixels horizontally and vertically.
    :param image: input 2-D image
    :return: three lists (maxima, minima, means), one image per pyramid level
    """
    width, height = image.shape
    factor = 2
    max_list, min_list, mean_list = [], [], []

    image_2_map = image.copy()

    map_max = maximum_filter(image_2_map, factor)
    map_min = minimum_filter(image_2_map, factor)
    map_mean = mean_percentile(image_2_map, np.ones((2, 2))) / 255.

    while min(width, height) > 3:
        width, height = int(width / factor), int(height / factor)

        map_max = resize(map_max, (width, height))
        map_min = resize(map_min, (width, height))
        map_mean = resize(map_mean, (width, height))

        width, height = map_max.shape

        max_list.append(map_max)
        min_list.append(map_min)
        mean_list.append(map_mean)

        map_max = maximum_filter(map_max, factor)
        map_min = minimum_filter(map_min, factor)
        map_mean = mean(map_mean, np.ones((2, 2)))

    return max_list, min_list, mean_list
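The docstring above describes the decomposition in terms of non-overlapping 2x2 blocks; here is a tiny NumPy-only sketch of one pyramid level built that way (an illustration of the idea, not the filter-and-resize code path used above):

import numpy as np

img = np.arange(16, dtype=float).reshape(4, 4)
# Group the image into non-overlapping 2x2 blocks: (block_row, block_col, 2, 2).
blocks = img.reshape(2, 2, 2, 2).swapaxes(1, 2)
level_min = blocks.min(axis=(2, 3))    # per-block minimum, shape (2, 2)
level_max = blocks.max(axis=(2, 3))    # per-block maximum
level_mean = blocks.mean(axis=(2, 3))  # per-block mean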
Example #14
def check_all():
    np.random.seed(0)
    image = np.random.rand(25, 25)
    selem = morphology.disk(1)
    refs = np.load(os.path.join(skimage.data_dir, "rank_filter_tests.npz"))

    assert_equal(refs["autolevel"], rank.autolevel(image, selem))
    assert_equal(refs["autolevel_percentile"], rank.autolevel_percentile(image, selem))
    assert_equal(refs["bottomhat"], rank.bottomhat(image, selem))
    assert_equal(refs["equalize"], rank.equalize(image, selem))
    assert_equal(refs["gradient"], rank.gradient(image, selem))
    assert_equal(refs["gradient_percentile"], rank.gradient_percentile(image, selem))
    assert_equal(refs["maximum"], rank.maximum(image, selem))
    assert_equal(refs["mean"], rank.mean(image, selem))
    assert_equal(refs["mean_percentile"], rank.mean_percentile(image, selem))
    assert_equal(refs["mean_bilateral"], rank.mean_bilateral(image, selem))
    assert_equal(refs["subtract_mean"], rank.subtract_mean(image, selem))
    assert_equal(refs["subtract_mean_percentile"], rank.subtract_mean_percentile(image, selem))
    assert_equal(refs["median"], rank.median(image, selem))
    assert_equal(refs["minimum"], rank.minimum(image, selem))
    assert_equal(refs["modal"], rank.modal(image, selem))
    assert_equal(refs["enhance_contrast"], rank.enhance_contrast(image, selem))
    assert_equal(refs["enhance_contrast_percentile"], rank.enhance_contrast_percentile(image, selem))
    assert_equal(refs["pop"], rank.pop(image, selem))
    assert_equal(refs["pop_percentile"], rank.pop_percentile(image, selem))
    assert_equal(refs["pop_bilateral"], rank.pop_bilateral(image, selem))
    assert_equal(refs["sum"], rank.sum(image, selem))
    assert_equal(refs["sum_bilateral"], rank.sum_bilateral(image, selem))
    assert_equal(refs["sum_percentile"], rank.sum_percentile(image, selem))
    assert_equal(refs["threshold"], rank.threshold(image, selem))
    assert_equal(refs["threshold_percentile"], rank.threshold_percentile(image, selem))
    assert_equal(refs["tophat"], rank.tophat(image, selem))
    assert_equal(refs["noise_filter"], rank.noise_filter(image, selem))
    assert_equal(refs["entropy"], rank.entropy(image, selem))
    assert_equal(refs["otsu"], rank.otsu(image, selem))
    assert_equal(refs["percentile"], rank.percentile(image, selem))
    assert_equal(refs["windowed_histogram"], rank.windowed_histogram(image, selem))
Example #15
# ** alpha-trimmed mean filter: **
# 
# Apply this function to the different corrupted Lena images, considering gray values in the range [0.05, 0.95] (an explicit p0/p1 call is sketched after this example).

# In[11]:

from skimage.filters.rank import mean_percentile

# Gaussian noise
plt.figure(1)
imgplot = io.imshow(lena_gaussian)
plt.axis('off')
plt.title('Gaussian noise')

plt.figure(2)
lena_mean_1 = mean_percentile(lena_gaussian, disk(1))
imgplot = io.imshow(lena_mean_1)

plt.figure(3)
lena_mean_3 = mean_percentile(lena_gaussian, disk(3))
imgplot = io.imshow(lena_mean_3)


# Salt and pepper noise
plt.figure(4)
imgplot = io.imshow(lena_sp)
plt.axis('off')
plt.title('Salt and pepper noise')

plt.figure(5)
lena_mean_1 = mean_percentile(lena_sp, disk(1))
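The calls above use mean_percentile's default bounds (p0=0, p1=1, i.e. a plain local mean); a minimal sketch of the alpha-trimmed call with the [0.05, 0.95] range mentioned in the comment, reusing the image and footprint names already defined in this snippet:

# Ignore the darkest 5% and brightest 5% of each neighborhood before averaging.
lena_trimmed = mean_percentile(lena_gaussian, disk(3), p0=0.05, p1=0.95)
imgplot = io.imshow(lena_trimmed)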
Example #16
def mean_filter(image, radius):
    """
    Create smooth segmentation boundaries by applying a local (disk-shaped) mean blur
    """
    return rank.mean_percentile(image, selem=disk(radius))
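A hypothetical call to the wrapper above; the imports are assumed (the snippet does not show them), and the selem= keyword it uses requires a scikit-image release from before the rename to footprint:

from skimage import data
from skimage.filters import rank
from skimage.morphology import disk

# Radius-5 disk, default p0=0 and p1=1 (i.e. a plain local mean).
smoothed = mean_filter(data.coins(), radius=5)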
Example #17
complete image (background and details). Bilateral mean exhibits a high
filtering rate for continuous area (i.e. background) while higher image
frequencies remain untouched.

"""
import numpy as np
import matplotlib.pyplot as plt

from skimage import data
from skimage.morphology import disk
from skimage.filters import rank

image = (data.coins()).astype(np.uint16) * 16
selem = disk(20)

percentile_result = rank.mean_percentile(image, selem=selem, p0=.1, p1=.9)
bilateral_result = rank.mean_bilateral(image, selem=selem, s0=500, s1=500)
normal_result = rank.mean(image, selem=selem)

fig, axes = plt.subplots(nrows=2,
                         ncols=2,
                         figsize=(8, 10),
                         sharex=True,
                         sharey=True)
ax = axes.ravel()

titles = ['Original', 'Percentile mean', 'Bilateral mean', 'Local mean']
imgs = [image, percentile_result, bilateral_result, normal_result]
for n in range(0, len(imgs)):
    ax[n].imshow(imgs[n])
    ax[n].set_title(titles[n])
Example #18
filtering rate for continuous area (i.e. background) while higher image
frequencies remain untouched.
"""

import matplotlib.pyplot as plt

from skimage import data
from skimage.morphology import disk
from skimage.filters import rank


image = data.coins()
footprint = disk(20)

percentile_result = rank.mean_percentile(
    image, footprint=footprint, p0=.1, p1=.9
)
bilateral_result = rank.mean_bilateral(
    image, footprint=footprint, s0=500, s1=500
)
normal_result = rank.mean(image, footprint=footprint)

fig, axes = plt.subplots(nrows=2, ncols=2, figsize=(10, 10),
                         sharex=True, sharey=True)
ax = axes.ravel()

titles = ['Original', 'Percentile mean', 'Bilateral mean', 'Local mean']
imgs = [image, percentile_result, bilateral_result, normal_result]
for n in range(0, len(imgs)):
    ax[n].imshow(imgs[n], cmap=plt.cm.gray)
    ax[n].set_title(titles[n])
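Unlike the selem-based snippets elsewhere on this page, this example uses the footprint= keyword adopted when scikit-image renamed the parameter (version 0.19); under the older API the same percentile call would read:

# Pre-0.19 spelling of the same call.
percentile_result = rank.mean_percentile(image, selem=footprint, p0=.1, p1=.9)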
Example #19
filtering rate for continuous area (i.e. background) while higher image
frequencies remain untouched.

"""
import numpy as np
import matplotlib.pyplot as plt

from skimage import data
from skimage.morphology import disk
from skimage.filters import rank


image = (data.coins()).astype(np.uint16) * 16
selem = disk(20)

percentile_result = rank.mean_percentile(image, selem=selem, p0=0.1, p1=0.9)
bilateral_result = rank.mean_bilateral(image, selem=selem, s0=500, s1=500)
normal_result = rank.mean(image, selem=selem)


fig, axes = plt.subplots(nrows=3, figsize=(8, 10))
ax0, ax1, ax2 = axes

ax0.imshow(np.hstack((image, percentile_result)))
ax0.set_title("Percentile mean")
ax0.axis("off")

ax1.imshow(np.hstack((image, bilateral_result)))
ax1.set_title("Bilateral mean")
ax1.axis("off")
Example #20
def means_filter_perct(image):
    selem = disk(20)
    return rank.mean_percentile(image, selem=selem, p0=.1, p1=.9)