Example #1
        # Nested test helper; `self.image` is provided by the enclosing
        # test class and is not shown in this excerpt.
        def check_all():
            selem = morphology.disk(1)
            refs = np.load(
                os.path.join(skimage.data_dir, "rank_filter_tests.npz"))

            assert_equal(refs["autolevel"], rank.autolevel(self.image, selem))
            assert_equal(refs["autolevel_percentile"],
                         rank.autolevel_percentile(self.image, selem))
            assert_equal(refs["bottomhat"], rank.bottomhat(self.image, selem))
            assert_equal(refs["equalize"], rank.equalize(self.image, selem))
            assert_equal(refs["gradient"], rank.gradient(self.image, selem))
            assert_equal(refs["gradient_percentile"],
                         rank.gradient_percentile(self.image, selem))
            assert_equal(refs["maximum"], rank.maximum(self.image, selem))
            assert_equal(refs["mean"], rank.mean(self.image, selem))
            assert_equal(refs["geometric_mean"],
                         rank.geometric_mean(self.image, selem)),
            assert_equal(refs["mean_percentile"],
                         rank.mean_percentile(self.image, selem))
            assert_equal(refs["mean_bilateral"],
                         rank.mean_bilateral(self.image, selem))
            assert_equal(refs["subtract_mean"],
                         rank.subtract_mean(self.image, selem))
            assert_equal(refs["subtract_mean_percentile"],
                         rank.subtract_mean_percentile(self.image, selem))
            assert_equal(refs["median"], rank.median(self.image, selem))
            assert_equal(refs["minimum"], rank.minimum(self.image, selem))
            assert_equal(refs["modal"], rank.modal(self.image, selem))
            assert_equal(refs["enhance_contrast"],
                         rank.enhance_contrast(self.image, selem))
            assert_equal(refs["enhance_contrast_percentile"],
                         rank.enhance_contrast_percentile(self.image, selem))
            assert_equal(refs["pop"], rank.pop(self.image, selem))
            assert_equal(refs["pop_percentile"],
                         rank.pop_percentile(self.image, selem))
            assert_equal(refs["pop_bilateral"],
                         rank.pop_bilateral(self.image, selem))
            assert_equal(refs["sum"], rank.sum(self.image, selem))
            assert_equal(refs["sum_bilateral"],
                         rank.sum_bilateral(self.image, selem))
            assert_equal(refs["sum_percentile"],
                         rank.sum_percentile(self.image, selem))
            assert_equal(refs["threshold"], rank.threshold(self.image, selem))
            assert_equal(refs["threshold_percentile"],
                         rank.threshold_percentile(self.image, selem))
            assert_equal(refs["tophat"], rank.tophat(self.image, selem))
            assert_equal(refs["noise_filter"],
                         rank.noise_filter(self.image, selem))
            assert_equal(refs["entropy"], rank.entropy(self.image, selem))
            assert_equal(refs["otsu"], rank.otsu(self.image, selem))
            assert_equal(refs["percentile"],
                         rank.percentile(self.image, selem))
            assert_equal(refs["windowed_histogram"],
                         rank.windowed_histogram(self.image, selem))
Example #2
def check_all():
    np.random.seed(0)
    image = np.random.rand(25, 25)
    selem = morphology.disk(1)
    refs = np.load(os.path.join(skimage.data_dir, "rank_filter_tests.npz"))

    assert_equal(refs["autolevel"], rank.autolevel(image, selem))
    assert_equal(refs["autolevel_percentile"], rank.autolevel_percentile(image, selem))
    assert_equal(refs["bottomhat"], rank.bottomhat(image, selem))
    assert_equal(refs["equalize"], rank.equalize(image, selem))
    assert_equal(refs["gradient"], rank.gradient(image, selem))
    assert_equal(refs["gradient_percentile"], rank.gradient_percentile(image, selem))
    assert_equal(refs["maximum"], rank.maximum(image, selem))
    assert_equal(refs["mean"], rank.mean(image, selem))
    assert_equal(refs["mean_percentile"], rank.mean_percentile(image, selem))
    assert_equal(refs["mean_bilateral"], rank.mean_bilateral(image, selem))
    assert_equal(refs["subtract_mean"], rank.subtract_mean(image, selem))
    assert_equal(refs["subtract_mean_percentile"], rank.subtract_mean_percentile(image, selem))
    assert_equal(refs["median"], rank.median(image, selem))
    assert_equal(refs["minimum"], rank.minimum(image, selem))
    assert_equal(refs["modal"], rank.modal(image, selem))
    assert_equal(refs["enhance_contrast"], rank.enhance_contrast(image, selem))
    assert_equal(refs["enhance_contrast_percentile"], rank.enhance_contrast_percentile(image, selem))
    assert_equal(refs["pop"], rank.pop(image, selem))
    assert_equal(refs["pop_percentile"], rank.pop_percentile(image, selem))
    assert_equal(refs["pop_bilateral"], rank.pop_bilateral(image, selem))
    assert_equal(refs["sum"], rank.sum(image, selem))
    assert_equal(refs["sum_bilateral"], rank.sum_bilateral(image, selem))
    assert_equal(refs["sum_percentile"], rank.sum_percentile(image, selem))
    assert_equal(refs["threshold"], rank.threshold(image, selem))
    assert_equal(refs["threshold_percentile"], rank.threshold_percentile(image, selem))
    assert_equal(refs["tophat"], rank.tophat(image, selem))
    assert_equal(refs["noise_filter"], rank.noise_filter(image, selem))
    assert_equal(refs["entropy"], rank.entropy(image, selem))
    assert_equal(refs["otsu"], rank.otsu(image, selem))
    assert_equal(refs["percentile"], rank.percentile(image, selem))
    assert_equal(refs["windowed_histogram"], rank.windowed_histogram(image, selem))
Example #3
def colorize_image(in_image, out_dir, pallet, generate_id=False):
    # print(in_image)
    image_m = {}
    parts = in_image.split('/')

    if generate_id:
        img_id = str(uuid.uuid4()).replace('-', '')[:8]
    else:
        img_id = parts[-1].replace(".png", "")

    image_m['id'] = img_id
    print("image_id=", image_m['id'])

    p_colors = get_colors(pallet)
    c_name = p_colors[0].attrib['name']

    img_rgb = img_as_float64(io.imread(in_image))
    grayscale_image = color.rgb2gray(img_rgb)

    # # Create a mask selecting regions with interesting texture.
    # noisy = rank.entropy(img_rgb, np.ones((9, 9)))
    # textured_regions = noisy > 4.25
    # # Note that using `colorize` here is a bit more difficult, since `rgb2hsv`
    # # expects an RGB image (height x width x channel), but fancy-indexing returns
    # # a set of RGB pixels (# pixels x channel).

    color_multiplier_0 = randomize_color(get_color_by_index(p_colors, 0))
    color_multiplier_1 = randomize_color(get_color_by_index(p_colors, 1))
    color_multiplier_2 = randomize_color(get_color_by_index(p_colors, 2))
    color_multiplier_3 = randomize_color(get_color_by_index(p_colors, 3))
    color_multiplier_4 = randomize_color(get_color_by_index(p_colors, 4))
    image_m['colors'] = [
        color_multiplier_0, color_multiplier_1, color_multiplier_2,
        color_multiplier_3, color_multiplier_4
    ]

    thresh = skimage.filters.threshold_otsu(grayscale_image)
    binary = grayscale_image <= thresh

    noisy = rank.threshold(grayscale_image, square(9))

    n_max = noisy.mean()

    textured_regions_0 = noisy < (n_max * .1)
    # axis=0 keeps the comparison elementwise; without it np.all collapses
    # the whole array to a single boolean.
    textured_regions_1 = np.all([noisy >= (n_max * .1), noisy <= (n_max * .3)], axis=0)
    textured_regions_2 = np.all([noisy >= (n_max * .3), noisy <= (n_max * .5)], axis=0)
    textured_regions_3 = np.all([noisy >= (n_max * .6), noisy <= (n_max * .8)], axis=0)
    textured_regions_4 = noisy >= (n_max * .8)

    masked_image = img_rgb.copy()

    masked_image[textured_regions_0, :] *= color_multiplier_0
    masked_image[textured_regions_1, :] *= color_multiplier_1
    masked_image[textured_regions_2, :] *= color_multiplier_2
    masked_image[textured_regions_3, :] *= color_multiplier_3
    masked_image[textured_regions_4, :] *= color_multiplier_4
    masked_image[binary, :] *= color_multiplier_0

    # # will add a border mask
    # mask = np.ones(shape=masked_image.shape[0:2], dtype="bool")
    # rr, cc = skimage.draw.rectangle(start=(4, 4), end=(1020, 1020))
    # mask[rr, cc] = False
    # masked_image[mask] = 0

    out_img = '{}/{}.png'.format(out_dir, image_m['id'])
    io.imsave(out_img, img_as_ubyte(masked_image))
    print('out:{} palette:{}'.format(out_img, c_name))
    image_m['out_img'] = out_img

    return image_m
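
For orientation, a hypothetical call to the function above; the input path and palette file are placeholders, and the helpers get_colors, randomize_color and get_color_by_index are assumed to be defined elsewhere in the same project:

# Hypothetical usage; 'photos/cat.png' and 'palettes/warm.xml' are placeholder paths.
meta = colorize_image('photos/cat.png', 'output', 'palettes/warm.xml', generate_id=True)
print(meta['id'], meta['out_img'])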
Example #4
# Gather images from nXp_data.npy file
print "Getting images"
membrane_images = np.load('nXp_data.npy')

# Assigning one image from array to variable pre_image
pre_image = membrane_images[0]

########
# Binary closing on image
########
print "Running closing"

close_image = closing(pre_image, selem=np.ones((4,4)))

print "Running threshold"
binary_img = threshold(close_image, selem=np.ones((200,200)))
print close_image[0]

image = binary_img
foreground = 1 - image

# image = foreground

########
# Watershed segmentation
########
print "Running watershed"

# Now we want to separate the two objects in image
# Generate the markers as local maxima of the distance to the background
distance = ndi.distance_transform_edt(foreground)  # optional: sampling=[100, 100]
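
The excerpt stops right after the distance transform. A minimal sketch of how the watershed step is commonly finished, assuming the imports already used above plus skimage.feature.peak_local_max and skimage.segmentation.watershed (skimage.morphology.watershed in older releases):

from skimage.feature import peak_local_max
from skimage.segmentation import watershed

# Markers: connected components of the local maxima of the distance map.
coords = peak_local_max(distance, footprint=np.ones((3, 3)))
peak_mask = np.zeros(distance.shape, dtype=bool)
peak_mask[tuple(coords.T)] = True
markers, _ = ndi.label(peak_mask)

# Flood the inverted distance map from the markers, restricted to the foreground.
labels = watershed(-distance, markers, mask=foreground > 0)
print("Found {} segments".format(labels.max()))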
Example #5
filtered_images = []
filtered_images.append(sfr.autolevel(original_image, disk(5)))
filtered_images.append(sfr.bottomhat(original_image, disk(5)))
filtered_images.append(sfr.tophat(original_image, disk(5)))
filtered_images.append(sfr.enhance_contrast(original_image, disk(5)))
filtered_images.append(sfr.entropy(original_image, disk(5)))
filtered_images.append(sfr.equalize(original_image, disk(5)))
filtered_images.append(sfr.gradient(original_image, disk(5)))
filtered_images.append(sfr.maximum(original_image, disk(5)))
filtered_images.append(sfr.minimum(original_image, disk(5)))
filtered_images.append(sfr.mean(original_image, disk(5)))
filtered_images.append(sfr.median(original_image, disk(5)))
filtered_images.append(sfr.modal(original_image, disk(5)))
filtered_images.append(sfr.otsu(original_image, disk(5)))
filtered_images.append(sfr.threshold(original_image, disk(5)))
filtered_images.append(sfr.subtract_mean(original_image, disk(5)))
filtered_images.append(sfr.sum(original_image, disk(5)))

name_list = [
    'autolevel', 'bottomhat', 'tophat', 'enhance_contrast', 'entropy',
    'equalize', 'gradient', 'maximum', 'minimum', 'mean', 'median', 'modal',
    'otsu', 'threshold', 'subtract_mean', 'sum'
]

fig, axes = plt.subplots(nrows=4, ncols=4, figsize=(8, 8))
axes = axes.ravel()
for ax, img, name in zip(axes, filtered_images, name_list):
    ax.imshow(img, cmap=plt.cm.gray, interpolation='nearest')
    ax.set_title(name)
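
The snippet presumably imports skimage.filters.rank as sfr; to actually render the 4x4 grid, the loop is typically followed by hiding the axis ticks and showing the figure:

# Hide ticks on every panel and render the figure (plain matplotlib calls).
for ax in axes:
    ax.axis('off')
fig.tight_layout()
plt.show()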