Example #1
def test_plot_plugin():
    viewer = ImageViewer(data.moon())
    plugin = PlotPlugin(image_filter=lambda x: x)
    viewer += plugin

    assert_equal(viewer.image, data.moon())
    plugin._update_original_image(data.coins())
    assert_equal(viewer.image, data.coins())
    viewer.close()
Example #2
 def coins(self):
     """Prepare some example frames using images from the skimage
     library."""
     coins = np.array([data.coins() for i in range(0, 3*61)])
     coins = coins.reshape(3, 61, *data.coins().shape)
     # Adjust each frame to mimic an X-ray edge with a sigmoid
     S = 1/(1+np.exp(-(self.K_Es-8353))) + 0.1*np.sin(4*self.K_Es-4*8353)
     coins = (coins * S.reshape(3, 61,1,1))
     # Add some noise otherwise some functions div by zero.
     coins = coins * (0.975 + np.random.rand(*coins.shape)/20)
     coins = coins.astype(np.int32)
     return coins
Example #3
def test_uniform_mode():
    """Verify the computed BRIEF descriptors with expected for uniform mode."""
    img = data.coins()

    keypoints = corner_peaks(corner_harris(img), min_distance=5, threshold_abs=0, threshold_rel=0.1)

    extractor = BRIEF(descriptor_size=8, sigma=2, mode="uniform")

    extractor.extract(img, keypoints[:8])

    expected = np.array(
        [
            [False, False, False, True, True, True, False, False],
            [True, True, True, False, True, False, False, True],
            [True, True, True, False, True, True, False, True],
            [True, True, True, True, False, True, False, True],
            [True, True, True, True, True, True, False, False],
            [True, True, True, True, True, True, True, True],
            [False, False, False, True, True, True, True, True],
            [False, True, False, True, False, True, True, True],
        ],
        dtype=bool,
    )

    assert_array_equal(extractor.descriptors, expected)
Example #4
def scikit_example_plot_label():
    image = data.coins()[50:-50, 50:-50]
    
    # apply threshold
    thresh = threshold_otsu(image)
    bw = closing(image > thresh, square(3))
    
    # remove artifacts connected to image border
    cleared = bw.copy()
    clear_border(cleared)
    
    # label image regions
    label_image = label(cleared)
    borders = np.logical_xor(bw, cleared)
    label_image[borders] = -1
    
    fig, ax = plt.subplots(ncols=1, nrows=1, figsize=(6, 6))
    ax.imshow(label_image, cmap='jet')
    
    for region in regionprops(label_image, ['Area', 'BoundingBox']):
    
        # skip small regions
        if region['Area'] < 100:
            continue
    
        # draw rectangle around segmented coins
        minr, minc, maxr, maxc = region['BoundingBox']
        rect = mpatches.Rectangle((minc, minr), maxc - minc, maxr - minr,
                                  fill=False, edgecolor='red', linewidth=2)
        ax.add_patch(rect)
    
    plt.show()
Example #5
def test_isodata_coins_image():
    coins = skimage.img_as_ubyte(data.coins())

    threshold = threshold_isodata(coins)
    assert np.floor((coins[coins <= threshold].mean() + coins[coins > threshold].mean()) / 2.0) == threshold
    assert threshold == 107

    assert threshold_isodata(coins, return_all=True) == [107]
Example #6
def main():
    """Load image, calculate optimal threshold, binarize, plot."""
    # load image
    img = data.coins()
    height, width = img.shape
    nb_pixels = height * width

    # precalculate some values for speedup
    # average pixel value
    g_avg = np.average(img)
    # P(pixel-value), i.e. #pixels-with-value / #all-pixels
    p_g = [0] * 256
    for g in range(0, 256):
        p_g[g] = np.sum(img == g) / nb_pixels

    # Otsu method
    # calculations are based on standard formulas
    q_best = None
    threshold_best = None
    img_bin_best = None
    # iterate over all possible thresholds
    for t in range(1, 255):
        img_bin = np.zeros(img.shape)
        img_bin[img >= t] = 1

        p1 = np.sum(img_bin) / nb_pixels
        p0 = 1 - p1

        g0 = np.average(img[img_bin == 0]) if np.sum(img[img_bin == 0]) > 0 else 0
        g1 = np.average(img[img_bin == 1]) if np.sum(img[img_bin == 1]) > 0 else 0

        var0 = sum([(g-g0)**2 * p_g[g] for g in range(0, t+1)])
        var1 = sum([(g-g1)**2 * p_g[g] for g in range(t+1, 256)])

        var_between = p0 * (g0 - g_avg)**2 + p1 * (g1 - g_avg)**2
        var_inner = p0 * var0**2 + p1 * var1**2

        # q is the relation of variance between classes and variance within classes
        q = var_between / var_inner if var_inner > 0 else 0

        print(t, p0, p1, g0, g1, g_avg, var_between, var_inner, q)
        if q_best is None or q_best < q:
            q_best = q
            threshold_best = t
            img_bin_best = img <= t

    # ground truth, based on scikit-image
    gt_tresh = skifilters.threshold_otsu(img)
    ground_truth = img <= gt_tresh

    # plot
    util.plot_images_grayscale(
        [img, img_bin_best, ground_truth],
        ["Image", "Otsu", "Otsu (Ground Truth)"]
    )
Example #7
def only_phase():
    im=data.coins()
    
    imf=np.fft.fft2(im)
    amp=np.abs(imf)
    phase=np.angle(imf)
    
    onlyample=np.uint8(np.abs(np.fft.ifft2(amp)))
    io.imsave('onlyample.png',onlyample)
    onlyphase=np.uint8(np.mean(amp)*np.abs(np.fft.ifft2(np.exp(1j*phase))))
    io.imsave('onlyphase.png',onlyphase)
Example #8
def test_li_coins_image():
    image = skimage.img_as_ubyte(data.coins())
    threshold = threshold_li(image)
    ce_actual = _cross_entropy(image, threshold)
    assert 94 < threshold_li(image) < 95
    assert ce_actual < _cross_entropy(image, threshold + 1)
    # In the case of the coins image, the minimum cross-entropy is achieved one
    # threshold below the one found by the iterative method. It is not clear
    # why, but `threshold_li` does find the stationary point of the function
    # (i.e. the tolerance can be reduced arbitrarily and the exact same
    # threshold is found), so it is likely some kind of histogram binning
    # effect.
    assert ce_actual < _cross_entropy(image, threshold - 2)
Example #9
 def test_mosiac_reference(self):
     """Check that a repeated mosaic of images is converted to optical
     depth.
     
     """
     # Prepare data
     coins = data.coins()
     mosaic = np.tile(coins, (4, 4))
     ref = np.random.rand(*coins.shape) + 1
     od = -np.log(coins/ref)
     expected = np.tile(od, (4, 4))
     # Call the reference correction function
     result = apply_mosaic_reference(mosaic, ref)
     np.testing.assert_almost_equal(result, expected)
Example #10
def test_minsize():
    # single-channel:
    img = data.coins()[20:168, 0:128]
    for min_size in np.arange(10, 100, 10):
        segments = felzenszwalb(img, min_size=min_size, sigma=3)
        counts = np.bincount(segments.ravel())
        # actually want to test greater or equal.
        assert_greater(counts.min() + 1, min_size)
    # multi-channel:
    coffee = data.coffee()[::4, ::4]
    for min_size in np.arange(10, 100, 10):
        segments = felzenszwalb(coffee, min_size=min_size, sigma=3)
        counts = np.bincount(segments.ravel())
        # actually want to test greater or equal.
        assert_greater(counts.min() + 1, min_size)
Example #11
def _cv_main():
    from skimage.data import camera
    from skimage.data import coins
    import numpy as np
    import matplotlib.pyplot as plt
    #U0 = plt.imread("rgb_tile_014_i01_j05 - crop.png")[:,:,0].astype('float')-0.5
    U0 = coins().astype('float') / 255.
    print(np.max(U0))
    cv = chan_vese(U0, mu=0.8, lambda1=1, lambda2=1, tol=1, maxiter=15, dt=100)
    print("Chan-Vese algorithm finished after " + str(len(cv[1])) + " iterations.")
    print(cv[1])
    plt.imshow(cv[0])
    plt.colorbar()
    plt.show()
    plt.plot(cv[1])
    plt.show()
    return
Example #12
def test_minsize():
    # single-channel:
    img = data.coins()[20:168,0:128]
    for min_size in np.arange(10, 100, 10):
        segments = felzenszwalb(img, min_size=min_size, sigma=3)
        counts = np.bincount(segments.ravel())
        # actually want to test greater or equal.
        assert_greater(counts.min() + 1, min_size)
    # multi-channel:
    coffee = data.coffee()[::4, ::4]
    for min_size in np.arange(10, 100, 10):
        segments = felzenszwalb(coffee, min_size=min_size, sigma=3)
        counts = np.bincount(segments.ravel())
        # actually want to test greater or equal.
        # the construction doesn't guarantee min_size is respected
        # after intersecting the segmentations for the colors
        assert_greater(np.mean(counts) + 1, min_size)
Example #13
def wiener_filter():
    im=data.coins()
    imf=np.fft.fft2(im)

    kernel=np.ones((1,20))
    kernel=kernel/np.sum(kernel)
    kf=np.fft.fft2(kernel,(im.shape[0],im.shape[1]))
    
    g=imf*kf
    im_g=np.uint8(np.abs(np.fft.ifft2(g)))
    h=np.fft.fft2(im_g)*np.conj(kf)/(0.001+np.abs(kf)**2)
    
    io.imsave('coins-wiener.png',np.uint8(np.abs(np.fft.ifft2(h))))
    
    im_gn=np.uint8(util.noise.random_noise(im_g,var=0.00001)*255)
    h=np.fft.fft2(im_gn)*np.conj(kf)/(0.001+np.abs(kf)**2)
    
    io.imsave('coins-wiener-noise.png',np.uint8(np.abs(np.fft.ifft2(h))))
Example #14
def test_viewer():
    astro = data.astronaut()
    coins = data.coins()

    view = ImageViewer(astro)
    import tempfile
    _, filename = tempfile.mkstemp(suffix='.png')

    view.show(False)
    view.close()
    view.save_to_file(filename)
    view.open_file(filename)
    assert_equal(view.image, astro)
    view.image = coins
    assert_equal(view.image, coins)
    view.save_to_file(filename)
    view.open_file(filename)
    view.reset_image()
    assert_equal(view.image, coins)
Example #15
def test_viewer_with_overlay():
    img = data.coins()
    ov = OverlayPlugin(image_filter=sobel)
    viewer = ImageViewer(img)
    viewer += ov

    import tempfile
    _, filename = tempfile.mkstemp(suffix='.png')

    ov.color = 3
    assert_equal(ov.color, 'yellow')
    viewer.save_to_file(filename)
    ov.display_filtered_image(img)
    assert_equal(ov.overlay, img)
    ov.overlay = None
    assert_equal(ov.overlay, None)
    ov.overlay = img
    assert_equal(ov.overlay, img)
    assert_equal(ov.filtered_image, img)
Example #16
def test_normal_mode():
    """Verify the computed BRIEF descriptors with expected for normal mode."""
    img = data.coins()

    keypoints = corner_peaks(corner_harris(img), min_distance=5)

    extractor = BRIEF(descriptor_size=8, sigma=2)

    extractor.extract(img, keypoints[:8])

    expected = np.array([[False,  True, False, False,  True, False,  True, False],
                         [ True, False,  True,  True, False,  True, False, False],
                         [ True, False, False,  True, False,  True, False,  True],
                         [ True,  True,  True,  True, False,  True, False,  True],
                         [ True,  True,  True, False, False,  True,  True,  True],
                         [False, False, False, False,  True, False, False, False],
                         [False,  True, False, False,  True, False,  True, False],
                         [False, False, False, False, False, False, False, False]], dtype=bool)

    assert_array_equal(extractor.descriptors, expected)
Example #17
def geometry_transform():
    im=data.coins()
    imtf=np.zeros(im.shape,dtype=np.uint8)
    x,y=im.shape
    s=y/2
    for i in range(x):
        for j in range(y):
            newj=s+np.sign(j-s)*(np.abs(j-s)/s)**2*s
            if newj>=0 and newj<y:
                imtf[i,j]=im[i,int(newj)]
    io.imsave('coins-tf.png',imtf)
    
    iminv=np.zeros(im.shape,dtype=np.uint8)
    for i in range(x):
        for j in range(y):
            newj=s+np.sign(j-s)*np.sqrt(np.abs(j-s)/s)*s
            if newj>=0 and newj<y:
                iminv[i,j]=imtf[i,int(newj)]
    io.imsave('coins-inverse-tf.png',iminv)
    
    
Example #18
def inverse_filter():
    im=data.coins()
    imf=np.fft.fft2(im)

    kernel=np.ones((1,20))
    kernel=kernel/np.sum(kernel)
    kf=np.fft.fft2(kernel,(im.shape[0],im.shape[1]))
    
    g=imf*kf
    im_g=np.uint8(np.abs(np.fft.ifft2(g)))
    h=np.fft.fft2(im_g)/(0.01+kf)
    
    io.imsave('coins.png',im)
    io.imsave('coins-blur.png',im_g)
    io.imsave('coins-deblur.png',np.uint8(np.abs(np.fft.ifft2(h))))
    
    im_gn=np.uint8(util.noise.random_noise(im_g,var=0.00001)*255)
    h=np.fft.fft2(im_gn)/(0.01+kf)
    
    io.imsave('coins-blur-noise.png',im_gn)
    io.imsave('coins-deblur-noise.png',np.uint8(np.abs(np.fft.ifft2(h))))
Example #19
    def draw(self):
        if not hasattr(self, 'ax'):
            self.axOriginal = self.figure.add_subplot(221)
            self.axGreyScale = self.figure.add_subplot(222)
            self.axFiltered = self.figure.add_subplot(223)
            self.axSegments = self.figure.add_subplot(224)
            self.image = data.coins()
#            self.image = imread(self.image_file)

        self.axOriginal.set_title("Original Image", fontsize=12)
        self.axOriginal.imshow(self.image)
        
        self.axGreyScale.set_title("Greyscale Image", fontsize=12)
        self.grey_image = color.rgb2grey(self.image)
        self.axGreyScale.imshow(self.grey_image, cmap = cm.Greys_r)

        self.filter_image()
#        thresh = threshold_otsu(self.grey_image)
#        self.bw_image = closing(self.grey_image > thresh, square(1))
#        self.axThreshold.imshow(self.bw_image)
        
        self.axSegments.set_title("Segmented Image", fontsize=12)
        
        self.label_image = label(self.filtered)
#        borders = np.logical_xor(self.bw_image, self.cleared)
#        self.label_image[borders] = -1
        
#        fig, ax = plt.subplots(ncols=1, nrows=1, figsize=(6, 6))
        self.axSegments.imshow(self.label_image, cmap='jet')
        
        for region in regionprops(self.label_image, ['Area', 'BoundingBox']):
        
            # skip small regions
            if region['Area'] < 100:
                continue
        
            # draw rectangle around segmented coins
            minr, minc, maxr, maxc = region['BoundingBox']
            rect = mpatches.Rectangle((minc, minr), maxc - minc, maxr - minr, fill=False, edgecolor='red', linewidth=2)
            self.axSegments.add_patch(rect)
Example #20
def test_line_profile_dynamic():
    """Test a line profile updating after an image transform"""
    image = data.coins()[:-50, :]  # shave some off to make the line lower
    image = skimage.img_as_float(image)
    viewer = ImageViewer(image)

    lp = LineProfile(limits='dtype')
    viewer += lp

    line = lp.get_profiles()[-1][0]
    assert line.size == 129
    assert_almost_equal(np.std(viewer.image), 0.208, 3)
    assert_almost_equal(np.std(line), 0.229, 3)
    assert_almost_equal(np.max(line) - np.min(line), 0.725, 1)

    viewer.image = skimage.img_as_float(median(image,
                                               selem=disk(radius=3)))

    line = lp.get_profiles()[-1][0]
    assert_almost_equal(np.std(viewer.image), 0.198, 3)
    assert_almost_equal(np.std(line), 0.220, 3)
    assert_almost_equal(np.max(line) - np.min(line), 0.639, 1)
Example #21
def profile():
    import time
    from iib.simulation import CLContext
    from skimage import io, data, transform
    gs, wgs = 256, 16

    # Load some test data
    r = transform.resize
    sigs = np.empty((gs, gs, 4), np.float32)
    sigs[:, :, 0] = r(data.coins().astype(np.float32) / 255.0, (gs, gs))
    sigs[:, :, 1] = r(data.camera().astype(np.float32) / 255.0, (gs, gs))
    sigs[:, :, 2] = r(data.text().astype(np.float32) / 255.0, (gs, gs))
    sigs[:, :, 3] = r(data.checkerboard().astype(np.float32) / 255.0, (gs, gs))
    sigs[:, :, 2] = r(io.imread("../scoring/corpus/rds/turing_001.png",
                                as_grey=True), (gs, gs))
    sigs[:, :, 3] = io.imread("../scoring/corpus/synthetic/blobs.png",
                              as_grey=True)
    sigs = sigs.reshape(gs*gs*4)

    # Set up OpenCL
    ctx = cl.create_some_context(interactive=False)
    queue = cl.CommandQueue(ctx)
    mf = cl.mem_flags
    ifmt_f = cl.ImageFormat(cl.channel_order.RGBA, cl.channel_type.FLOAT)
    bufi = cl.Image(ctx, mf.READ_ONLY, ifmt_f, (gs, gs))
    cl.enqueue_copy(queue, bufi, sigs, origin=(0, 0), region=(gs, gs))
    clctx = CLContext(ctx, queue, ifmt_f, gs, wgs)

    # Compile the kernels
    feats = cl.Program(ctx, features_cl()).build()
    rdctn = cl.Program(ctx, reduction.reduction_sum_cl()).build()
    blur2 = cl.Program(ctx, convolution.gaussian_cl([np.sqrt(2.0)]*4)).build()
    blur4 = cl.Program(ctx, convolution.gaussian_cl([np.sqrt(4.0)]*4)).build()

    iters = 500
    t0 = time.time()
    for i in range(iters):
        get_features(clctx, feats, rdctn, blur2, blur4, bufi)
    print((time.time() - t0)/iters)
Example #22
def skimage_test():

	import numpy as np
	import matplotlib.pyplot as plt

	from skimage import data
	from skimage.feature import match_template


	image = data.coins()
	coin = image[170:220, 75:130]

	result = match_template(image, coin)

	ij = np.unravel_index(np.argmax(result), result.shape)
	x, y = ij[::-1]

	fig, (ax1, ax2, ax3) = plt.subplots(ncols=3, figsize=(8, 3))

	ax1.imshow(coin)
	ax1.set_axis_off()
	ax1.set_title('template')

	ax2.imshow(image)
	ax2.set_axis_off()
	ax2.set_title('image')
	# highlight matched region
	hcoin, wcoin = coin.shape
	rect = plt.Rectangle((x, y), wcoin, hcoin, edgecolor='r', facecolor='none')
	ax2.add_patch(rect)

	ax3.imshow(result)
	ax3.set_axis_off()
	ax3.set_title('`match_template`\nresult')
	# highlight matched region
	ax3.autoscale(False)
	ax3.plot(x, y, 'o', markeredgecolor='r', markerfacecolor='none', markersize=10)

	plt.show()
Example #23
def periodic_noise():
    im=data.coins()
    io.imsave('coins.png',im)
    
    imf=np.fft.fft2(im)
    a=50  # noise frequency
    m=50  # noise amplitude
    imf[0,a]=imf[0,a]*m
    imf[a,0]=imf[a,0]*m
    imf[a,a]=imf[a,a]*m
    imf[imf.shape[0]-a,imf.shape[1]-a]=imf[imf.shape[0]-a,imf.shape[1]-a]*m
    
    im1=np.uint8(np.abs(np.fft.ifft2(imf)))
    io.imsave('coins_periodic_noise.png',im1)
    
    m=0
    imf[0,a]=imf[0,a]*m
    imf[a,0]=imf[a,0]*m
    imf[a,a]=imf[a,a]*m
    imf[imf.shape[0]-a,imf.shape[1]-a]=imf[imf.shape[0]-a,imf.shape[1]-a]*m
    im2=np.uint8(np.abs(np.fft.ifft2(imf)))
    io.imsave('coins_periodic_filted.png',im2)
Example #24
 def test_downsample_array(self):
     """Check that image downsampling works as expected."""
     coins = data.coins()
     # Test for exception on negative factors
     with self.assertRaises(ValueError):
         downsample_array(coins, factor=-1)
     # Test for exception on invalid method
     with self.assertRaises(ValueError):
         downsample_array(coins, factor=1, method='ritual sacrifice')
     # Test simple downsampling
     output = downsample_array(coins, factor=1)
     self.assertEqual(output.shape, (151, 192))
     # Test no downsampling (factor=0) returns original array
     output = downsample_array(coins, factor=0)
     self.assertIs(output, coins)
     # Test a 3D array
     coins3d = np.broadcast_to(coins, (5, *coins.shape))
     output = downsample_array(coins3d, factor=1)
     self.assertEqual(output.shape, (2, 151, 192))
     # Test a 3D array with only certain axes
     coins3d = np.broadcast_to(coins, (5, *coins.shape))
     output = downsample_array(coins3d, factor=1, axis=(1, 2))
     self.assertEqual(output.shape, (5, 151, 192))
Example #25
"""
https://statkclee.github.io/trilobite/skimage-numpy.html
--------
imshow() needs a numpy ndarray
"""
print(__doc__)

import numpy as np
import matplotlib.pyplot as plt

from skimage import data

scales = ['gray', 'magma', 'jet']

# imshow needs 2d,3d ndarray
img_coins = data.coins()
img_coffee = data.coffee()
img_cat = data.chelsea()
img_original = data.camera()

img_face_area = img_original[50:180, 160:290]  # [y,x]
img_random = np.random.random([300, 300])
img_circle = (lambda x, y: np.exp(-(x**2 + y**2) / 15))(*np.ogrid[-5:5:0.1,
                                                                  -5:5:0.1])


def draw_imgs(img, scales=['gray'], figsize=(10, 5)):
    """show 2 bisect imgs - jet / gray scale """

    fig, axs = plt.subplots(ncols=len(scales), nrows=1, figsize=figsize)
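    # The listing is cut off here. A minimal sketch of the rest of the
    # function, assuming it simply draws the image once per colour scale:
    if len(scales) == 1:
        axs = [axs]
    for ax, scale in zip(axs, scales):
        ax.imshow(img, cmap=scale)
        ax.set_title(scale)
        ax.axis('off')
    plt.show()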
Example #26
Here, we use morphological reconstruction to create a background image, which can be subtracted from the original image to **isolate bright features (regional maxima).**

First we try reconstruction by dilation starting at the edges of the image. We initialize a seed image to the minimum intensity of the image, and set its border to be the pixel values in the original image. These maximal pixels will get dilated in order to reconstruct the background image.

import numpy as np
import matplotlib.pyplot as plt

from scipy.ndimage import gaussian_filter
from skimage import data
from skimage import img_as_float
from skimage.morphology import reconstruction

### Original image

image = data.coins()
plt.imshow(image)
plt.show()

### Floating point conversion and Gaussian filtering

# Convert to float: Important for subtraction later which won't work with uint8
float_image = img_as_float(image)
filtered_image = gaussian_filter(float_image, 1)

### Reconstruction after seeding with the minimum pixel value

seed = np.copy(filtered_image)
seed[1:-1, 1:-1] = filtered_image.min()
mask = filtered_image
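
# The example stops here; a plausible next step (not part of the original
# listing) is to reconstruct the background by dilation and subtract it,
# leaving only the bright regional maxima described above.
dilated = reconstruction(seed, mask, method='dilation')

plt.imshow(filtered_image - dilated, cmap='gray')
plt.title('image - reconstructed background')
plt.show()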
Example #27
    def test_this(self):
        from skimage import data
        image = data.coins()

        print(compute_shortest_path(image, (20, 20), (25, 20)))
Example #28
def test_triangle_uint_images():
    assert(threshold_triangle(np.invert(data.text())) == 151)
    assert(threshold_triangle(data.text()) == 104)
    assert(threshold_triangle(data.coins()) == 80)
    assert(threshold_triangle(np.invert(data.coins())) == 175)
Example #29
def get_image_viewer():
    image = data.coins()
    viewer = ImageViewer(img_as_float(image))
    viewer += Plugin()
    return viewer
Example #30
"""
Display a labels layer on top of an image layer using the add_labels and
add_image APIs
"""

from skimage import data
from skimage.filters import threshold_otsu
from skimage.segmentation import clear_border
from skimage.measure import label
from skimage.morphology import closing, square, remove_small_objects
import napari

with napari.gui_qt():
    image = data.coins()[50:-50, 50:-50]

    # apply threshold
    thresh = threshold_otsu(image)
    bw = closing(image > thresh, square(4))

    # remove artifacts connected to image border
    cleared = remove_small_objects(clear_border(bw), 20)

    # label image regions
    label_image = label(cleared)

    # initialise viewer with coins image
    viewer = napari.view(coins=image, multichannel=False)

    # add the labels
    label_layer = viewer.add_labels(label_image, name='segmentation')
Example #31
    return new_arr


def final_func(matrice, matrice1, x, y):
    helper = np.zeros((3, 3))
    helper = copy(matrice, x, y)

    helper = produit_mat(helper, matrice1)
    n = somme(helper)

    return float(n)


hx = np.array([[0, -1 / 2, 0], [0, 1 / 2, 0], [0, 0, 0]])
hy = np.array([[0, 0, 0], [-1 / 2, 1 / 2, 0], [0, 0, 0]])
img = (data.coins())
img_outx = img.copy()
img_outy = img.copy()
line, row = img.shape
img_outx = reshaping(img_outx)
img_outy = reshaping(img_outy)
print(img_outx)
line1, row1 = np.shape(img_outx)
helper = np.zeros((3, 3))
for x in range(1, line1 - 1):
    for y in range(1, row - 1):
        img_outx[x][y] = final_func(img_outx, hx, x, y)
        img_outy[x][y] = final_func(img_outy, hy, x, y)
img_outx = decrease(img_outx)
img_outy = decrease(img_outy)
#print(img_outx)
Example #32
def test_li_coins_image_as_float():
    coins = skimage.img_as_float(data.coins())
    assert 0.37 < threshold_li(coins) < 0.38
Example #33
# -*- coding: utf-8 -*-
# Import library
import numpy as np
import matplotlib.pyplot as plt
from scipy.ndimage import gaussian_filter
from skimage import data
from skimage import img_as_float
from skimage.morphology import reconstruction

# Convert to float: Important for subtraction later which won't work with uint8
image = img_as_float(data.coins())
image = gaussian_filter(image, 1)

seed = np.copy(image)
seed[1:-1, 1:-1] = image.min()
mask = image

dilated = reconstruction(seed, mask, method='dilation')

# Display the images
fig = plt.figure(figsize=(20, 20))
a = fig.add_subplot(1, 3, 1)
plt.imshow(image, cmap=plt.get_cmap('gray'))
a.set_title('Mask')
plt.axis('off')

a = fig.add_subplot(1, 3, 2)
plt.imshow(seed, cmap=plt.get_cmap('gray'))
a.set_title('Marker')
plt.axis('off')
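
# Assumed completion of the truncated figure: show the result of the
# reconstruction by dilation in the third panel, then the background-subtracted
# image that the float conversion above was preparing for.
a = fig.add_subplot(1, 3, 3)
plt.imshow(dilated, cmap=plt.get_cmap('gray'))
a.set_title('Reconstruction by dilation')
plt.axis('off')
plt.show()

plt.imshow(image - dilated, cmap=plt.get_cmap('gray'))
plt.title('image - dilated')
plt.axis('off')
plt.show()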
Example #34
def test_yen_coins_image():
    coins = skimage.img_as_ubyte(data.coins())
    assert 109 < threshold_yen(coins) < 111
Example #35
"""
Note that the accumulator size is built to be larger than the
original picture in order to detect centers outside the frame.
Its size is extended by two times the larger radius.

"""
import numpy as np
import matplotlib.pyplot as plt

from skimage import data, color
from skimage.transform import hough_circle
from skimage.feature import peak_local_max, canny
from skimage.draw import circle_perimeter
from skimage.util import img_as_ubyte

# Load picture and detect edges
image = img_as_ubyte(data.coins()[0:95, 70:370])
edges = canny(image, sigma=3, low_threshold=10, high_threshold=50)

fig, ax = plt.subplots(ncols=1, nrows=1, figsize=(5, 2))

# Detect two radii
hough_radii = np.arange(15, 30, 2)
hough_res = hough_circle(edges, hough_radii)

centers = []
accums = []
radii = []

for radius, h in zip(hough_radii, hough_res):
    # For each radius, extract two circles
    num_peaks = 2
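    # The snippet is truncated here. A sketch of the usual continuation: keep
    # the two strongest accumulator peaks for this radius and record their
    # positions, accumulator values and radii.
    peaks = peak_local_max(h, num_peaks=num_peaks)
    centers.extend(peaks)
    accums.extend(h[peaks[:, 0], peaks[:, 1]])
    radii.extend([radius] * num_peaks)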
Example #36
def test_li_coins_image_as_float():
    coins = util.img_as_float(data.coins())
    assert 94 / 255 < threshold_li(coins) < 95 / 255
Example #37
def test_li_coins_image():
    coins = skimage.img_as_ubyte(data.coins())
    assert 95 < threshold_li(coins) < 97
Example #38
 def time_rollingball(self, radius):
     restoration.rolling_ball(data.coins(), radius=radius)
Example #39
import numpy as np
from numpy.testing import assert_equal, assert_almost_equal, run_module_suite
from skimage.feature import ORB
from skimage import data
from skimage._shared.testing import test_parallel

img = data.coins()


@test_parallel()
def test_keypoints_orb_desired_no_of_keypoints():
    detector_extractor = ORB(n_keypoints=10, fast_n=12, fast_threshold=0.20)
    detector_extractor.detect(img)

    exp_rows = np.array(
        [141., 108., 214.56, 131., 214.272, 67., 206., 177., 108., 141.])
    exp_cols = np.array(
        [323., 328., 282.24, 292., 281.664, 85., 260., 284., 328.8, 267.])

    exp_scales = np.array([1, 1, 1.44, 1, 1.728, 1, 1, 1, 1.2, 1])

    exp_orientations = np.array([
        -53.97446153, 59.5055285, -96.01885186, -149.70789506, -94.70171899,
        -45.76429535, -51.49752849, 113.57081195, 63.30428063, -79.56091118
    ])
    exp_response = np.array([
        1.01168357, 0.82934145, 0.67784179, 0.57176438, 0.56637459, 0.52248355,
        0.43696175, 0.42992376, 0.37700486, 0.36126832
    ])

    assert_almost_equal(exp_rows, detector_extractor.keypoints[:, 0])
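    # The listing is truncated after the first comparison; the remaining checks
    # presumably mirror it for the other keypoint attributes (a sketch based on
    # the arrays defined above, not the original test code).
    assert_almost_equal(exp_cols, detector_extractor.keypoints[:, 1])
    assert_almost_equal(exp_scales, detector_extractor.scales)
    assert_almost_equal(exp_response, detector_extractor.responses)
    assert_almost_equal(exp_orientations,
                        np.rad2deg(detector_extractor.orientations), decimal=4)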
Example #40
def test_otsu_coins_image_as_float():
    coins = skimage.img_as_float(data.coins())
    assert 0.41 < threshold_otsu(coins) < 0.42
Example #41
 def setUp(self):
     self.a = data.camera()
     self.b = data.coins()
Example #42
def test_li_coins_image():
    coins = skimage.img_as_ubyte(data.coins())
    assert 95 < threshold_li(coins) < 97
Example #43
 def setUp(self):
     self.src = data.coins()
     self.subset = self.src[170:220, 75:130]
Example #44
"""
===============================================================
Comparing edge-based segmentation and region-based segmentation
===============================================================

In this example, we will see how to segment objects from a background.  We use
the ``coins`` image from ``skimage.data``, which shows several coins outlined
against a darker background.
"""

import numpy as np
import matplotlib.pyplot as plt

from skimage import data

coins = data.coins()
hist = np.histogram(coins, bins=np.arange(0, 256))

plt.figure(figsize=(8, 3))
plt.subplot(121)
plt.imshow(coins, cmap=plt.cm.gray, interpolation='nearest')
plt.axis('off')
plt.subplot(122)
plt.plot(hist[1][:-1], hist[0], lw=2)
plt.title('histogram of grey values')
"""
.. image:: PLOT2RST.current_figure

Thresholding
============
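A very simple approach is to pick a single grey-level cut-off; the value used
below is an assumption for illustration rather than part of the original
listing.
"""

fig, ax = plt.subplots(figsize=(4, 3))
ax.imshow(coins > 100, cmap=plt.cm.gray, interpolation='nearest')
ax.set_title('coins > 100')
ax.axis('off')
plt.show()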
Example #45
def test_yen_coins_image():
    coins = skimage.img_as_ubyte(data.coins())
    assert 109 < threshold_yen(coins) < 111
Example #46
def test_yen_coins_image_as_float():
    coins = skimage.img_as_float(data.coins())
    assert 0.43 < threshold_yen(coins) < 0.44
Example #47
"""
computes the join of two segmentations, in which a pixel is placed in
the same segment if and only if it is in the same segment in _both_
segmentations.

"""
import numpy as np
from scipy import ndimage as ndi
import matplotlib.pyplot as plt

from skimage.filters import sobel
from skimage.segmentation import slic, join_segmentations
from skimage.morphology import watershed
from skimage.color import label2rgb
from skimage import data, img_as_float

coins = img_as_float(data.coins())

# make segmentation using edge-detection and watershed
edges = sobel(coins)
markers = np.zeros_like(coins)
foreground, background = 1, 2
markers[coins < 30.0 / 255] = background
markers[coins > 150.0 / 255] = foreground

ws = watershed(edges, markers)
seg1 = ndi.label(ws == foreground)[0]

# make segmentation using SLIC superpixels
seg2 = slic(coins, n_segments=117, max_iter=160, sigma=1, compactness=0.75,
            multichannel=False)
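
# The listing is truncated here; the join described at the top would be the
# next step (sketch): combine the watershed- and SLIC-based label images so
# that pixels share a label only if they do in both segmentations.
segj = join_segmentations(seg1, seg2)

color_join = label2rgb(segj, image=coins)
plt.imshow(color_join)
plt.axis('off')
plt.show()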
Example #48
def test_triangle_uint_images():
    assert(threshold_triangle(np.invert(data.text())) == 151)
    assert(threshold_triangle(data.text()) == 104)
    assert(threshold_triangle(data.coins()) == 80)
    assert(threshold_triangle(np.invert(data.coins())) == 175)
Example #49
import napari
import zarr
from skimage.data import coins

save_path = 'labels'
image = coins()
shape = image.shape
labels = zarr.open_array(
                         save_path, 
                         mode='w', 
                         shape=shape,
                         chunks=None, 
                         fill_value=0
                         )

with napari.gui_qt():
    v = napari.Viewer()
    v.add_image(image)
    v.add_labels(labels)
Example #50
# License: BSD 3 clause

import time

import numpy as np
from scipy.ndimage.filters import gaussian_filter
import matplotlib.pyplot as plt
from skimage.data import coins
from skimage.transform import rescale

from sklearn.feature_extraction import image
from sklearn.cluster import spectral_clustering


# load the coins as a numpy array
orig_coins = coins()

# Resize it to 20% of the original size to speed up the processing
# Applying a Gaussian filter for smoothing prior to down-scaling
# reduces aliasing artifacts.
smoothened_coins = gaussian_filter(orig_coins, sigma=2)
rescaled_coins = rescale(smoothened_coins, 0.2, mode="reflect")

# Convert the image into a graph with the value of the gradient on the
# edges.
graph = image.img_to_graph(rescaled_coins)

# Take a decreasing function of the gradient: an exponential
# The smaller beta is, the more independent the segmentation is of the
# actual image. For beta=1, the segmentation is close to a voronoi
beta = 10
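
# Assumed continuation, following the scikit-learn spectral-clustering gallery
# example this snippet appears to be based on: turn gradient magnitudes into
# affinities with a decreasing exponential, then partition the graph. The
# number of regions is an illustrative assumption.
eps = 1e-6
graph.data = np.exp(-beta * graph.data / graph.data.std()) + eps

n_regions = 25
t0 = time.time()
labels = spectral_clustering(graph, n_clusters=n_regions,
                             assign_labels="kmeans", random_state=42)
print("Spectral clustering took %.2fs" % (time.time() - t0))

labels = labels.reshape(rescaled_coins.shape)
plt.imshow(rescaled_coins, cmap=plt.cm.gray)
plt.contour(labels, levels=np.arange(n_regions) + 0.5,
            colors='r', linewidths=0.5)
plt.axis('off')
plt.show()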
Example #51
from skimage.color import rgb2gray, gray2rgb
from skimage.segmentation import mark_boundaries
import time
import matplotlib.image as mpimg
exec(
    open('/Users/Salim_Andre/Desktop/IMA/PRAT/code/pd_segmentation_1.py').read(
    ))

### DATASET

PATH_img = '/Users/Salim_Andre/Desktop/IMA/PRAT/'  # path to my own images

swans = mpimg.imread(PATH_img + 'swans.jpg')
baby = mpimg.imread(PATH_img + 'baby.jpg')

img_set = [data.astronaut(), data.camera(), data.coins(), data.checkerboard(), data.chelsea(), \
 data.coffee(), data.clock(), data.hubble_deep_field(), data.horse(), data.immunohistochemistry(), \
 data.moon(), data.page(), data.rocket(), swans, baby]

### IMAGE

I = img_as_float(img_set[4])

###	PARAMETERS FOR 1-HOMOLOGY GROUPS

n_superpixels = 400
RV_epsilon = 180
gauss_sigma = 0.5
n_events = 6
#n_pxl_min_ = 10;
density_excl = 0.0
Example #52
def test_otsu_coins_image():
    coins = skimage.img_as_ubyte(data.coins())
    assert 106 < threshold_otsu(coins) < 108
Example #53
 def peakmem_rollingball(self, radius):
     restoration.rolling_ball(data.coins(), radius=radius)
Example #54
"""
  level situated inside g-s0 and g+s1 (here g-500 and g+500)

Percentile and usual mean give similar results here; these filters smooth the
complete image (background and details). Bilateral mean exhibits a high
filtering rate for continuous area (i.e. background) while higher image
frequencies remain untouched.

"""
import numpy as np
import matplotlib.pyplot as plt

from skimage import data
from skimage.morphology import disk
from skimage.filters import rank

image = (data.coins()).astype(np.uint16) * 16
selem = disk(20)

percentile_result = rank.mean_percentile(image, selem=selem, p0=.1, p1=.9)
bilateral_result = rank.mean_bilateral(image, selem=selem, s0=500, s1=500)
normal_result = rank.mean(image, selem=selem)

fig, axes = plt.subplots(nrows=2,
                         ncols=2,
                         figsize=(8, 10),
                         sharex=True,
                         sharey=True)
ax = axes.ravel()

titles = ['Original', 'Percentile mean', 'Bilateral mean', 'Local mean']
imgs = [image, percentile_result, bilateral_result, normal_result]
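
# Assumed completion of the truncated plotting code: draw each filtered result
# with its title on the grid created above.
for n in range(len(imgs)):
    ax[n].imshow(imgs[n], cmap=plt.cm.gray)
    ax[n].set_title(titles[n])
    ax[n].axis('off')

plt.tight_layout()
plt.show()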
Example #55
def test_li_coins_image_as_float():
    coins = skimage.img_as_float(data.coins())
    assert 0.37 < threshold_li(coins) < 0.38
Example #56
 def time_rollingball_nan(self, radius):
     image = data.coins().astype(float)
     pos = np.arange(np.min(image.shape))
     image[pos, pos] = np.nan
     restoration.rolling_ball(image, radius=radius, nansafe=True)
Example #57
def test_yen_coins_image_as_float():
    coins = skimage.img_as_float(data.coins())
    assert 0.43 < threshold_yen(coins) < 0.44
Example #58
 def time_rollingball_threads(self, threads):
     restoration.rolling_ball(data.coins(), radius=100, num_threads=threads)
Example #59
def get_image_viewer():
    image = data.coins()
    viewer = ImageViewer(img_as_float(image))
    viewer += Plugin()
    return viewer
Example #60
    denom = X + Y
    denom[denom == 0] = np.inf
    frac = num / denom

    chi_sqr = 0.5 * np.sum(frac, axis=2)

    # Generate a similarity measure. It needs to be low when distance is high
    # and high when distance is low; taking the reciprocal will do this.
    # Chi squared will always be >= 0, add small value to prevent divide by 0.
    similarity = 1 / (chi_sqr + 1.0e-4)

    return similarity


# Load the `skimage.data.coins` image
img = img_as_ubyte(data.coins())

# Quantize to 16 levels of greyscale; this way the output image will have a
# 16-dimensional feature vector per pixel
quantized_img = img // 16

# Select the coin from the 4th column, second row.
# Co-ordinate ordering: [x1,y1,x2,y2]
coin_coords = [184, 100, 228, 148]  # 44 x 44 region
coin = quantized_img[coin_coords[1]:coin_coords[3],
                     coin_coords[0]:coin_coords[2]]

# Compute coin histogram and normalize
coin_hist, _ = np.histogram(coin.flatten(), bins=16, range=(0, 16))
coin_hist = coin_hist.astype(float) / np.sum(coin_hist)