Example #1
    def test_convolution_upcast(self):
        i, j = np.mgrid[-5:6, -5:6]
        image = np.load(os.path.join(data_dir, 'lena_GRAY_U8.npy'))

        result1 = F.sobel(image)
        image = image.astype(float)
        result2 = F.sobel(image)

        assert_array_equal(result1, result2)
Example #2
def sobel(data, sliceId=2):
    edges = np.zeros(data.shape)
    if sliceId == 2:
        for idx in range(data.shape[2]):
            edges[:, :, idx] = skifil.sobel(data[:, :, idx])
    elif sliceId == 0:
        for idx in range(data.shape[0]):
            edges[idx, :, :] = skifil.sobel(data[idx, :, :])
    return edges
Example #3
def get_edges(img):
    edge = np.empty(img.shape)
    if len(img.shape) == 3:
        for i in range(3):
            edge[:, :, i] = imfilt.sobel(img[:, :, i])
    else:
        edge = imfilt.sobel(img)
    edge = rescale_intensity(edge)
    return edge
Example #4
    def segment(self, src):
        '''
            pychron: preprocessing cv.Mat
        '''
#        image = pychron.ndarray[:]
#         image = asarray(pychron)
        image = src[:]
        if self.use_adaptive_threshold:
#            block_size = 25
            markers = threshold_adaptive(image, self.block_size)

            n = markers[:].astype('uint8')
            n[markers == True] = 255
            n[markers == False] = 1
            markers = n

        else:
            markers = zeros_like(image)
            markers[image < self.threshold_low] = 1
            markers[image > self.threshold_high] = 255

        elmap = sobel(image, mask=image)
        wsrc = watershed(elmap, markers, mask=image)

#         wsrc = wsrc.astype('uint8')
        return invert(wsrc)
Example #5
def houghLine(img2d):
    "gray input"
    med_filter = ndimg.median_filter(img2d, size=(5, 5))
    edges = filter.sobel(med_filter / 255.)
    [H, theta, distances] = transform.hough_line(edges)
    imgsize = float(len(theta) * len(distances))
    return H.sum() / imgsize
Example #6
def watershed_3d(sphere):
    """
    Markers should be int8
    Image should be uint8
    """
   
    sphere = median_filter(sphere, 3)
    thresh = threshold_otsu(sphere)
    sphere = (sphere >= thresh) * 1
    sphere = sobel(sphere)
    
    size = (sphere.shape[0], sphere.shape[1], sphere.shape[2])
    
    marker = np.zeros(size, dtype=np.int16)
    pl.imshow(sphere[:,:,50])
    pl.show()
    # mark everything outside as background
    marker[5, :, :] = -1
    marker[size[0] - 5, :, :] = -1
    marker[:, :, 5] = -1
    marker[:, :, size[2] - 5] = -1
    marker[:, 5, :] = -1
    marker[:, size[1] - 5, :] = -1
    marker[:,0,0] = -1
    # mark everything inside as a sphere
    marker[size[0] // 2, size[1] // 2, size[2] // 2] = 5

    result = measurements.watershed_ift(sphere.astype(dtype=np.uint16), marker)
    pl.imshow(result[:,:,50])
    pl.show()
    
    return result
Example #7
    def segment(self, src):
        image = src.ndarray[:]
        if self.use_adaptive_threshold:
            block_size = 25
            markers = threshold_adaptive(image, block_size) * 255
            markers = invert(markers)

        else:
            markers = zeros_like(image)
            markers[image < self.threshold_low] = 1
            markers[image > self.threshold_high] = 255

        elmap = sobel(image, mask=image)
        wsrc = watershed(elmap, markers, mask=image)

#        elmap = ndimage.distance_transform_edt(image)
#        local_maxi = is_local_maximum(elmap, image,
#                                      ones((3, 3))
#                                      )
#        markers = ndimage.label(local_maxi)[0]
#        wsrc = watershed(-elmap, markers, mask=image)
#        fwsrc = ndimage.binary_fill_holes(out)
#        return wsrc
        if self.use_inverted_image:
            out = invert(wsrc)
        else:
            out = wsrc

#        time.sleep(1)
#        do_later(lambda:self.show_image(image, -elmap, out))
        return out
Example #8
def detect_edges(image):
    # 1. convert to grayscale image
    image_gray = rgb2gray(image)
    # 2. convolve with Sobel filter
    image_sobel = sobel(image_gray)
    # 3. compute binary edge image with threshold from Otsu's method
    image_edges = image_sobel > threshold_otsu(image_sobel)
    return image_sobel, image_edges
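For reference, a hedged usage sketch for the helper above (not part of the original source): it assumes the function is defined as shown and that the modern skimage namespaces supply the names it relies on.

from skimage import data
from skimage.color import rgb2gray
from skimage.filters import sobel, threshold_otsu  # modern home of these functions

rgb = data.astronaut()                    # RGB sample image bundled with scikit-image
gradient, edges = detect_edges(rgb)       # gradient magnitude and binary edge map
print(gradient.shape, edges.dtype, edges.mean())  # edges.mean() = fraction of edge pixels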
Example #9
def test_sobel_vertical():
    """Sobel on a vertical edge should be a vertical line."""
    i, j = np.mgrid[-5:6, -5:6]
    image = (j >= 0).astype(float)
    result = F.sobel(image) * np.sqrt(2)
    j[np.abs(i) == 5] = 10000
    assert (np.all(result[j == 0] == 1))
    assert (np.all(result[np.abs(j) > 1] == 0))
Example #10
    def __init__(self):
        self.logo = scipy_logo.ScipyLogo(radius=self.radius)
        self.mask_1 = self.logo.get_mask(self.image.shape, 'upper left')
        self.mask_2 = self.logo.get_mask(self.image.shape, 'lower right')

        edges = np.array([sobel(img) for img in self.image.T]).T
        # truncate and stretch intensity range to enhance contrast
        self.edges = rescale_intensity(edges, in_range=(0, 0.4))
Example #11
def GlaremaskSobel(Arrimg,**kwargs):
    highthresh = kwargs.get('highthresh', None)
    if highthresh is None:
        highthresh = 2000    
    sob = filter.sobel(Arrimg)
    sob[sob<highthresh] = 0
    sob[sob>0] = 1
    filled = NDIMG.binary_fill_holes(sob);
    return 1-filled
Example #12
def sobelgm(provider):
    """
    mean sobel gradient magnitude
    """
    gray = provider.as_gray()
    mag = sobel(gray)
    mag *= 100.0 / np.max(mag)

    return np.mean(mag)
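A standalone sketch of the same metric (an addition; the provider object used above is not shown in these excerpts), using scikit-image's bundled camera image and the modern skimage.filters names:

import numpy as np
from skimage import data
from skimage.filters import sobel, gaussian

def mean_sobel_gradient(gray):
    # Mean gradient magnitude, scaled so the strongest edge is 100 (as above).
    mag = sobel(gray)
    mag *= 100.0 / np.max(mag)
    return np.mean(mag)

gray = data.camera() / 255.0
print(mean_sobel_gradient(gray))                      # sharp original
print(mean_sobel_gradient(gaussian(gray, sigma=3)))   # blurred copy for comparison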
Example #13
    def test_01_01_horizontal(self):
        """Sobel on an edge should be a horizontal line"""
        i, j = np.mgrid[-5:6, -5:6]
        image = (i >= 0).astype(float)
        result = F.sobel(image)
        # Fudge the eroded points
        i[np.abs(j) == 5] = 10000
        assert (np.all(result[i == 0] == 1))
        assert (np.all(result[np.abs(i) > 1] == 0))
Example #14
def test_sobel_horizontal():
    """Sobel on a horizontal edge should be a horizontal line."""
    i, j = np.mgrid[-5:6, -5:6]
    image = (i >= 0).astype(float)
    result = F.sobel(image) * np.sqrt(2)
    # Fudge the eroded points
    i[np.abs(j) == 5] = 10000
    assert_allclose(result[i == 0], 1)
    assert (np.all(result[np.abs(i) > 1] == 0))
Example #15
	def edge(self):
		'''
		implements edge detection
		'''

		self.img = self.img.mean(2)
		self.img = filter.sobel(self.img)

		self.refreshimg()
Example #16
def dynamic_masking(image,method='edges',filter_size=7,threshold=0.005):
    """ Dynamically masks out the objects in the PIV images
    
    Parameters
    ----------
    image: image
        a two dimensional array of uint16, uint8 or similar type
        
    method: string
        'edges' or 'intensity':
        'edges' method is used for relatively dark and sharp objects, with visible edges, on 
        dark backgrounds, i.e. low contrast
        'intensity' method is useful for smooth bright objects or dark objects or vice versa, 
        i.e. images with high contrast between the object and the background
    
    filter_size: integer
        a scalar that defines the size of the Gaussian filter
    
    threshold: float
        a value of the threshold to segment the background from the object
        default value: None, replaced by skimage.filter.threshold_otsu value
            
    Returns
    -------
    image : array of the same datatype as the incoming image with the object masked out
        as a completely black region(s) of zeros (integers or floats).
    
    
    Example
    --------
    frame_a  = openpiv.tools.imread( 'Camera1-001.tif' )
    imshow(frame_a) # original
    
    frame_a = dynamic_masking(frame_a,method='edges',filter_size=7,threshold=0.005)
    imshow(frame_a) # masked 
        
    """
    imcopy = np.copy(image)
    # stretch the histogram
    image = exposure.rescale_intensity(img_as_float(image), in_range=(0, 1))
    # blur the image, low-pass
    blurback = gaussian_filter(image,filter_size)
    if method == 'edges':
        # identify edges
        edges = sobel(blurback)
        blur_edges = gaussian_filter(edges,21)
        # create the boolean mask 
        bw = (blur_edges > threshold)
        bw = binary_fill_holes(bw)
        imcopy -= blurback
        imcopy[bw] = 0.0
    elif method == 'intensity':
        background = gaussian_filter(median_filter(image,filter_size),filter_size)
        imcopy[background > threshold_otsu(background)] = 0

        
    return imcopy #image
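A hedged usage sketch for dynamic_masking (not from OpenPIV): the synthetic frame, the seed, and the import choices below are assumptions; the imports simply bind the names the function body relies on.

import numpy as np
from scipy.ndimage import gaussian_filter, median_filter, binary_fill_holes
from skimage import exposure, img_as_float
from skimage.filters import sobel, threshold_otsu  # modern name of skimage.filter

# Synthetic PIV-like frame: dim noise plus one bright disc-shaped object.
rng = np.random.default_rng(0)
frame = (rng.uniform(0.05, 0.15, (128, 128)) * 65535).astype(np.uint16)
yy, xx = np.mgrid[:128, :128]
frame[(yy - 64) ** 2 + (xx - 64) ** 2 < 20 ** 2] = 60000

masked = dynamic_masking(frame, method='intensity', filter_size=7)
print((masked == 0).sum(), "pixels masked out")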
Example #17
	def process(self, src, dest, config):

		try:
			srcImg = imread(src, flatten=True)

			destImg = filter.sobel(srcImg)
			scipy.misc.imsave(dest, destImg)

			return True
		except:
			return False
Example #18
def image_features_sobel(img, maxPixel, num_features,imageSize):
     # X is the feature vector with one row of features per image,
     # consisting of the pixel values and our metric

     X=np.zeros(num_features, dtype=np.float32)

     image = filter.sobel(img)
     edges = resize(image, (maxPixel, maxPixel))
    # Store the rescaled image pixels
     X[0:imageSize] = np.reshape(edges,(1, imageSize))

     return X
Example #19
    def run(self, workspace):
 
        image = workspace.image_set.get_image(self.image_name.value)
        nuclei_image = image.pixel_data[:,:]
        image_collection = []
      
        #
        #Get the global Threshold with Otsu algorithm and smooth nuclei image
        #
#         nuclei_smoothed = self.smooth_image(image_collection[3][0], image.mask, 1)

        global_threshold_nuclei = otsu(nuclei_image, min_threshold=0, max_threshold=1)
        print "the threshold computed by the Otsu algorithm is %f" % global_threshold_nuclei
        
        
        #
        #Binarize the "DAPI" image (nuclei) and label the nuclei
        #

        binary_nuclei = (nuclei_image >= global_threshold_nuclei)
        labeled_nuclei, object_count = scipy.ndimage.label(binary_nuclei, np.ones((3,3), bool))
        print "%d objects detected in the image" % object_count
        
        #
        #Fill holes and delete objects which touch the border.
        #labeled_nuclei is modified in place by the function
        #Filter small objects and split objects
        #
        labeled_nuclei = fill_labeled_holes(labeled_nuclei)        
        labeled_nuclei = self.filter_on_border(labeled_nuclei)
        labeled_nuclei = self.filter_on_size(labeled_nuclei, object_count)         
        labeled_nuclei = self.split_object(labeled_nuclei)
        
        #
        #Edge detection on the nuclei image so that objects are better separated
        #
        labeled_nuclei_canny = skf.sobel(labeled_nuclei)       
        labeled_nuclei[labeled_nuclei_canny > 0] = 0
        labeled_nuclei = skr.minimum(labeled_nuclei.astype(np.uint16), skm.disk(3))
        
        image_collection.append((nuclei_image, "Original"))
        image_collection.append((labeled_nuclei, "Labeled image"))
        workspace.display_data.image_collection = image_collection

        #
        #Create a new object which will be added to the workspace
        #
        objects = cellprofiler.objects.Objects()
        objects.segmented = labeled_nuclei
        objects.parent_image = nuclei_image
        
        workspace.object_set.add_objects(objects, self.object_name.value)
Example #20
def label_regions(im, mark_min=200, mark_max=230):
    elevation_map = sobel(im)

    markers = np.zeros_like(im)
    markers[im < mark_min] = 1
    markers[im > mark_max] = 2

    segmentation = morphology.watershed(elevation_map, markers)
    segmentation = ndimage.binary_fill_holes(segmentation - 1)

    labeled_regions, _ = ndimage.label(segmentation)
    
    return labeled_regions
Example #21
def morphological_clean_sp(image, segments, diameter=4):
    # remove small / thin segments by morphological closing + watershed
    # extract boundaries
    boundary = boundaries.find_boundaries(segments)
    closed = morphology.binary_closing(boundary, np.ones((diameter, diameter)))
    # extract regions
    labels = morphology.label(closed, neighbors=4, background=1)
    # watershed to get rid of boundaries
    # interestingly we can't use gPb here. It is too sharp.
    edge_image = sobel(rgb2gray(image))
    result = morphology.watershed(edge_image, labels + 1)
    # we want them to start at zero!
    return result - 1
Example #22
def delphi_sobel(image_roi, attrs={}, debug=False):
    greyscale = rgb2gray(image_roi)

    edge_magnitude = sobel(greyscale)

    H = np.histogram(edge_magnitude, bins=8, range=(0,1), density=True)[0]

    if debug:
        print "=== Sobel Texture Histogram ==="
        print H
        print

    attrs.update(('sobel%d' % i, v) for i, v in enumerate(H))
    return attrs
Example #23
def extract_features(image_path_list):
    feature_list = []
    for image_path in image_path_list:
        image_array = imread(image_path)
        img_size = image_array.size
        red_channel_mean= image_array[...,0].mean()
        green_channel_mean= image_array[...,1].mean()
        blue_channel_mean= image_array[...,2].mean()
        red_channel_sd= image_array[...,0].std()
        green_channel_sd= image_array[...,1].std()
        blue_channel_sd= image_array[...,2].std()

        #Calculating grayscale value
        imgarray1_gray = sp.inner(image_array, [299, 587, 114])
        #Location x and y treated as different features
        max_gray_x_loc = np.argwhere(imgarray1_gray.max() == imgarray1_gray)[...,0].mean()
        max_gray_y_loc = np.argwhere(imgarray1_gray.max() == imgarray1_gray)[...,1].mean()
        min_gray_x_loc = np.argwhere(imgarray1_gray.min() == imgarray1_gray)[...,0].mean()
        min_gray_y_loc = np.argwhere(imgarray1_gray.min() == imgarray1_gray)[...,1].mean()
        min_gray_x_loc_std = np.argwhere(imgarray1_gray.min() == imgarray1_gray)[...,0].std()
        min_gray_y_loc_std = np.argwhere(imgarray1_gray.min() == imgarray1_gray)[...,1].std()
        max_gray_x_loc_std = np.argwhere(imgarray1_gray.max() == imgarray1_gray)[...,0].std()
        max_gray_y_loc_std = np.argwhere(imgarray1_gray.max() == imgarray1_gray)[...,1].std()
        
        imgarray1_gray = np.array(imgarray1_gray, dtype=np.float64)
        edges = sobel(imgarray1_gray)
        edges_height = edges.shape[0]
        edges_width = edges.shape[1]
        
        
        feature_list.append([image_path.split("/")[-2],image_path.split("/")[-1], 
                             img_size,
                             red_channel_mean,
                             green_channel_mean,
                             blue_channel_mean,
                             red_channel_sd,
                             green_channel_sd,
                             blue_channel_sd,
                             max_gray_x_loc,
                             max_gray_y_loc,
                             min_gray_x_loc,
                             min_gray_y_loc,
                             max_gray_x_loc_std,
                             max_gray_y_loc_std,
                             min_gray_x_loc_std,
                             min_gray_y_loc_std,
                             edges_height,
                             edges_width                              
                             ])
    return feature_list
Example #24
def features_extractor(trimmed):
    # edge_roberts = filter.roberts(trimmed)
    edge_sobel = filter.sobel(trimmed)
    return [
        # np.median(trimmed),
        # np.sum(trimmed),
        # np.average(trimmed),
        # np.max(trimmed),
        # np.min(trimmed)
        # np.sum([x for x in edge_roberts.flatten() if x > 100]) / np.sum([x for x in edge_roberts.flatten() if x <= 100]),
        # np.sum([x for x in edge_sobel.flatten() if x > 100]) / np.sum([x for x in edge_sobel.flatten() if x <= 100])
        np.sum([x for x in edge_sobel.flatten() if x > 100]),
        len([x for x in edge_sobel.flatten() if x > 100])
    ]
Example #25
def select_area_for_detector(np_image):
    
    import numpy as np
    import pylab as pl
    
    from skimage.filter import threshold_otsu, sobel
    from skimage.morphology import label
    from skimage.measure import regionprops
    from skimage.filter import denoise_tv_chambolle
    
    pl.close('all')
    
    # Find regions
    
    image_filtered = denoise_tv_chambolle(np_image, weight=0.002)
    edges = sobel(image_filtered)
    
    nbins = 50
    threshold = threshold_otsu(edges, nbins)
    edges_bin = edges >= threshold
    
    label_image = label(edges_bin)
    
    areas = []
    areas_full = []
    bord = []
    
    # Extract information from the regions
    
    for region in regionprops(label_image, ['Area', 'BoundingBox', 'Label']):
        
        # Skip wrong regions
        index = np.where(label_image==region['Label'])
        if index[0].size == 0 and index[1].size == 0:
            continue
        
        # Skip small regions
        if region['Area'] < 100:
            continue
        
        # Extract the coordinates of regions
        minr, minc, maxr, maxc = region['BoundingBox']
        margin = len(np_image) / 100
        bord.append((minr-margin, maxr+margin, minc-margin, maxc+margin))
        areas.append(edges_bin[minr-margin:maxr+margin,minc-margin:maxc+margin].copy())
        areas_full.append(np_image[minr-margin:maxr+margin,minc-margin:maxc+margin].copy())
    
    return areas, areas_full, bord
Example #26
def _region_segment(args):
    """
    Region based segmentation.
    """
    data, args, ind_start, ind_end = args
    low, high = args
    
    for m in range(ind_end-ind_start):
        img = data[m, :, :]
        elevation_map = sobel(img)

        markers = np.zeros_like(img)
        markers[img < low] = 1
        markers[img > high] = 2

        img = morphology.watershed(elevation_map, markers)
        data[m, :, :] = img
    return ind_start, ind_end, data
Example #27
def getEdges(image):
    """Computes Sobel and Roberts edges on a uint8 single channel grayscale image
    input: image, single channel numpy array, uint8, 0-255 range
    outputs:
        edge_roberts, single channel numpy array, float64, 0-1 range
        edge_sobel,   single channel numpy array, float64, 0-1 range"""
    from skimage.filter import roberts, sobel
    edge_roberts = roberts(image)
    edge_sobel = sobel(image)

    #print 'Image info', type(image),           image.shape,      image.dtype,      image.min(),      image.max()
    #print 'Sobel info', type(edge_sobel), edge_sobel.shape, edge_sobel.dtype, edge_sobel.min(), edge_sobel.max()

    # Change the range to 0-255 and the type to uint8 
    edge_sobel = np.uint8(edge_sobel * 255)
    edge_roberts = np.uint8(edge_roberts * 255)
    cv2.imshow('input || sobel || roberts', np.hstack((image, edge_sobel, edge_roberts)))
    # cv2.waitKey(0)
    return edge_roberts, edge_sobel
Example #28
def sobel(parameters):
    """Sobel edge extraction filter.

    This wraps `skimage.filter.sobel`. The `mask` option is not supported.

    The output has the same shape as the input, but the edge response extends
    one pixel beyond the original edge on every side. e.g. ideally if the
    initial image is::

        0 0 0 0 0 0 0
        0 1 1 1 1 1 0
        0 1 1 1 1 1 0
        0 1 1 1 1 1 0
        0 1 1 1 1 1 0
        0 1 1 1 1 1 0
        0 0 0 0 0 0 0

    then after sobel it will become::

        1 1 1 1 1 1 1
        1 1 1 1 1 1 1
        1 1 0 0 0 1 1
        1 1 0 0 0 1 1
        1 1 0 0 0 1 1
        1 1 1 1 1 1 1
        1 1 1 1 1 1 1

    This is expected, given the way the Sobel filter works.

    The wrapped function returns an array with dtype('float64'). If the result
    is cast to another dtype it will not be this accurate. Keep this in mind
    before saving the object as an image file with dtype e.g. uint8.

    :param parameters['data'][0]: input image
    :type parameters['data'][0]: numpy.array

    :return: numpy.array with dtype('float64')

    """
    img = parameters['data'][0]

    result = filter.sobel(img)

    return result
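A small sketch (an addition, using the current skimage.filters namespace) that reproduces the border and dtype behaviour described in the docstring above:

import numpy as np
from skimage.filters import sobel  # formerly skimage.filter.sobel

img = np.zeros((7, 7))
img[1:6, 1:6] = 1                  # the block of ones from the docstring
result = sobel(img)
print(result.dtype)                # float64, as noted above
print((result > 0).astype(int))    # nonzero response extends one pixel past the block edge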
Example #29
def record_mouse(event):
    """
    Save a mouse action
    """
    global counter
    global session

    if event.buttons != 1:
        return

    h, w, _, _, _ = session.console.display.get_screen_resolution(0)
    png = session.console.display.take_screen_shot_png_to_array(0, h, w)

    with open('screenshot.png', 'wb') as f:
        f.write(png)

    IMAGE_FILE_PATH = "screenshot.png"
    FILE_TO_SAVE = "%s%s.png" % (SAVE_FOLDER, counter)

    # load image
    image = data.imread(IMAGE_FILE_PATH)
    image = rgb2gray(image)

    # edges
    image = sobel(image)

    # blur
    image = gaussian_filter(image, sigma=10)

    mpimg.imsave("output.png", image)

    img = Image("output.png")
    img_original = Image(IMAGE_FILE_PATH)
    image = Image(IMAGE_FILE_PATH)
    blobs = img.findBlobs(threshval = -1, minsize=10, maxsize=0, threshblocksize=0, threshconstant=5, appx_level=3)
    if blobs:
        blobs.draw(width=10)
    img.save(FILE_TO_SAVE)
    img_original.save(FILE_TO_SAVE+"_original.png")

    counter += 1
Example #30
    def segment(self, src):
        '''
            src: preprocessing cv.Mat
        '''
        image = src.ndarray[:]


        if self.use_adaptive_threshold:
#            block_size = 25
            markers = threshold_adaptive(image, self.block_size)

            n = markers[:].astype('uint8')
            n[markers == True] = 255
            n[markers == False] = 1
            markers = n
#            print markers
#            markers = markers.astype('uint8')
#            n = ones_like(markers)
#            n[markers] = 255
#            print n
#            markers[markers] = 255
#            markers[not markers] = 1
#            print markers
#            markers = n.astype('uint8')
#            markers = invert(markers).astype('uint8')

        else:
            markers = zeros_like(image)
            markers[image < self.threshold_low] = 1
            markers[image > self.threshold_high] = 255

#        global cnt
#        # remove holes
#        if cnt % 2 == 0:
#            markers = binary_closing(markers).astype('uint8') * 255
#        cnt += 1
#        print markers
        elmap = sobel(image, mask=image)
        wsrc = watershed(elmap, markers, mask=image)
        return invert(wsrc)
Example #31
def select_area_for_detector(np_image, min_t, max_t):

    pl.close('all')
    image_filtered = denoise_tv_chambolle(np_image, weight=0.005)
    float_img = rescale_intensity(image_filtered,
                                  in_range=(image_filtered.min(),
                                            image_filtered.max()),
                                  out_range='float')

    p2, p98 = np.percentile(float_img, (2, 99))
    normalize = exposure.rescale_intensity(float_img,
                                           in_range=(p2, p98),
                                           out_range='float')

    binary = normalize > threshold_otsu(normalize)
    #     image_filtered = denoise_tv_chambolle(np_image, weight=0.005)
    #     binary = image_filtered.copy()
    #     mask1 = max_t < image_filtered
    #     mask2 = min_t > image_filtered
    #
    #     binary[mask1] = 0
    #
    #     binary[mask2] = 0
    #
    #     binary[binary > 0] = 1

    distance = ndimage.distance_transform_edt(binary)
    local_maxi = peak_local_max(distance, indices=False, labels=binary)

    markers = ndimage.label(local_maxi)[0]

    labeled = watershed(-distance, markers, mask=binary)

    #     pl.subplot(2, 3, 2)
    #     pl.title("equalize")
    #     pl.imshow(equalize)
    #     pl.gray()
    #     pl.subplot(2, 3, 1)
    #     pl.title("np_image")
    #     pl.imshow(np_image)
    #     pl.subplot(2, 3, 4)
    #     pl.title("binary")
    #     pl.imshow(binary)
    #     pl.subplot(2, 3, 5)
    #     pl.title("distance")
    #     pl.imshow(distance)
    #     pl.subplot(2, 3, 6)
    #     pl.title("label")
    #     pl.imshow(labeled)
    #     pl.show()
    #     pl.close('all')

    areas = []
    centroids_fit = []
    radius_fit = []
    edge_coords = []
    bords = []

    # Extract information from the regions

    for region in measure.regionprops(labeled,
                                      ['Area', 'BoundingBox', 'Label']):

        # Skip wrong regions
        index = np.where(labeled == region['Label'])
        if index[0].size == 0 and index[1].size == 0:
            continue

        # Skip small regions
        if region['Area'] < 100:
            continue

        # Extract the coordinates of regions
        minr, minc, maxr, maxc = region.bbox
        margin = 10

        crop = normalize[minr - margin:maxr + margin,
                         minc - margin:maxc + margin].copy()

        #         binary = crop.copy()
        #
        #         mask1 = max_t < binary
        #         mask2 = min_t > binary
        #         binary[mask1] = 0
        #         binary[mask2] = 0
        #         binary[binary > 0] = 1
        binary = crop > threshold_otsu(crop)
        crop = sobel(binary)

        coords = np.column_stack(np.nonzero(crop))
        X = np.array(coords[:, 0]) + minr - margin
        Y = np.array(coords[:, 1]) + minc - margin

        try:
            XC, YC, RAD, RESID = leastsq_circle(X, Y)
            if region.area * 1.3 > np.pi * RAD**2:

                centroids_fit.append((round(XC, 4), round(YC, 4)))
                radius_fit.append(round(RAD, 2))
                bords.append((minr - margin, minc - margin, maxr + margin,
                              maxc + margin))
                areas.append(crop)
        except:
            continue

    return [centroids_fit, radius_fit, bords, areas, normalize]
Example #32
import scipy.misc
from skimage import filter
from scipy.misc.pilutil import Image

# opening the image and converting it to grayscale 
a = Image.open('../Figures/cir.png').convert('L')
# performing Sobel filter
b = filter.sobel(a)
# b is converted from an ndarray to an image 
b = scipy.misc.toimage(b)
b.save('../Figures/sobel_cir.png')
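scipy.misc.pilutil and scipy.misc.toimage have since been removed from SciPy; an equivalent of the snippet above using currently maintained APIs might look like this sketch (PIL for I/O and skimage.filters for the filter are my substitutions, not the original's):

import numpy as np
from PIL import Image
from skimage import img_as_ubyte
from skimage.filters import sobel

# opening the image and converting it to grayscale
a = np.asarray(Image.open('../Figures/cir.png').convert('L'))
# performing Sobel filter (returns a float64 gradient image)
b = sobel(a)
# rescale to 8-bit and save
Image.fromarray(img_as_ubyte(b / b.max())).save('../Figures/sobel_cir.png')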
Example #33
# <headingcell level=1>

# Scikit Image Test

# <codecell>

import numpy as np
from skimage import io, color
from skimage.filter import canny, sobel
import matplotlib.pyplot as plt

#read and convert profiles
imFile = io.imread("D:\\Dropbox\\ankur\\test.tif")
imData = color.rgb2gray(imFile)
edges = canny(imData / 1.)
edges2 = sobel(imData)

print("original rgb size = " + np.str(np.size(imFile)))
print("original gray size = " + np.str(np.size(imData)))

#plots
plt.figure(figsize=(8, 3))

plt.subplot(131)
plt.imshow(imFile)
plt.title('Original rgb')

plt.subplot(132)
plt.imshow(edges)
plt.title('canny')
Example #34
def watershed(img, obj_seeds, bg_seeds):
    adj8 = [(0, 0), (-1, -1), (0, -1), (1, -1), (-1, 0), (1, 0), (-1, 1),
            (0, 1), (1, 1)]

    grad = sobel(img)
    # io.imsave("grad.png", grad)
    grad = grad * 255
    grad = grad.astype(int)
    path_val = np.ones(grad.shape) * float("inf")
    label_img = np.zeros(grad.shape)
    color = np.zeros(grad.shape)  # WHITE = 0
    max_value = np.max(grad)

    # path_val = np.ones(img.shape) * float("inf")
    # label_img = np.zeros(img.shape)
    # color = np.zeros(img.shape) # WHITE = 0
    # max_value = 256

    Q = GQueue(max_value)

    for p in bg_seeds:
        color[p] = GREY
        label_img[p] = 0
        path_val[p] = 0
        Q.insert(p, grad[p])
        # Q.insert(p, 0)

    for p in obj_seeds:
        color[p] = GREY  # GREY means that the pixel is inside the queue
        label_img[p] = 1
        path_val[p] = 0
        Q.insert((p), grad[p])
        # Q.insert((p), 0)

    # put the seeds into GQueue

    c_cursor = Q.cursor
    c = 0
    while not Q.is_empty():

        # if Q.cursor != c_cursor:
        #     print str(Q.cursor) + " " + str(c_cursor) + " " + str(c)
        #     c_cursor = Q.cursor
        print "before: " + str(Q.cursor) + " " + str(Q.n_elems) + " " + str(
            len(Q.queue[12])) + " " + str(c)

        p = Q.remove()
        # print "after: " + str(Q.cursor) + " " + str(Q.n_elems) + " " + str(len(Q.queue[12])) + " " + str(c)

        if p == None:
            Q.cursor += 1
            Q.cursor = Q.cursor % len(Q.queue)
            continue

        c += 1

        color[p] = BLACK  # pixel p has the min path cost

        # for each adjacency to p
        for dx, dy in adj8:
            q = (p[0] + dx, p[1] + dy)

            if is_valid(grad, q):
                # if is_valid(img, q):
                # the cost of the path from the root until pixel q through p
                # is the max between the cost of the path until p and the cost
                # of the edge pq
                tmp = int(max(path_val[p], grad[q]))

                # distance = abs(img[p] - img[q])
                # tmp = min(path_val[p] + distance , img[q])

                # print str(p[0]) + " " + str(p[1]) + " | " + str(q[0]) + " " + str(q[1]) + " | " + str(path_val[p]) + " " + str(grad[q]) + " | " + str(path_val[q])

                # if the cost offered is less than the current cost from a
                # path from the root to q, then the pixel p "conquers" q
                # if tmp < path_val[q]:
                if tmp < path_val[q]:

                    path_val[q] = tmp

                    label_img[q] = label_img[p]

                    # only WHITE or GREY colors enter in this if

                    # q never entered into queue
                    # if color[q] == GREY:
                    #     print "entrei efgkfhgwfkhgwefkgrhfehkgfekghrhgkfehg"
                    #     Q.remove_elem(q, path_val[q])

                    # label from pixl p is propagates to q
                    # label_img[q] = label_img[p]
                    # path_val[q] = tmp
                    # print "inserted at " + str(tmp) + " | " + str(Q.cursor) + " | " + str(c)
                    # if tmp != Q.cursor:
                    #     # print str(tmp) + " temp"
                    if color[q] == WHITE:
                        color[q] = GREY
                        print int(path_val[q])
                        Q.insert(q, int(path_val[q]))

        # break

    # for y in range(0, len(img)):
    #     for x in range(0, len(img[0])):
    #         print str(x) + " " + str(y) + " " + str(label_img[y,x])

    return label_img
Example #35
from PIL import Image
import numpy
from skimage.filter import sobel
from skimage.morphology import watershed
from scipy import ndimage as nd

grind = numpy.asarray(Image.open('grind.png')).mean(axis=2)

edges = sobel(grind)
markers = numpy.zeros_like(grind)

# Grind is dark on white background (paper)
markers[grind < 70] = 1
markers[grind > 150] = 2

labels, num_features = nd.label(markers == 1)

areas = []
for i in xrange(1, num_features + 1):
    total = (labels == i).sum()
    if total < 4 or total > 1000:
        continue
    areas.append(total)

print "mean {:.2f}".format(numpy.array(areas).mean())
Example #36
def select_area_for_detector(np_image):
    float_img = rescale_intensity(np_image.copy(),
                                  in_range=(np_image.min(), np_image.max()),
                                  out_range='float')

    p2, p98 = np.percentile(float_img, (2, 99))
    for_show = exposure.rescale_intensity(float_img,
                                          in_range=(p2, p98),
                                          out_range='float')

    pl.close('all')
    image_filtered = denoise_tv_chambolle(np_image, weight=0.005)
    #     image_filtered = denoise_tv_bregman(np_image, weight=50)
    #     image_filtered = gaussian_filter(np_image, 3)
    float_img = rescale_intensity(image_filtered.copy(),
                                  in_range=(image_filtered.min(),
                                            image_filtered.max()),
                                  out_range='float')

    p2, p98 = np.percentile(float_img, (2, 99))
    normalize = exposure.rescale_intensity(float_img,
                                           in_range=(p2, p98),
                                           out_range='float')

    binary = normalize > threshold_otsu(normalize)
    #     binary = image_filtered.copy()
    #     mask1 = 18 > image_filtered
    #     mask2 = 47 < image_filtered
    #
    #     binary[mask1] = 0
    #
    #     binary[mask2] = 0
    #
    #     binary[binary > 0] = 1

    distance = ndimage.distance_transform_edt(binary)
    local_maxi = peak_local_max(distance, indices=False, labels=binary)

    markers = ndimage.label(local_maxi)[0]

    labeled = watershed(-distance, markers, mask=binary)
    pl.imshow(np_image)
    pl.gray()
    pl.axis('off')
    pl.show()
    pl.imshow(image_filtered)
    pl.gray()
    pl.axis('off')
    pl.show()
    #     pl.subplot(1, 3, 1)
    #     pl.title("Filtered Image")
    #     pl.subplot(1, 3, 2)
    #     pl.title("Binary Image")
    pl.imshow(normalize)
    pl.axis('off')
    pl.show()
    pl.imshow(binary)
    pl.axis('off')
    pl.show()
    #     pl.subplot(1, 3, 3)
    #     pl.title("Watershed segmentation")
    pl.imshow(labeled)
    pl.axis('off')
    pl.show()
    #     pl.close('all')

    areas = []
    centroids_fit = []
    radius_fit = []
    edge_coords = []
    bords = []

    # Extract information from the regions

    for region in measure.regionprops(labeled,
                                      ['Area', 'BoundingBox', 'Label']):

        # Skip wrong regions
        index = np.where(labeled == region['Label'])
        if index[0].size == 0 and index[1].size == 0:
            continue

        # Skip small regions
        if region['Area'] < 100:
            continue

        # Extract the coordinates of regions
        minr, minc, maxr, maxc = region.bbox
        bx = [minc - 10, maxc + 10, maxc + 10, minc - 10, minc - 10]
        by = [minr - 10, minr - 10, maxr + 10, maxr + 10, minr - 10]
        pl.plot(bx, by, '-b', linewidth=2.5)
        pl.axis('off')
        margin = 10

        crop = normalize[minr - margin:maxr + margin,
                         minc - margin:maxc + margin].copy()
        binary = crop > threshold_otsu(crop)
        crop = sobel(binary)

        coords = np.column_stack(np.nonzero(crop))
        X = np.array(coords[:, 0]) + minr - margin
        Y = np.array(coords[:, 1]) + minc - margin

        try:
            XC, YC, RAD, RESID = leastsq_circle(X, Y)
            if region.area * 1.3 > np.pi * (RAD)**2:

                centroids_fit.append((round(XC, 4), round(YC, 4)))
                radius_fit.append(round(RAD, 2))
                #                 edge_coords.append((X, Y))
                bords.append((minr - margin, minc - margin, maxr + margin,
                              maxc + margin))
                areas.append(crop)
        except:
            continue

    pl.imshow(np_image)
    pl.gray()
    pl.show()
    return [centroids_fit, radius_fit, bords, areas, np_image]
Example #37
def edges_each(image):
    return filter.sobel(image)
Example #38
def edge_map(img, sigma):
    blur = skimage_filter.gaussian(img, sigma)
    return skimage_filter.sobel(blur)
Example #39
def test_sobel_mask():
    """Sobel on a masked array should be zero"""
    np.random.seed(0)
    result = F.sobel(np.random.uniform(size=(10, 10)), np.zeros((10, 10),
                                                                bool))
    assert (np.all(result == 0))
Example #40
def test_hsv_value():
    filtered = edges_hsv(COLOR_IMAGE)
    value = color.rgb2hsv(COLOR_IMAGE)[:, :, 2]
    assert_allclose(color.rgb2hsv(filtered)[:, :, 2], filter.sobel(value))
Example #41
def select_area_for_detector(np_image):
    """
    Takes image as an input and processes it:
    1. TV DENOISING FILTER
    2. RESCALE THE INTENSITY TO FLOAT (SOME FN'S NEED IT)
    3. ENHANCE CONTRAST USING PERCENTILES 2 TO 99
    4. OTSU THRESHOLD
    5. EUCLIDEAN DISTANCE MAP
    6. GET MAXIMA FROM THE EDM - FOR MARKERS
    7. APPLY WATERSHED ALGORITHM
    
    THEN EXTRACT INFORMATION FROM THE SEGMENTED OBJECTS.
    FOR EVERY CROPPED OBJECT USE LEAST SQUARES, TO
    FIND A RADIUS ESTIMATE FOR THE HOUGH (FASTER) AND USE
    THE APPROXIMATE AREA FOR OUTLIER ELIMINATION.
    
    
    """
    pl.close('all')

    image_filtered = denoise_tv_chambolle(np_image, weight=0.005)

    float_img = rescale_intensity(image_filtered,
                                  in_range=(image_filtered.min(),
                                            image_filtered.max()),
                                  out_range='float')

    p2, p98 = np.percentile(float_img, (2, 99))
    equalize = exposure.rescale_intensity(float_img,
                                          in_range=(p2, p98),
                                          out_range='float')

    binary = equalize > threshold_otsu(equalize)

    distance = ndimage.distance_transform_edt(binary)
    local_maxi = peak_local_max(distance, indices=False, labels=binary)

    markers = ndimage.label(local_maxi)[0]

    labeled = watershed(-distance, markers, mask=binary)

    areas = []
    radius_fit = []
    bords = []

    # Extract information from the regions
    for region in measure.regionprops(labeled, ['Area', 'BoundingBox']):

        # Extract the coordinates of regions
        # Margin used to go beyond the region if it
        # might be too tight on the object
        minr, minc, maxr, maxc = region.bbox
        margin = 10

        # Crop out the Watershed segments and obtain circle edges
        crop = equalize[minr - margin:maxr + margin,
                        minc - margin:maxc + margin].copy()
        binary = crop > threshold_otsu(crop)
        crop = sobel(binary)

        # Get the coordinates of the circle edges
        coords = np.column_stack(np.nonzero(crop))
        X = np.array(coords[:, 0]) + minr - margin
        Y = np.array(coords[:, 1]) + minc - margin

        # Fit a circle and compare measured circle area with
        # area from the amount of pixels to remove trash
        try:
            XC, YC, RAD, RESID = leastsq_circle(X, Y)
            if region.area * 1.3 > np.pi * RAD**2:

                radius_fit.append(round(RAD, 2))
                bords.append((minr - margin, minc - margin, maxr + margin,
                              maxc + margin))
                areas.append(crop)
        except:
            continue

    return [radius_fit, bords, areas, equalize]
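The pipeline above relies on a leastsq_circle helper that is not shown in these excerpts. A minimal algebraic (Kasa-style) circle fit with the same return signature might look like the sketch below; this is an assumption, not the original helper.

import numpy as np

def leastsq_circle(x, y):
    # Fit x^2 + y^2 = 2*a*x + 2*b*y + c in the least-squares sense.
    x = np.asarray(x, dtype=float)
    y = np.asarray(y, dtype=float)
    A = np.column_stack([2 * x, 2 * y, np.ones_like(x)])
    rhs = x ** 2 + y ** 2
    (a, b, c), *_ = np.linalg.lstsq(A, rhs, rcond=None)
    radius = np.sqrt(c + a ** 2 + b ** 2)
    residuals = np.sqrt((x - a) ** 2 + (y - b) ** 2) - radius
    return a, b, radius, residuals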
Example #42
def extract_features(im_cat_path_list):
    '''
    Extract Features takes a list of 2 element lists:
        
        [category of image, path to image]

    and extracts 17 features from this image. The output is a list of 3 element lists:

        [category of image, path to image, [list of features]]

        The 17 features are:
        
        1     Product of image pixel dimensions (image size)
        2     Mean of Grayscale Image
        3     Area of Sobel Edges Normalized By Image Size
        4     Area of Sobel Edges Above 2x Mean of Sobel Edges Normalized By Image Size
        5     Area of Canny Edges (Sum of booleans) Normalized By Image Size
        6     Number of Harris Corners
        7     Unique Felzenszwalb Image Segmentation Lines
        8     Area of Vertical Sobel Edges Above 2x Mean of Sobel Edges Normalized By Image Size
        9     Area of Horizontal Sobel Edges Above 2x Mean of Sobel Edges Normalized By Image Size
        10-12 Mean of Red/Green/Blue Channels (if grayscale: mean of the only color channel)
        13    Maximum Pixel Value of the Histogram of Oriented Gradients
        14    Percent of image that is light versus dark with adaptive thresholding
        15-17 Percent of image that is red/green/blue with adaptive thresholding
    '''

    cat_path_features = []

    for im_cat, im_path in im_cat_path_list:

        #RAW IMAGE
        im_raw = imread(im_path)  #image matrix

        #RAW IMAGE FLATTENED IF NOT ALREADY FLAT
        if len(np.shape(im_raw)) == 3:
            im_raw_flat = np.median(im_raw, axis=2)
        else:
            im_raw_flat = im_raw

        #Size of image
        im_y = float(im_raw.shape[0])
        im_x = float(im_raw.shape[1])
        im_size = im_y * im_x

        #LIST OF FEATURES
        features = []

        #FEATURE 1: Product of image pixel dimensions (image size)
        features.append(float(im_size))

        #FEATURE 2: Mean of Grayscale Image
        features.append(im_raw_flat.mean())

        #FEATURE 3: Area of Sobel Edges Normalized By Image Size
        im_edge_sobel = filter.sobel(im_raw_flat)
        features.append(im_edge_sobel.sum() / im_size)

        #FEATURE 4: Area of Sobel Edges Above 2x Mean of Sobel Edges Normalized By Image Size
        features.append(
            float((im_edge_sobel > im_edge_sobel.mean() * 2).sum()) / im_size)

        #FEATURE 5: Area of Canny Edges (Sum of booleans) Normalized By Image Size
        im_canny = filter.canny(im_raw_flat, sigma=8)
        features.append(im_canny.sum().astype(float) / im_size)

        #FEATURE 6: Number of Harris Corners
        im_corners = feature.corner_peaks(feature.corner_harris(im_raw_flat),
                                          min_distance=5)
        features.append(float(len(im_corners)))

        #FEATURE 7: Unique Felzenszwalb Image Segmentation Lines
        im_raw_float = util.img_as_float(im_raw[::2, ::2])
        im_felzen_segments = segmentation.felzenszwalb(im_raw_float,
                                                       scale=100,
                                                       sigma=0.5,
                                                       min_size=50)
        features.append(float(len(np.unique(im_felzen_segments))))

        #FEATURE 8: Area of Vertical Sobel Edges Above 2x Mean of Sobel Edges Normalized By Image Size
        im_edge_vsobel = filter.vsobel(im_raw_flat)
        features.append(
            float(
                (im_edge_vsobel > im_edge_vsobel.mean() * 2).sum()) / im_size)

        #FEATURE 9: Area of Horizontal Sobel Edges Above 2x Mean of Sobel Edges Normalized By Image Size
        im_edge_hsobel = filter.hsobel(im_raw_flat)
        features.append(
            float(
                (im_edge_hsobel > im_edge_hsobel.mean() * 2).sum()) / im_size)

        #FEATURE 10-12: Mean of Red/Green/Blue Channels (if grayscale: mean of the only color channel)
        if len(np.shape(im_raw)) == 3:
            features.append(im_raw[..., 0].mean())
            features.append(im_raw[..., 1].mean())
            features.append(im_raw[..., 2].mean())
        else:
            features.append(im_raw_flat.mean())
            features.append(im_raw_flat.mean())
            features.append(im_raw_flat.mean())

        #FEATURE 13: Maximum Pixel Value of the Histogram of Oriented Gradients
        im_fd, im_hog = feature.hog(im_raw_flat,
                                    orientations=8,
                                    pixels_per_cell=(16, 16),
                                    cells_per_block=(1, 1),
                                    visualise=True)
        features.append(im_hog.max())

        #FEATURE 14: Percent of image that is light versus dark with adaptive thresholding
        im_thres_flat = filter.threshold_adaptive(im_raw_flat, 100, 'mean')
        features.append(im_thres_flat.sum() / im_size)

        #FEATURE 15-17: Percent of image that is red/green/blue with adaptive thresholding
        im_thres_red = filter.threshold_adaptive(im_raw[..., 0], 100, 'mean')
        im_thres_green = filter.threshold_adaptive(im_raw[..., 1], 100, 'mean')
        im_thres_blue = filter.threshold_adaptive(im_raw[..., 2], 100, 'mean')
        features.append(im_thres_red.sum() / im_size)
        features.append(im_thres_green.sum() / im_size)
        features.append(im_thres_blue.sum() / im_size)

        #BUILD OUTPUT LIST FOR THIS IMAGE
        cat_path_features.append([im_cat, im_path, features])

        #CLEAR IMAGE PROC DATA
        del im_raw
        del im_raw_flat
        del im_raw_float
        del im_edge_sobel
        del im_canny
        del im_corners
        del im_felzen_segments
        del im_edge_vsobel
        del im_edge_hsobel
        del im_fd
        del im_hog

    return cat_path_features
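A hedged usage sketch for the extractor above: it assumes an older scikit-image release that still provides skimage.filter.vsobel/hsobel/canny/threshold_adaptive (as used in the function body), and the dummy file name and category are made up for illustration.

import numpy as np
from skimage import io

rng = np.random.default_rng(0)
io.imsave('dummy.png', (rng.uniform(size=(64, 64, 3)) * 255).astype(np.uint8))

rows = extract_features([['dummy_category', 'dummy.png']])
category, path, features = rows[0]
print(len(features))   # 17 numeric features for this image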
Example #43
def locate_bumps(image, radius=2, searchpoints=1000, display=False):
    '''Locates bumps around the exterior of a shape in a binarized image.
       Works in the frame of reference where the object is white and background 
       is black. Will convert images that don't follow this convention.

    Args:
        image (image or str): binary image in which to find bumps, or path to a 
        file containing the image (can be grayscale)

    Keyword arguments:
        radius (float): size of the oval around the object to calculate minimum 
        distances from. In units of 1/2 the length and width of cropped image.
        
        searchpoints (int): number of points to calculate distances from 
        (default 1000)
                
        display (bool): set to True to see plots of results (default False)

    Returns:
        2D ndarray listing coordinates (row, col, strength) of bumps'''
    #prepare image: load, threshold, crop, and find edges
    if type(image) == str:
        image = io.imread(image)
        binary = image > threshold_otsu(image)
    else:
        binary = image

    if binary[0, 0]:
        binary = ~binary

    binary, crop_location = crop(binary, padding = 2) #small pad for edges

    center = np.array(np.shape(binary))/2.
    edges = sobel(binary) #highlights pixels on either side of the edge
    edges = logical_and(edges,binary)
    edgepixs = np.array(np.where(edges>0)).T

    #find bumps using minimum distance to a bounding ellipse
    thetas = np.linspace(0,2*pi,searchpoints)
    ringpoints = np.array([center[0]+radius*center[0]*np.cos(thetas), 
                           center[1]+radius*center[1]*np.sin(thetas)]).T
    d = spatial.distance.cdist(ringpoints,edgepixs*1.0)
    votes = edgepixs[np.argmin(d,1)] #each ringpoint votes once


    # group candidates that are very close together, and probably represent the same bump
    selected_candidates = votes.T
    points = votes
    z = hac.linkage(points, method='single')
    knee = -1 * np.diff(z[::-1, 2], 1)
    num_clust1 = knee[10:].argmax() + 10 + 1 # guess of number of clusters

    n = 3
    while n>2: #don't let any center of mass be more than 2 pixels from the original edge
        part1 = hac.fcluster(z, num_clust1, 'maxclust')
        final = zeros([len(set(part1)),3])

        for cluster in set(part1):
            final[cluster-1,0:2]  = mean(points[part1 == cluster],0)
            final[cluster-1,2] = sum(part1==cluster)

        ed = array(where(edges)).T
        t = spatial.distance.cdist(array([ed[:,0],ed[:,1]]).T,final[:,0:2])
        dist_from_border = amin(t,0)
        n = max(dist_from_border)
        num_clust1 += 1
    # end of reducing candidates


    # translate coordinates to their values in the whole frame
    master_coords = np.array([final[:,1]+crop_location[2],
                              final[:,0]+crop_location[0],final[:,2]]).T

    if display==True:

        plt.figure(figsize=[20,6])

        plt.subplot(1,4,1)
        io.imshow(image)
        plt.title('Raw Data')

        plt.subplot(1,4,2)
        io.imshow(binary)
        plt.plot(edgepixs[:,1],edgepixs[:,0],'g.')
        plt.plot(ringpoints[:,1],ringpoints[:,0],'g.')

        plt.subplot(1,4,3)
        io.imshow(binary)
        plt.gray()
        plt.plot(selected_candidates[1],selected_candidates[0],'ro')
        plt.xlim(0,np.shape(binary)[1])
        plt.ylim(np.shape(binary)[0],0)
        plt.title('First Guess at Peak Locations:\n'+str(np.shape(selected_candidates[1])[0])+ 
                  ' candidates' )

        plt.subplot(1,4,4)
        io.imshow(image)
        plt.plot(master_coords[:,0],master_coords[:,1],'ro')
        plt.title('Refined Peak Locations Shown on Raw Data:\n'+str(np.shape(final)[0])+ 
                  ' peaks')
        plt.xlim(0,np.shape(image)[1])
        plt.ylim(np.shape(image)[0],0)

        io.show()

    return master_coords
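The core of the method above is the bounding-oval vote: points on an oval around the object each pick their nearest edge pixel, and pixels that collect many votes are bump candidates. A self-contained sketch of just that step on a synthetic blob (an illustration, not the author's code):

import numpy as np
from scipy import spatial
from skimage.filters import sobel

# Synthetic blob: a disc with one bump sticking out on its right-hand side.
yy, xx = np.mgrid[:200, :200]
blob = ((yy - 100) ** 2 + (xx - 100) ** 2 < 60 ** 2) | \
       ((yy - 100) ** 2 + (xx - 170) ** 2 < 15 ** 2)

edgepixs = np.argwhere(sobel(blob.astype(float)) > 0)   # boundary pixels

# Ring of search points well outside the blob; each votes for its nearest edge pixel.
thetas = np.linspace(0, 2 * np.pi, 1000, endpoint=False)
ring = np.column_stack([100 + 150 * np.cos(thetas), 100 + 150 * np.sin(thetas)])
d = spatial.distance.cdist(ring, edgepixs.astype(float))
votes = edgepixs[np.argmin(d, axis=1)]

# Votes pile up on the protruding bump (columns beyond the main disc, col > 160).
print((votes[:, 1] > 160).sum(), "of", len(votes), "ring points voted for the bump")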
Example #44
viewer = ImageViewer(fill_coins)
#viewer.show()


label_objects, nb_labels = ndi.label(fill_coins)
sizes = np.bincount(label_objects.ravel())
mask_sizes = sizes > 20
mask_sizes[0] = 0
coins_cleaned = mask_sizes[label_objects]

markers = np.zeros_like(coins)
markers[coins < 30] = 1
markers[coins > 150] = 2

from skimage.filter import sobel
elevation_map = sobel(coins)
viewer = ImageViewer(elevation_map)
#viewer.show()

markers = np.zeros_like(coins)
markers[coins < 30] = 1
markers[coins > 150] = 2

from skimage.morphology import watershed
segmentation = watershed(elevation_map, markers)


segmentation = ndi.binary_fill_holes(segmentation - 1)

labeled_coins, _ = ndi.label(segmentation)
Example #45
plt.rcParams['image.cmap'] = 'gray'
plt.rcParams['image.interpolation'] = 'none'

# We've already discussed edge filtering, using the Sobel filter, in the last section.

# In[2]:

import skdemo
from skimage import data
# Rename module so we don't shadow the builtin function
import skimage.filter as filters

image = data.camera()
pixelated = image[::10, ::10]
gradient = filters.sobel(pixelated)
skdemo.imshow_all(pixelated, gradient)

# With the Sobel filter, however, we get back a grayscale image, which essentially tells us the likelihood that a pixel is on the edge of an object.
#
# We can apply a bit more logic to *detect* an edge; i.e. we can use that filtered image to make a *decision* whether or not a pixel is on an edge. The simplest way to do that is with thresholding:

# In[3]:

skdemo.imshow_all(gradient, gradient > 0.4)

# That approach doesn't do a great job. It's noisy and produces thick edges. Furthermore, it doesn't use our *knowledge* of how edges work: They should be thin and tend to be connected along the direction of the edge.

#### Canny edge detector

# The Canny edge detector combines the Sobel filter with a few other steps to give a binary edge image. The steps are as follows:
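The list of steps is cut off in this excerpt. As a rough illustration of the comparison being set up here (an addition, using the modern skimage namespaces), thresholded Sobel and Canny can be placed side by side:

import matplotlib.pyplot as plt
from skimage import data, feature, filters

image = data.camera()
pixelated = image[::10, ::10]

gradient = filters.sobel(pixelated)
crude_edges = gradient > 0.4                              # the thresholding approach criticised above
canny_edges = feature.canny(pixelated / 255.0, sigma=1)   # smoothing + gradient + thinning + hysteresis

fig, axes = plt.subplots(1, 3, figsize=(9, 3))
for ax, img, title in zip(axes, [gradient, crude_edges, canny_edges],
                          ['Sobel gradient', 'gradient > 0.4', 'Canny']):
    ax.imshow(img, cmap='gray')
    ax.set_title(title)
    ax.axis('off')
plt.show()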
Example #46
def test_sobel_zeros():
    """Sobel on an array of all zeros"""
    result = F.sobel(np.zeros((10, 10)), np.ones((10, 10), bool))
    assert (np.all(result == 0))
Example #47
def edges_hsv(image):
    return filter.sobel(image)
Example #48

# ---

#### Sobel edge filter

# The Sobel filter, the most commonly used edge filter, should look pretty similar to what you developed above. Take a look at the vertical and horizontal components of the Sobel kernel to see how they differ from your earlier implementation:

# * http://scikit-image.org/docs/dev/api/skimage.filter.html#vsobel
# * http://scikit-image.org/docs/dev/api/skimage.filter.html#hsobel

# The standard Sobel filter gives the gradient magnitude. This is similar to what we saw above, except that horizontal and vertical components are combined such that the direction of the gradient is ignored.

# In[28]:

skdemo.imshow_all(bright_square, filters.sobel(bright_square))


# Notice that the size of the output matches the input, and the edges aren't preferentially shifted to a corner of the image. Furthermore, the weights used in the Sobel filter produce diagonal edges with responses that are comparable to horizontal or vertical edges.
# 
# Like any derivative, noise can have a strong impact on the result:

# In[29]:

pixelated_gradient = filters.sobel(pixelated)
skdemo.imshow_all(pixelated, pixelated_gradient)


# Smoothing is often used as a preprocessing step in preparation for feature detection and image-enhancement operations because sharp features can distort results.

# In[30]:
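A small sketch of the point made above (an addition, not from the original notebook): the full Sobel response is the magnitude of the horizontal and vertical component filters, which current scikit-image exposes as sobel_h and sobel_v.

import numpy as np
from skimage import data, filters

image = data.camera() / 255.0
gh = filters.sobel_h(image)     # horizontal-edge component
gv = filters.sobel_v(image)     # vertical-edge component
magnitude = np.hypot(gh, gv)    # gradient magnitude: direction is discarded
direction = np.arctan2(gh, gv)  # the orientation information sobel() throws away
# filters.sobel(image) returns this magnitude up to a constant normalisation factor.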
Example #49
def to_painting(image, saturation=1.4, black=0.006):
    """ transforms any photo into some kind of painting """
    edges = sobel(image.mean(axis=2))
    darkening = black * (255 * np.dstack(3 * [edges]))
    painting = saturation * image - darkening
    return np.maximum(0, np.minimum(255, painting)).astype("uint8")
Example #50
from sklearn.cross_validation import train_test_split
import os
import matplotlib.pyplot as plt
get_ipython().magic(u'matplotlib inline')

path = '/Users/heymanhn/Virginia/Zipfian/Capstone_Project/Test_Output_Images/boots'
file_name = 'barneys_158585078.jpg'
image = io.imread('%s/%s' % (path, file_name))
plt.imshow(image)
image_grey = color.rgb2gray(image)

# In[58]:

edge_roberts = roberts(image_grey)
edge_canny = canny(image_grey)
edge_sobel = sobel(image_grey)
edge_scharr = scharr(image_grey)

# In[59]:

fig, ((ax0, ax1), (ax3, ax4)) = plt.subplots(2, 2, figsize=(12, 7))

ax0.imshow(edge_roberts, cmap=plt.cm.gray)
ax0.set_title('Roberts Edge Detection')
ax0.axis('off')

ax1.imshow(edge_canny, cmap=plt.cm.gray)
ax1.set_title('Canny Edge Detection')
ax1.axis('off')

ax3.imshow(edge_sobel, cmap=plt.cm.gray)
Example #51
def seeds(args):
    """
    %prog seeds [pngfile|jpgfile]

    Extract seed metrics from [pngfile|jpgfile]. Use --rows and --cols to crop image.
    """
    p = OptionParser(seeds.__doc__)
    p.set_outfile()
    opts, args, iopts = add_seeds_options(p, args)

    if len(args) != 1:
        sys.exit(not p.print_help())

    pngfile, = args
    pf = opts.prefix or op.basename(pngfile).rsplit(".", 1)[0]
    sigma, kernel = opts.sigma, opts.kernel
    rows, cols = opts.rows, opts.cols
    labelrows, labelcols = opts.labelrows, opts.labelcols
    ff = opts.filter
    calib = opts.calibrate
    outdir = opts.outdir
    if outdir != '.':
        mkdir(outdir)
    if calib:
        calib = json.load(must_open(calib))
        pixel_cm_ratio, tr = calib["PixelCMratio"], calib["RGBtransform"]
        tr = np.array(tr)

    resizefile, mainfile, labelfile, exif = \
                      convert_image(pngfile, pf, outdir=outdir,
                                    rotate=opts.rotate,
                                    rows=rows, cols=cols,
                                    labelrows=labelrows, labelcols=labelcols)

    oimg = load_image(resizefile)
    img = load_image(mainfile)

    fig, (ax1, ax2, ax3, ax4) = plt.subplots(ncols=4,
                                             nrows=1,
                                             figsize=(iopts.w, iopts.h))

    # Edge detection
    img_gray = rgb2gray(img)
    logging.debug("Running {0} edge detection ...".format(ff))
    if ff == "canny":
        edges = canny(img_gray, sigma=opts.sigma)
    elif ff == "roberts":
        edges = roberts(img_gray)
    elif ff == "sobel":
        edges = sobel(img_gray)
    edges = clear_border(edges, buffer_size=opts.border)
    selem = disk(kernel)
    closed = closing(edges, selem) if kernel else edges
    filled = binary_fill_holes(closed)

    # Watershed algorithm
    if opts.watershed:
        distance = distance_transform_edt(filled)
        local_maxi = peak_local_max(distance, threshold_rel=.05, indices=False)
        coordinates = peak_local_max(distance, threshold_rel=.05)
        markers, nmarkers = label(local_maxi, return_num=True)
        logging.debug("Identified {0} watershed markers".format(nmarkers))
        labels = watershed(closed, markers, mask=filled)
    else:
        labels = label(filled)

    # Object size filtering
    w, h = img_gray.shape
    canvas_size = w * h
    min_size = int(round(canvas_size * opts.minsize / 100))
    max_size = int(round(canvas_size * opts.maxsize / 100))
    logging.debug("Find objects with pixels between {0} ({1}%) and {2} ({3}%)"\
                    .format(min_size, opts.minsize, max_size, opts.maxsize))

    # Plotting
    ax1.set_title('Original picture')
    ax1.imshow(oimg)

    params = "{0}, $\sigma$={1}, $k$={2}".format(ff, sigma, kernel)
    if opts.watershed:
        params += ", watershed"
    ax2.set_title('Edge detection\n({0})'.format(params))
    closed = gray2rgb(closed)
    ax2_img = labels
    if opts.edges:
        ax2_img = closed
    elif opts.watershed:
        ax2.plot(coordinates[:, 1], coordinates[:, 0], 'g.')
    ax2.imshow(ax2_img, cmap=iopts.cmap)

    ax3.set_title('Object detection')
    ax3.imshow(img)

    filename = op.basename(pngfile)
    if labelfile:
        accession = extract_label(labelfile)
    else:
        accession = pf

    # Calculate region properties
    rp = regionprops(labels)
    rp = [x for x in rp if min_size <= x.area <= max_size]
    nb_labels = len(rp)
    logging.debug("A total of {0} objects identified.".format(nb_labels))
    objects = []
    for i, props in enumerate(rp):
        i += 1
        if i > opts.count:
            break

        y0, x0 = props.centroid
        orientation = props.orientation
        major, minor = props.major_axis_length, props.minor_axis_length
        major_dx = cos(orientation) * major / 2
        major_dy = sin(orientation) * major / 2
        minor_dx = sin(orientation) * minor / 2
        minor_dy = cos(orientation) * minor / 2
        ax2.plot((x0 - major_dx, x0 + major_dx),
                 (y0 + major_dy, y0 - major_dy), 'r-')
        ax2.plot((x0 - minor_dx, x0 + minor_dx),
                 (y0 - minor_dy, y0 + minor_dy), 'r-')

        npixels = int(props.area)
        # Sample the center of the blob for color
        d = min(int(round(minor / 2 * .35)) + 1, 50)
        y0i, x0i = int(round(y0)), int(round(x0))  # integer indices for slicing
        square = img[(y0i - d):(y0i + d), (x0i - d):(x0i + d)]
        pixels = []
        for row in square:
            pixels.extend(row)
        logging.debug("Seed #{0}: {1} pixels ({2} sampled) - {3:.2f}%".\
                        format(i, npixels, len(pixels), 100. * npixels / canvas_size))

        rgb = pixel_stats(pixels)
        objects.append(Seed(filename, accession, i, rgb, props, exif))
        minr, minc, maxr, maxc = props.bbox
        rect = Rectangle((minc, minr),
                         maxc - minc,
                         maxr - minr,
                         fill=False,
                         ec='w',
                         lw=1)
        ax3.add_patch(rect)
        mc, mr = (minc + maxc) / 2, (minr + maxr) / 2
        ax3.text(mc,
                 mr,
                 "{0}".format(i),
                 color='w',
                 ha="center",
                 va="center",
                 size=6)

    for ax in (ax2, ax3):
        ax.set_xlim(0, h)
        ax.set_ylim(w, 0)

    # Output identified seed stats
    ax4.text(.1, .92, "File: {0}".format(latex(filename)), color='g')
    ax4.text(.1, .86, "Label: {0}".format(latex(accession)), color='m')
    yy = .8
    fw = must_open(opts.outfile, "w")
    if not opts.noheader:
        print(Seed.header(calibrate=calib), file=fw)
    for o in objects:
        if calib:
            o.calibrate(pixel_cm_ratio, tr)
        print(o, file=fw)
        i = o.seedno
        if i > 7:
            continue
        ax4.text(.01, yy, str(i), va="center", bbox=dict(fc='none', ec='k'))
        ax4.text(.1, yy, o.pixeltag, va="center")
        yy -= .04
        ax4.add_patch(
            Rectangle((.1, yy - .025), .12, .05, lw=0, fc=rgb_to_hex(o.rgb)))
        ax4.text(.27, yy, o.hashtag, va="center")
        yy -= .06
    ax4.text(.1,
             yy,
             "(A total of {0} objects displayed)".format(nb_labels),
             color="darkslategrey")
    normalize_axes(ax4)

    for ax in (ax1, ax2, ax3):
        xticklabels = [int(x) for x in ax.get_xticks()]
        yticklabels = [int(x) for x in ax.get_yticks()]
        ax.set_xticklabels(xticklabels, family='Helvetica', size=8)
        ax.set_yticklabels(yticklabels, family='Helvetica', size=8)

    image_name = op.join(outdir, pf + "." + iopts.format)
    savefig(image_name, dpi=iopts.dpi, iopts=iopts)
    return objects
Exemplo n.º 52
0
def test_each_channel():
    filtered = edges_each(COLOR_IMAGE)
    for i, channel in enumerate(np.rollaxis(filtered, axis=-1)):
        expected = img_as_float(filter.sobel(COLOR_IMAGE[:, :, i]))
        assert_allclose(channel, expected)
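The `edges_each` helper exercised by this test is not shown on this page; in the scikit-image test suite it is presumably built with the `adapt_rgb` decorator, roughly as below (a minimal sketch matching the old `skimage.filter` API used throughout; `COLOR_IMAGE` is a stand-in fixture, not the real test data):

import numpy as np
from skimage import filter
from skimage.color.adapt_rgb import adapt_rgb, each_channel


@adapt_rgb(each_channel)
def edges_each(image):
    # run the Sobel filter on each RGB channel independently
    return filter.sobel(image)


# stand-in fixture, for illustration only
COLOR_IMAGE = np.random.rand(64, 64, 3)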
Exemplo n.º 53
0
import numpy as np
import matplotlib.pyplot as plt
from skimage import img_as_float
from skimage.color import rgb2gray
from skimage.filter import roberts, sobel
from skimage.io import imread, imshow

img_col = img_as_float(imread("brain.png"))
img = rgb2gray(img_col)
edge_roberts = roberts(img)
edge_sobel = sobel(img)

#Normalize
edge_roberts = (edge_roberts - edge_roberts.min()) / (edge_roberts.max() -
                                                      edge_roberts.min())
edge_sobel = (edge_sobel - edge_sobel.min()) / (edge_sobel.max() -
                                                edge_sobel.min())

#Clean
edge_roberts *= (edge_roberts > 0.1)
edge_sobel *= (edge_sobel > 0.1)

# Red
brain_red = np.ones_like(img_col)
brain_red[:, :, 1] = 1 - edge_sobel
brain_red[:, :, 2] = 1 - edge_sobel
plt.figure()
imshow(brain_red)

# Black (the same dimmed edge map placed in all three channels)
brain_green = np.ones_like(img_col)
brain_green[:, :, 0] = (1 - edge_sobel)
brain_green[:, :, 1] = (1 - edge_sobel)
brain_green[:, :, 2] = (1 - edge_sobel)
plt.figure()
imshow(brain_green)
Exemplo n.º 54
0
def test_gray_scale_image():
    # We don't need to test both `hsv_value` and `each_channel` since
    # `adapt_rgb` handles gray-scale inputs directly.
    assert_allclose(edges_each(GRAY_IMAGE), filter.sobel(GRAY_IMAGE))
"""
import numpy as np
from scipy import ndimage as nd
import matplotlib.pyplot as plt

from skimage.filter import sobel
from skimage.segmentation import slic, join_segmentations
from skimage.morphology import watershed
from skimage.color import label2rgb
from skimage import data, img_as_float

coins = img_as_float(data.coins())

# make segmentation using edge-detection and watershed
edges = sobel(coins)
markers = np.zeros_like(coins)
foreground, background = 1, 2
markers[coins < 30.0 / 255] = background
markers[coins > 150.0 / 255] = foreground

ws = watershed(edges, markers)
seg1 = nd.label(ws == foreground)[0]

# make segmentation using SLIC superpixels
seg2 = slic(coins, n_segments=117, max_iter=160, sigma=1, compactness=0.75,
            multichannel=False)

# combine the two
segj = join_segmentations(seg1, seg2)
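`label2rgb` is imported above but never used, which suggests the display code of the original gallery example was cut off; it presumably continued roughly as follows (a sketch, not the verbatim continuation):

# show the watershed, SLIC and joined segmentations side by side
color1 = label2rgb(seg1, image=coins, bg_label=0)
color2 = label2rgb(seg2, image=coins, image_alpha=0.5)
colorj = label2rgb(segj, image=coins, image_alpha=0.5)

fig, axes = plt.subplots(ncols=3, figsize=(9, 3))
for ax, overlay, title in zip(axes,
                              (color1, color2, colorj),
                              ('Watershed', 'SLIC superpixels', 'Joined')):
    ax.imshow(overlay, interpolation='nearest')
    ax.set_title(title)
    ax.axis('off')
plt.show()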
Exemplo n.º 56
0
def edges_hsv_uint(image):
    return img_as_uint(filter.sobel(image))
Exemplo n.º 57
0
# Load image data from files

# In[10]:

import numpy as np
import matplotlib.pyplot as plt
from PIL import Image
from scipy.ndimage import gaussian_filter
from skimage.filter import roberts, sobel, prewitt, canny

imName = 'images/7th49th/cctv4391.jpg'
image = Image.open(imName).convert("L")
image = np.array(image)


# Perform the edge detection algorithms
# In[11]:

edge_roberts = roberts(image)
edge_sobel = sobel(image)
edge_canny = canny(image,low_threshold=100, high_threshold=200)
edge_gaussian= gaussian_filter(image,1)
edge_prewitt = prewitt(image)


# Plot the results
# In[12]:

fig, ax = plt.subplots(ncols=3, nrows=2, sharex=True, sharey=True,
                       figsize=(10, 5))

ax[0,0].imshow(image, cmap=plt.cm.gray)
ax[0,0].set_title('Original Image')
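The cell above is cut off after the first panel; the remaining panels were presumably filled with the edge images computed in cell 11, along these lines (a sketch, with panel titles chosen here for illustration):

for a, (im, title) in zip(ax.ravel()[1:],
                          [(edge_roberts, 'Roberts'),
                           (edge_sobel, 'Sobel'),
                           (edge_canny, 'Canny'),
                           (edge_gaussian, 'Gaussian'),
                           (edge_prewitt, 'Prewitt')]):
    a.imshow(im, cmap=plt.cm.gray)
    a.set_title(title)
for a in ax.ravel():
    a.axis('off')
plt.show()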
Exemplo n.º 58
0
from skimage import data, io, filter

image = data.coins()  # or any NumPy array!
edges = filter.sobel(image)
io.imshow(edges)
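When this snippet is run as a stand-alone script rather than from an interactive session, a trailing `io.show()` is typically needed to actually open and block on the display window:

io.show()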
Exemplo n.º 59
0
def _show_watershed(self):
    viewer = self.image_viewer
    edge_image = filter.sobel(viewer.image)
    labels = morphology.watershed(edge_image, self.paint_tool.overlay)
    viewer.ax.imshow(labels, cmap=plt.cm.jet, alpha=0.5)
    viewer.redraw()
Exemplo n.º 60
0
import numpy as np
import pandas as pd
import matplotlib.pyplot as plt
from scipy import ndimage
from skimage import img_as_float, measure, morphology
from skimage.filter import sobel
from skimage.feature import peak_local_max
from skimage.color import label2rgb


def macfind_g(fi, patch_threshold):
    # fi is the file to read
    # patch_threshold holds the min [0] and max [1] of the desired patch size limits
    # read the prepared horizontal BB image (resolution: 1 px = 1 mm)
    # and convert it to float numbers
    Lface = ndimage.imread(fi)
    im = img_as_float(Lface)
    sim = np.shape(im)
    # calculate difference of channels to extract blue stained patches
    dim = abs(im[:, :, 1] - im[:, :, 0])
    # discard low contrasts
    dim[dim < 0.3] = 0.0

    # filter to local maxima for further segmentation
    # process segmentation according to the sobel function of skimage
    # patch_threshold = 51  # define threshold for macropore identification
    image_max = ndimage.maximum_filter(dim, size=5, mode='constant')

    elevation_map = sobel(dim)

    markers = np.zeros_like(dim)
    markers[image_max < 0.1] = 2
    markers[image_max > 0.2] = 1

    segmentation = morphology.watershed(elevation_map, markers)

    segmentation = ndimage.binary_fill_holes(1 - (segmentation - 1))

    # clean patches below threshold
    patches_cleaned = morphology.remove_small_objects(segmentation,
                                                      patch_threshold[0])
    labeled_patches, lab_num = ndimage.label(patches_cleaned)
    sizes = np.bincount(
        labeled_patches.ravel())[1:]  #first entry (background) discarded

    # reanalyse for large patches and break them by means of watershed segmentation
    idx = np.where(sizes > patch_threshold[1])[0] + 1
    labeled_patches_large = labeled_patches * 0
    idy = np.in1d(labeled_patches, idx).reshape(np.shape(labeled_patches))
    labeled_patches_large[idy] = labeled_patches[idy]
    distance = ndimage.distance_transform_edt(labeled_patches_large)
    footp = int(np.round(np.sqrt(patch_threshold[1]) / 100) * 100)
    local_maxi = peak_local_max(distance,
                                indices=False,
                                footprint=np.ones((footp, footp)),
                                labels=labeled_patches_large)
    markers = ndimage.label(local_maxi)[0]
    labels_broken_large = morphology.watershed(-distance,
                                               markers,
                                               mask=labeled_patches_large)
    labeled_patches[idy] = labels_broken_large[idy] + np.max(labeled_patches)
    ulabels = np.unique(labeled_patches)[1:]
    sizes = np.bincount(labeled_patches.ravel())[1:]
    sizes = sizes[ulabels - 1]
    inlabels = ulabels - 1

    # measures (include every property accessed below)
    meas = measure.regionprops(labeled_patches,
                               properties=['Area', 'Centroid', 'FilledArea',
                                           'Perimeter', 'EquivDiameter'],
                               intensity_image=None)

    centroidx = ulabels.astype(np.float64)
    centroidy = ulabels.astype(np.float64)
    filledarea = ulabels.astype(np.float64)
    perimeter = ulabels.astype(np.float64)
    diameter = ulabels.astype(np.float64)

    for i in np.arange(len(ulabels)):
        ix = inlabels[i]
        centroidx[i], centroidy[i] = meas[ix]['Centroid']
        filledarea[i] = meas[ix]['FilledArea']
        perimeter[i] = meas[ix]['Perimeter']
        diameter[i] = meas[ix]['EquivDiameter']

    #calculate min/max distances of centroids
    mindist = ulabels.astype(np.float64)
    maxdist = ulabels.astype(np.float64)
    meandist = ulabels.astype(np.float64)
    mediandist = ulabels.astype(np.float64)

    for i in np.arange(len(ulabels)):
        cxm = np.ma.array(np.append(
            centroidx,
            [0.1 * sim[0], 0.9 * sim[0], 0.1 * sim[0], 0.9 * sim[0]]),
                          mask=False)
        cym = np.ma.array(np.append(
            centroidy,
            [0.1 * sim[1], 0.1 * sim[1], 0.9 * sim[1], 0.9 * sim[1]]),
                          mask=False)
        cxm.mask[i] = True
        cym.mask[i] = True
        mindist[i] = np.sqrt(
            np.min((cxm - centroidx[i])**2 + (cym - centroidy[i])**2))
        maxdist[i] = np.sqrt(
            np.max((cxm - centroidx[i])**2 + (cym - centroidy[i])**2))
        meandist[i] = np.mean(
            np.sqrt((cxm - centroidx[i])**2 + (cym - centroidy[i])**2))
        mediandist[i] = np.median(
            np.sqrt((cxm - centroidx[i])**2 + (cym - centroidy[i])**2))

    inan = ~np.isnan(mediandist)  # boolean mask of valid (non-NaN) median distances
    tot_size = np.float64(im.shape[0]) * np.float64(im.shape[1])
    if (len(filledarea) > 0):
        patch_def = pd.DataFrame([
            dict(no=len(ulabels),
                 share=np.sum(filledarea) / tot_size,
                 minA=np.min(filledarea),
                 maxA=np.max(filledarea),
                 meanA=np.mean(filledarea),
                 medianA=np.median(filledarea),
                 minP=np.min(perimeter),
                 maxP=np.max(perimeter),
                 meanP=np.mean(perimeter),
                 medianP=np.median(perimeter),
                 minDia=np.min(diameter),
                 maxDia=np.max(diameter),
                 meanDia=np.mean(diameter),
                 medianDia=np.median(diameter),
                 minmnD=np.min(mindist),
                 maxmnD=np.max(mindist),
                 meanmnD=np.mean(mindist),
                 medianmnD=np.median(mindist),
                 minmxD=np.min(maxdist),
                 maxmxD=np.max(maxdist),
                 meanmxD=np.mean(maxdist),
                 medianmxD=np.median(maxdist),
                 minmD=np.min(meandist),
                 maxmD=np.max(meandist),
                 meanmD=np.mean(meandist),
                 minmdD=np.min(mediandist[inan]),
                 maxmdD=np.max(mediandist[inan]),
                 meanmdD=np.mean(mediandist[inan]),
                 skewD=np.mean(mediandist[inan] - meandist[inan]),
                 skewmxD=np.max(mediandist[inan] - meandist[inan])),
        ])
    else:
        patch_def = []

    image_label_overlay = label2rgb(labeled_patches, image=image_max)

    # plot results
    #plt.figure(figsize=(10, 5))
    plt.subplot(132)
    plt.imshow(image_max, cmap=plt.cm.gray, interpolation='nearest')
    plt.contour(labeled_patches, [0.5], linewidths=1.2, colors='y')
    plt.axis('off')
    plt.title('identified patches')
    plt.subplot(133)
    plt.imshow(image_label_overlay, interpolation='nearest')
    plt.axis('off')
    plt.title('labeled patches')
    plt.subplot(131)
    plt.imshow(im, cmap=plt.cm.gray)
    plt.axis('off')
    plt.title('input image')

    #plt.subplots_adjust(**margins)
    plt.show()

    return patch_def
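A minimal call might look like the following (the file name and the (min, max) patch-size window are made-up placeholders, in pixels at the stated 1 px = 1 mm resolution):

patch_stats = macfind_g("bb_face_horizontal.png", (51, 20000))
print(patch_stats)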