def worker(input_file_path, queue):
    int_values = []
    path_sections = input_file_path.split("/")

    for string in path_sections:
        if re.search(r'\d+', string) is not None:
            int_values.append(int(re.search(r'\d+', string).group()))

    file_count = int_values[-1]

    image = cv2.imread(input_file_path, cv2.IMREAD_GRAYSCALE)
    edges = img_as_ubyte(canny(image, sigma=canny_sigma))
    img_bw = cv2.threshold(edges, 250, 255, cv2.THRESH_BINARY)[1]

    point = _find_bottom_edge(img_bw)

    try:
        distance = len(img_bw) - point[1]
    except TypeError:
        try:
            edges = img_as_ubyte(canny(image, sigma=canny_sigma_closeup))
            img_bw = cv2.threshold(edges, 250, 255, cv2.THRESH_BINARY)[1]

            distance = len(img_bw) - point[1]
        except TypeError:
            distance = 0

    output = str(file_count) + ":" + str(distance) + "\n"
    queue.put(output)

    return output
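A hedged usage sketch for the worker above (the frame paths are placeholders, and canny_sigma, canny_sigma_closeup and _find_bottom_edge are assumed to exist in the surrounding module):

# Hedged sketch: fan frames out to worker processes and collect the
# "frame_count:distance" strings from the shared queue (paths are hypothetical).
import multiprocessing as mp

if __name__ == "__main__":
    queue = mp.Queue()
    frame_paths = ["frames/1/image.png", "frames/2/image.png"]  # placeholder paths
    jobs = [mp.Process(target=worker, args=(path, queue)) for path in frame_paths]
    for job in jobs:
        job.start()
    for job in jobs:
        job.join()
    results = [queue.get() for _ in frame_paths]
    print(results)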
Example #2
def CannyFilter(time,sep,rawForce,Meta,tauMultiple=25,**kwargs):
        """
        Uses a canny filter (from image processing) to detect change points

        Args:
            time,sep,force,Meta,tauMultiple: see ZScoreByDwell
        Returns:
            0/1-style array indicating where we think an event is or isn't happening
        """
        force = FilterToTau(time,tauMultiple,rawForce)
        n = force.size
        minV = np.min(force)
        maxV = np.max(force)
        # normalize the force 
        force -= minV
        force /= (maxV-minV)
        gradV = np.gradient(force)
        stdev = np.std(gradV)
        mu = np.mean(gradV)
        # convert the force to an image, to make Canny happy
        im = np.zeros((n,3))
        im[:,1] = force
        # set the 'sigma' value to our filtering value
        sigma = tauMultiple
        edges1 = feature.canny(im,sigma=sigma,low_threshold=0.8,
                               high_threshold=(1-10/n),use_quantiles=True)
        edges2 = feature.canny(im * -1,sigma=sigma,low_threshold=0.8,
                               high_threshold=(1-10/n),use_quantiles=True)
        # get where the algorithm thinks a transition is happening
        idx = np.where(edges1 | edges2)[0]
        idx = WalkEventIdx(rawForce,idx)
        # switch canny to be between -0.5 and 0.5
        return idx - 0.5
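The "convert the force to an image" trick above can be illustrated on synthetic data; a minimal, self-contained sketch (toy signal and toy sigma, not part of the original pipeline):

# Hedged sketch: run skimage's Canny on a 1-D trace by embedding it in a thin
# 2-D "image", as CannyFilter does above (synthetic step signal, toy parameters).
import numpy as np
from skimage import feature

signal = np.zeros(400)
signal[200:] = 1.0                        # a single step at index 200
signal += 0.01 * np.random.randn(400)     # small noise
signal = (signal - signal.min()) / (signal.max() - signal.min())

im = np.zeros((signal.size, 3))
im[:, 1] = signal                         # middle column carries the trace
edges = feature.canny(im, sigma=2, low_threshold=0.8,
                      high_threshold=0.95, use_quantiles=True)
event_rows = np.where(edges.any(axis=1))[0]
print(event_rows)                         # rows flagged near the step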
Example #3
def edge(ifile, ofile):
    img = io.imread(ifile, flatten=True)
    edges1 = feature.canny(img)
    edges2 = feature.canny(img, sigma=VALUE_SIGMA)
    out = np.uint8(edges2 * 255)

    io.imsave(ofile, out)

    # display results
    fig, (ax1, ax2, ax3) = plt.subplots(nrows=1, ncols=3, figsize=(8, 3), sharex=True, sharey=True)

    ax1.imshow(img, cmap=plt.cm.jet)
    ax1.axis('off')
    ax1.set_title('noisy image', fontsize=20)

    ax2.imshow(edges1, cmap=plt.cm.gray)
    ax2.axis('off')
    ax2.set_title(r'Canny filter, $\sigma=1$', fontsize=20)

    ax3.imshow(edges2, cmap=plt.cm.gray)
    ax3.axis('off')
    ax3.set_title(r'Canny filter, $\sigma=' + str(VALUE_SIGMA) + '$', fontsize=20)

    fig.subplots_adjust(wspace=0.02, hspace=0.02, top=0.9,
                        bottom=0.02, left=0.02, right=0.98)

    plt.show()
Example #4
def my_canny(img, fn = None, sigma=6, with_mask=False, save=False, show=False):
    height = img.shape[0]
    width = img.shape[1]
    if with_mask:
        import numpy as np
        mymask = np.zeros((height, width),'uint8')
        y1, x1 = 200, 150
        y2, x2 = 500, 350
        mymask[y1: y2, x1: x2] = 1
        ret = canny(img, sigma=sigma, mask=mymask)
    else:
        ret = canny(img, sigma)
        
    if show:
        from src.utils.io import showimage_pil
        showimage_pil(255*ret.astype('uint8'))
        
    if save:
        from src.utils.io import saveimage_pil
        if with_mask:
            feature = '_sigma' + str(sigma) + '_mask'
        else:
            feature = '_sigma' + str(sigma)


        saveimage_pil(255*ret.astype('uint8'), fn+feature+'.jpg',show=False)
    return ret
def compare(file1, file2):
    image1 = io.imread(file1, as_grey = True)
    image2 = io.imread(file2, as_grey = True)
    image1 = feature.canny(image1)
    image2 = feature.canny(image2)

    return ssim(image1, image2)
Example #6
def findSigma(image):
	sig = 3.5
	total = len(image)*len(image[0])
	flag = True
	cnt = 0
	while(flag):
		cnt = cnt+1
		edges = feature.canny(image, sigma=sig)
		edSum = np.sum(edges)
		tmp = total / edSum if edSum else 0
		print(sig)

		###if there are too many pixels, increase sig
		if tmp<200:
			sig = sig + .13
		###too few pixels, decr sig
		if tmp>401:
			sig = sig - .13
		elif tmp>200 and tmp <401:
			return edges

		##sometimes any sigma we put in will be incorrect so we let feature decide after some trying
		elif cnt>10 and tmp == 0:
			edges = feature.canny(image)
			return edges
Example #7
def edge(ifile, ofile):
    img = io.imread(ifile, flatten=True)
    edges1 = feature.canny(img)
    edges2 = feature.canny(img, sigma=VAL_SIGMA)
    out = np.uint8(edges2 * 255)

    io.imsave(ofile, out)
    return ofile
def main():
    for file_path in glob.glob("/home/lucas/Downloads/Lucas/GSK 10uM/*.JPG"):

        img = data.imread(file_path, as_grey=True)

        img = transform.resize(img, [600, 600])
        img_color = transform.resize(data.imread(file_path), [600, 600])

        img[img > img.mean() - 0.1] = 0

        # io.imshow(img)
        # io.show()
        #
        edges = canny(img)
        bordas_fechadas = closing(img > 0.1, square(15))  # closing gaps
        fill_cells = ndi.binary_fill_holes(bordas_fechadas)
        # io.imshow(fill_cells)
        # io.show()
        img_label = label(fill_cells, background=0)
        n = 0
        for x in regionprops(img_label):
            if x.area < 2000 and x.area > 300:
                n += 1
                print(x.area)
                minr, minc, maxr, maxc = x.bbox
                try:
                    out_path_name = file_path.split("/")[-1].rstrip(".JPG")
                    io.imsave("out/cell_{}_pic_{}_area_{}.png".format(n, out_path_name, str(round(x.area))),img_color[minr-3: maxr+3, minc-3: maxc+3])
                    #io.show()
                except:
                    pass
Example #9
    def _calc_crispness(self, grey_array):
        """Calculate three measures of the crispness of an channel.

        PARAMETERS
        ----------
        grey_array : 2D numpy array
            Raw data for the grey channel.

        PRODUCES
        --------
        crispnesses : list
            Three measures of the crispness in the grey channel of types:

            - ``sobel``, ``canny``, and ``laplace``
        """
        grey_array = grey_array/255
        sobel_var = filters.sobel(grey_array).var()
        canny_array = feature.canny(grey_array, sigma=1)
        canny_ratio = np.sum(canny_array) / float(canny_array.size)
        laplace_var = filters.laplace(grey_array, ksize=3).var()
        self.feature_data.extend([sobel_var, canny_ratio, laplace_var])
        if self.columns_out:
            self.column_names.extend(['crisp_sobel', 'crisp_canny',
                                      'crisp_laplace'])
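For reference, a minimal standalone sketch of the same three crispness measures outside the feature-extraction class (names and test image are illustrative):

# Hedged sketch: sobel/canny/laplace "crispness" on a plain 2-D grey array.
import numpy as np
from skimage import data, feature, filters

def crispness_measures(grey_array):
    grey_array = grey_array / 255.0
    sobel_var = filters.sobel(grey_array).var()
    canny_edges = feature.canny(grey_array, sigma=1)
    canny_ratio = canny_edges.mean()          # fraction of edge pixels
    laplace_var = filters.laplace(grey_array, ksize=3).var()
    return sobel_var, canny_ratio, laplace_var

print(crispness_measures(data.camera().astype(float)))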
Example #10
def analyze_ra_rotation(rotate_fn):
    """ Get RA axis center of rotation XY coordinates

    Args:
        rotate_fn (str): FITS file of RA rotation

    Returns:
        tuple(int): A tuple of integers corresponding to the XY pixel position
        of the center of rotation around RA axis
    """
    d0 = fits.getdata(rotate_fn)  # - 2048

    # Get center
    position = (d0.shape[1] // 2, d0.shape[0] // 2)
    size = (1500, 1500)
    d1 = Cutout2D(d0, position, size)

    d1.data = d1.data / d1.data.max()

    # Get edges for rotation
    rotate_edges = canny(d1.data, sigma=1.0)

    rotate_hough_radii = np.arange(100, 500, 50)
    rotate_hough_res = hough_circle(rotate_edges, rotate_hough_radii)
    rotate_accums, rotate_cx, rotate_cy, rotate_radii = \
        hough_circle_peaks(rotate_hough_res, rotate_hough_radii, total_num_peaks=1)

    return d1.to_original_position((rotate_cx[-1], rotate_cy[-1]))
def manipulate():
    edges_sigma = 1.75

    while len(framesToConvert) > 0:
        currentFrame = framesToConvert.pop(-1)

        if not os.path.isdir(currentFrame.basePath + "/raw/"):
            os.mkdir(currentFrame.basePath + "/raw/")
            os.mkdir(currentFrame.basePath + "/grayscale/")
            os.mkdir(currentFrame.basePath + "/cropped/")
            os.mkdir(currentFrame.basePath + "/resize150/")
            os.mkdir(currentFrame.basePath + "/edges_" + str(edges_sigma) + "/")

        cv2.imwrite(currentFrame.basePath + "/raw/FRAME_" + str(currentFrame.count) + ".jpg", currentFrame.image)

        grayscaleImage = cv2.cvtColor(currentFrame.image, cv2.COLOR_BGR2GRAY)
        cv2.imwrite(currentFrame.basePath + "/grayscale/FRAME_" + str(currentFrame.count) + ".jpg", grayscaleImage)

        croppedImage = grayscaleImage[0:RAW_IMAGE_HEIGHT, ((RAW_IMAGE_WIDTH - RAW_IMAGE_HEIGHT) // 2):RAW_IMAGE_WIDTH - ((RAW_IMAGE_WIDTH - RAW_IMAGE_HEIGHT) // 2)]
        cv2.imwrite(currentFrame.basePath + "/cropped/FRAME_" + str(currentFrame.count) + ".jpg", croppedImage)

        resizedImage = cv2.resize(croppedImage, (FINAL_IMAGE_SIZE, FINAL_IMAGE_SIZE), interpolation=cv2.INTER_AREA)
        cv2.imwrite(currentFrame.basePath + "/resize150/FRAME_" + str(currentFrame.count) + ".jpg", resizedImage)

        edges = img_as_ubyte(canny(resizedImage, sigma=edges_sigma))
        cv2.imwrite(currentFrame.basePath + "/edges_" + str(edges_sigma) + "/FRAME_" + str(currentFrame.count) + ".jpg", edges)
Example #12
    def edges(cls):
        from scipy import ndimage, misc
        import numpy as np
        from skimage import feature
        col = Image.open("f990.jpg")
        gray = col.convert('L')

        # Let numpy do the heavy lifting for converting pixels to pure black or white
        bw = np.asarray(gray).copy()

        # Pixel range is 0...255; keep only near-white pixels (threshold 245)
        bw[bw < 245]  = 0    # Black
        bw[bw >= 245] = 255 # White
        bw[bw == 0] = 254
        bw[bw == 255] = 0
        im = bw
        im = ndimage.gaussian_filter(im, 1)
        edges2 = feature.canny(im, sigma=2)
        labels, numobjects =ndimage.label(im)
        slices = ndimage.find_objects(labels)
        print('\n'.join(map(str, slices)))
        misc.imsave('f990_sob.jpg', im)
        return

        #im = misc.imread('f990.jpg')
        #im = ndimage.gaussian_filter(im, 8)
        sx = ndimage.sobel(im, axis=0, mode='constant')
        sy = ndimage.sobel(im, axis=1, mode='constant')
        sob = np.hypot(sx, sy)
        misc.imsave('f990_sob.jpg', edges2)
Example #13
    def animate(i):
        print('Frame %d' % i)
        plt.title('Frame %d' % i)

        image = data[i]

        hough_radii = np.arange(10, 100, 10)
        edges = feature.canny(data[i], sigma=3.0, low_threshold=0.4, high_threshold=0.8)
        hough_res = hough_circle(edges, hough_radii)

        centers = []
        accums = []
        radii = []

        for radius, h in zip(hough_radii, hough_res):
            peaks = feature.peak_local_max(h)
            centers.extend(peaks)
            accums.extend(h[peaks[:, 0], peaks[:, 1]])
            radii.extend([radius] * len(peaks))

        image = ski.color.gray2rgb(data[i])

        for idx in np.argsort(accums)[::-1][:5]:
            center_x, center_y = centers[idx]
            radius = radii[idx]
            cx, cy = circle_perimeter(center_y, center_x, radius)

            if max(cx) < 150 and max(cy) < 200:
                image[cy, cx] = (220, 20, 20)

        im.set_data(image)

        return im,
Example #14
def test_circles2():
    data = np.memmap("E:\\guts_tracking\\data\\fish202_aligned_masked_8bit_150x200x440.raw", dtype='uint8', shape=(440,200,150)).copy()

    i = 157

    hough_radii = np.arange(10, 100, 10)
    edges = feature.canny(data[i], sigma=3.0, low_threshold=0.4, high_threshold=0.8)
    hough_res = hough_circle(edges, hough_radii)

    centers = []
    accums = []
    radii = []

    for radius, h in zip(hough_radii, hough_res):
        peaks = feature.peak_local_max(h)
        centers.extend(peaks)
        accums.extend(h[peaks[:, 0], peaks[:, 1]])
        radii.extend([radius] * len(peaks))

    image = ski.color.gray2rgb(data[i])

    for idx in np.argsort(accums)[::-1][:5]:
        center_x, center_y = centers[idx]
        radius = radii[idx]
        cx, cy = circle_perimeter(center_y, center_x, radius)

        if max(cx) < 150 and max(cy) < 200:
            image[cy, cx] = (220, 20, 20)

    plt.imshow(image, cmap='gray')

    plt.show()
def edge(pathfile, maskpath):
    image = imread(pathfile, as_grey=True)
    mask = imread(maskpath, as_grey=True)
    mask = mask.astype(bool)
    edges = feature.canny(image, sigma=2)
    num = np.sum(edges[mask])
    return float(num), edges
Example #16
def detect_Hough(data):
    image = data.copy()
    edges = canny(image, sigma=10, low_threshold=60, high_threshold=90)

    # Detect circles between 80% and 100% of image semi-diagonal
    lx, ly = data.shape
    sizex, sizey = lx/2., ly/2.
    max_r = numpy.sqrt(sizex**2 + sizey**2) 
    hough_radii = numpy.linspace(0.5*max_r, 0.9 * max_r, 20)
    hough_res = hough_circle(edges, hough_radii)


    centers = []
    accums = []
    radii = []
    for radius, h in zip(hough_radii, hough_res):
        # For each radius, extract two circles
        num_peaks = 2
        peaks = peak_local_max(h, num_peaks=num_peaks)
        centers.extend(peaks)
        accums.extend(h[peaks[:, 0], peaks[:, 1]])
        radii.extend([radius] * num_peaks)

    # Use the most prominent circle
    idx = int(numpy.argmax(accums))
    center_x, center_y = centers[idx]
    radius = radii[idx]
    return center_x, center_y, radius
def find_edges(img, sigma = 4):
    img = feature.canny(img, sigma)

    selem = disk(10)
    img = dilation(img, selem)

    return img
    def blackout_outside(self, img, sigma=3):
        img_g = skic.rgb2gray(img)
        edges = skif.canny(img_g, sigma=sigma)

        hough_radii = np.arange(180, 210, 2)
        hough_res = skit.hough_circle(edges, hough_radii)

        centers = []
        accums = []
        radii = []

        for radius, h in zip(hough_radii, hough_res):
            # For each radius, extract two circles
            num_peaks = 1
            peaks = skif.peak_local_max(h, min_distance=40, num_peaks=num_peaks)
            if len(peaks) > 0:
                centers.extend(peaks)
                accums.extend(h[peaks[:, 0], peaks[:, 1]])
                radii.extend([radius] * num_peaks)

#                print radius, np.max(h.ravel()), len(peaks)

        if accums == [] and sigma==3:
            return self.blackout_outside(img, sigma=3)

    #     Draw the most prominent 5 circles
        image = (img.copy() / 4.0).astype(np.uint8)
        cx, cy = skid.circle(*self.average_hough_detections(hough_radii, hough_res))
        image[cy, cx] = img[cy, cx]

        return image
Example #19
def hough_circle_detection(image):
	# Load picture and detect edges
	edges = canny(image, sigma=3, low_threshold=10, high_threshold=50)

	fig, ax = plt.subplots(ncols=1, nrows=1, figsize=(5, 2))

	# Detect two radii
	hough_radii = np.arange(15, 30, 2)
	hough_res = hough_circle(edges, hough_radii)

	centers = []
	accums = []
	radii = []

	for radius, h in zip(hough_radii, hough_res):
		# For each radius, extract two circles
		num_peaks = 2
		peaks = peak_local_max(h, num_peaks=num_peaks)
		centers.extend(peaks)
		accums.extend(h[peaks[:, 0], peaks[:, 1]])
		radii.extend([radius] * num_peaks)

	# Draw the most prominent 5 circles
	image = color.gray2rgb(image)
	for idx in np.argsort(accums)[::-1][:5]:
		center_x, center_y = centers[idx]
		radius = radii[idx]
		cx, cy = circle_perimeter(center_y, center_x, radius)
		image[cy, cx] = (220, 20, 20)

	ax.imshow(image, cmap=plt.cm.gray)
	plt.show()
Example #20
    def process(self, im):
        (width, height, _) = im.image.shape

        img_adapted = im.prep(self.transform)

        if width > self.max_resized or height > self.max_resized:
            scale_height = self.max_resized / height
            scale_width = self.max_resized / width
            scale = min(scale_height, scale_width)
            img_adapted = resize(img_adapted, (int(width * scale), int(height * scale)))

        edges = canny(img_adapted, sigma=self.sigma)

        # Detect two radii
        # Calculate image diameter
        shape = im.image.shape
        diam = math.sqrt(shape[0] ** 2 + shape[1] ** 2)
        radii = np.arange(diam / 3, diam * 0.8, 2)
        hough_res = hough_circle(edges, radii)

        accums = []
        for radius, h in zip(radii, hough_res):
            # For each radius, extract two circles
            peaks = peak_local_max(h, num_peaks=1, min_distance=1)
            if len(peaks) > 0:
                accums.extend(h[peaks[:, 0], peaks[:, 1]])

        if len(accums) == 0:  # TODO: fix, should not happen
            return [0]

        idx = np.argmax(accums)
        return [accums[idx]]
Example #21
def get_image_dynamics(image):
    edges = canny(image, 1, .4, .6)
    lines = probabilistic_hough_line(edges, line_gap=6)
    TAN15 = 0.26794919243
    TAN75 = 3.73205080757
    EPS = 0.0000000005
    c1, c2, c3 = (0, 0, 0)
    dynamics = np.zeros(6, dtype=np.float64)
    for (x1, y1), (x2, y2) in lines:
        aslope = abs((x2 - x1) / (y2 - y1 + EPS))
        linelen = sqrt((x2 - x1) ** 2 + (y2 - y1) ** 2)
        if (aslope < TAN15):
            c1 = c1 + 1
            dynamics[0] = dynamics[0] + aslope
            dynamics[3] = linelen
        elif (aslope > TAN75):
            c2 = c2 + 1
            dynamics[1] = dynamics[1] + aslope
            dynamics[4] = linelen
        else:
            c3 = c3 + 1
            dynamics[2] = dynamics[2] + aslope
            dynamics[5] = linelen
    # average the accumulated slopes per orientation bucket
    if c1 > 0:
        dynamics[0] /= c1
    if c2 > 0:
        dynamics[1] /= c2
    if c3 > 0:
        dynamics[2] /= c3
    return dynamics
Example #22
    def segment(self, image):
        """
        """
        # image = src[:]
        if self.use_adaptive_threshold:
            bs = self.blocksize
            if not bs % 2:
                bs += 1

            markers = threshold_adaptive(image, bs)

            # n = markers[:].astype('uint8')
            n = markers.astype('uint8')
            # n[markers] = 255
            # n[invert(markers)] = 1
            # markers = n
            return n
        else:
            markers = zeros_like(image)
            # print('image',image.max(), image.min())
            # print('le', image<self.threshold_low)
            # print('ge', image>self.threshold_high)
            markers[image <= self.threshold_low] = 1
            markers[image > self.threshold_high] = 255

        #elmap = sobel(image, mask=image)
        elmap = canny(image, sigma=1)
        wsrc = watershed(elmap, markers, mask=image)

        return invert(wsrc)
Example #23
    def test_tif_series_2otsu_Y23(self):
        frame_list = glob("./test_resources/Y23_0.4X_raw_tifs/*.tif")
        for idx in range(400,450):
            frame = frame_list[idx]
            self.image = np.array(Image.open(frame))
            self.thresholds = otsu(self.image, nclasses=2)
            if idx==425:
                # Plot intensity histogram and thresholds
                plt.hist(self.image.flatten(), bins=256, log=True)
                for t in self.thresholds:
                    plt.axvline(t, color='r')
                plt.title('Y23 using class=2')
                plt.draw()
                plt.savefig('Y23_2-class.png', dpi=300)
                plt.clf()
                Image.open('Y23_2-class.png').show()

                # Plot actual data and thresholded
                edge = feature.canny(self.image>self.thresholds[0])
                highlight = np.copy(self.image)
                highlight[edge] = 2*self.image.max() - self.image.min()
                fig = plt.figure()
                ax11 = fig.add_subplot(221)
                ax12 = fig.add_subplot(222)
                ax21 = fig.add_subplot(223)
                ax22 = fig.add_subplot(224)
                ax11.imshow(self.image)
                ax12.imshow(self.image>self.thresholds[0])
                ax21.imshow(highlight)
                ax22.imshow(edge)
                plt.show()
Example #24
def getBoundary(img, debug=False, **kwds):
    from skimage import feature
    edge = feature.canny(img, **kwds)
    start_row = None
    stop_row = None
    middle_col = (edge.shape[1]-1)//2
    start_cols = np.ones(edge.shape[0], dtype=int)*middle_col
    stop_cols = np.ones(edge.shape[0], dtype=int)*middle_col
    for i, row in enumerate(edge):
        isedge = row > 0
        if isedge.any():
            w = np.where(isedge)[0]
            start_cols[i], stop_cols[i] = w[0], w[-1]
            # set the row that starts to have object to be measured
            if start_row is None:
                start_row = i
            stop_row = i+1
        continue
    if debug:
        print(start_row, stop_row)
        # for start, stop in zip(start_cols, stop_cols):
        #    print(start, stop)
        #    continue
        from matplotlib import pyplot as plt
        plt.figure()
        plt.plot(start_cols, '.')
        plt.plot(stop_cols, '.')
        plt.savefig("edge.png")
        plt.close()
    return start_row, stop_row, start_cols, stop_cols
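A hedged usage sketch for getBoundary on a synthetic bright disk (toy data only):

# Hedged sketch: the reported rows/columns should roughly bracket the disk.
import numpy as np

img = np.zeros((200, 200))
yy, xx = np.ogrid[:200, :200]
img[(yy - 100) ** 2 + (xx - 100) ** 2 < 60 ** 2] = 1.0   # bright disk

start_row, stop_row, start_cols, stop_cols = getBoundary(img, sigma=2)
print(start_row, stop_row)   # roughly 40 and 160 for this disk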
def add_auto_masks_area(areaFile,addMasks=False):
    from skimage.morphology import binary_dilation, binary_erosion, disk
    from skimage import exposure
    from skimage.transform import hough_circle
    from skimage.morphology import convex_hull_image
    from skimage.feature import canny
    from skimage.draw import circle_perimeter

    import h5py


    MASKs = np.zeros(areaFile.attrs['ROI_patches'].shape)
    if 'ROI_masks' in areaFile.attrs.keys():
        print('Masks have already been created')
        awns = input('Would you like to redo them from scratch: (answer y/n): ')
    else:
        awns = 'y'

    if awns=='y':
        MASKs = np.zeros(areaFile.attrs['ROI_patches'].shape)
        for i in range(areaFile.attrs['ROI_patches'].shape[2]):
            patch = areaFile.attrs['ROI_patches'][:,:,i]
            
            tt0 = exposure.equalize_hist(patch)
            tt = 255*tt0/np.max(tt0)
            thresh = 1*tt>0.3*255
            thresh2 = 1*tt<0.1*255

            tt[thresh] = 255
            tt[thresh2] = 0



            edges = canny(tt, sigma=2, low_threshold=20, high_threshold=30)
            try_radii = np.arange(3,5)
            res = hough_circle(edges, try_radii)
            ridx, r, c = np.unravel_index(np.argmax(res), res.shape)
            
            r, c, try_radii[ridx]
            image = np.zeros([20,20,4])
            cx, cy = circle_perimeter(c, r, try_radii[ridx]+2)
            
            try:
                image[cy, cx] = (220, 80, 20, 1)
                original_image = np.copy(image[:,:,3])
                chull = convex_hull_image(original_image)
                image[chull,:] = (255, 0, 0, .4)
                
            except:
                pass
            
            
            MASKs[:,:,i] = (1*image[:,:,-1]>0)
        if addMasks:
            areaFile.attrs['ROI_masks'] = MASKs
    else:
        pass    

    return np.array(MASKs)
Example #26
def _detect_spots_hough_circle(image, radius):
	edges = canny(image)
	imshow(edges)
	show()
	hough_radii = np.arange(radius/2, radius*2, 10)
	hough_circles = hough_circle(edges, hough_radii)
	
	print(hough_circles)
Example #27
    def __call__(self,sigma, keepSourceWindow=False):
        self.start(keepSourceWindow)
        nDim=len(self.tif.shape) 
        newtif=np.copy(self.tif)

        if self.tif.dtype == np.float16:
            g.alert("Canny Edge Detection does not work on float16 images. Change the data type to use this function.")
            return None

        if nDim==2:
            newtif=feature.canny(self.tif,sigma)
        else:
            for i in np.arange(len(newtif)):
                newtif[i] = feature.canny(self.tif[i],sigma)
        self.newtif=newtif.astype(np.uint8)
        self.newname=self.oldname+' - Canny '
        return self.end()
def getEdge(img, edgepath, **kwds):
    from skimage import feature
    edge = feature.canny(img, **kwds)
    edge = np.array(edge, dtype="float32")
    edgeimg = io.ImageFile(path=edgepath)
    edgeimg.data = edge
    edgeimg.save()
    return edge
Example #29
def test():
    
    image_series = glob.glob('full_dewar/puck*_*in*_200.jpg')
    templates = [n.replace('200', '*') for n in image_series]
    template_empty = imread('template_empty.jpg')
    h, w = template_empty.shape
    
    print('len(templates)', len(templates))
    fig, axes = plt.subplots(3, 4)
    a = axes.ravel()
    k = 0
    used = []
    while k<12:
    #for template in templates[:12]:
        template = random.choice(templates)
        if template in used:
            pass
        else:
            used.append(template)
            original_image = img_as_float(combine(template, length=200))
            ax = a[k]
            gray_image = color.rgb2gray(original_image)
            img_sharp = unsharp(gray_image)
            edges = canny(img_sharp, sigma=3.0, low_threshold=0.04, high_threshold=0.05)
            med_unsharp = median(img_sharp/img_sharp.max(), selem=disk(4))
            sharp_med_unsharp = unsharp(med_unsharp)
            edges_med = canny(sharp_med_unsharp, sigma=7)
            match = match_template(gaussian(edges_med, 4), template_empty)
            print('match.max()')
            print(match.max())
            peaks = peak_local_max(gaussian(match, 3), threshold_abs=0.3, indices=True)
            print('template', template)
            print('# peaks', len(peaks))
            print(peaks)
            ax.imshow(original_image) #, cmap='gray')
            #ax.imshow(gaussian(edges_med, 3), cmap='gnuplot')
            for peak in peaks:
                y, x = peak
                rect = plt.Rectangle((x, y), w, h, edgecolor='g', linewidth=2, facecolor='none')
                ax.add_patch(rect)
            #ax[edges] = (0, 1, 0)
            #image = img_as_int(original_image)
            #image[edges==True] = (0, 255, 0)
            ax.set_title(template.replace('full_dewar/', '').replace('_*.jpg', '') + ' detected %s' % (16-len(peaks),))
            k += 1
    plt.show()
def extract_canny(image):
    edges = canny(image, sigma=4)

    fig, ax = plt.subplots(nrows=1, ncols=1)
    ax.imshow(edges, cmap=plt.cm.gray)
    ax.axis('off')
    ax.set_title(r'Canny filter, $\sigma=4$', fontsize=20)
    plt.show()
Example #31
def canny_hsv(image):
    return canny(image)
Example #32
def average_pixel_width(img):
    im = IMG.open(img)
    im_array = np.asarray(im.convert(mode='L'))
    edges_sigma1 = feature.canny(im_array, sigma=3)
    apw = (float(np.sum(edges_sigma1)) / (im.size[0]*im.size[1]))
    return apw*100
Example #33
for root, dirs, files in os.walk(rootDir + "Albums"):  
    for filename in files:
        if  '.csv' not in filename and 'Uploaded' not in root and 'Blurred' not in root:
            imagePath = root + "\\" + filename #os.path.join(root, filename)
            images.append(imagePath)    
      

for image in images:    
    #try:
    im = scipy.misc.imread(image)

    img_gray = rgb2gray(im) 

    edge_roberts = roberts(img_gray)
    edge_sobel = sobel(img_gray)
    edge_canny1 = feature.canny(img_gray)
    edge_canny2 = feature.canny(img_gray, sigma=0.2)
    edge_canny3 = feature.canny(img_gray, sigma=0.25)
    edge_canny4 = feature.canny(img_gray, sigma=0.3)
    edge_prewitt = prewitt(img_gray)

        
      
    noise_gaussian = random_noise(im, "gaussian") 
    noise_salt = random_noise(im, "salt") 
    noise_pepper = random_noise(im, "pepper") 
    noise_speckle = random_noise(im, "speckle") 

    blur_avg = cv2.blur(im,(5,5))
    blur_gaussian =  cv2.GaussianBlur(im,(5,5),0)
    blur_median = cv2.medianBlur(im,5)
    w = np.sum(np.sum(f, axis=-1), axis=-1) + 0.00001
    filtered = np.expand_dims(
        np.sum(np.sum(patch_sdm * f, axis=3), axis=2) / w, -1)

    return filtered


# load image as grey
fname = './lena.tiff'
#fname = './input.png'
im = skimage.img_as_float(io.imread(fname))
im_grey = color.rgb2grey(im)

# canny edge detection
sigma = 1.
edge_map = feature.canny(im_grey, sigma)

# estimate defocus map
std_1 = 1.
std_2 = std_1 * 2.5
ratio = gaussian_gradient_magnitude(
    im_grey, std_1) / gaussian_gradient_magnitude(im_grey, std_2)

# get edge index
sdm = np.zeros_like(ratio)
mx, my = np.where(edge_map * (ratio > 1.01) * (ratio <= std_2 / std_1))

# enhancement
sdm[mx, my] = np.sqrt(((ratio[mx, my]**2 * std_1**2 - std_2**2) + 0.001) /
                      (1 - ratio[mx, my]**2))
print(img.shape)
img = img.astype('float32')
print("check1")
dx = ndimage.sobel(img, 0)  # horizontal derivative
print("check2")
dy = ndimage.sobel(img, 1)  # vertical derivative
mag = np.hypot(dx, dy)  # magnitude
print(mag)
mag *= 255.0 / np.max(mag)  # normalize (Q&D)
plt.imshow(img, cmap=plt.get_cmap('gray'))
plt.show()
scipy.misc.imsave('sobel.jpg', mag)

im = io.imread('sobel.jpg')
# Compute the Canny filter for two values of sigma
edges = np.uint8(feature.canny(im, sigma=1) * 255)

print(edges.shape)

newArr = []

for y in range(edges.shape[1]):
    for x in range(edges.shape[0]):
        if (edges[x][y] == 255):
            tempVar = str(x) + "," + str(y)
            newArr.append(tempVar)

plt.imshow(edges)

plt.show()
scipy.misc.imsave('canny.jpg', edges)
    def load_edge(self, img, mask):
        sigma = self.sigma
        return canny(img, sigma=sigma).astype(float)
def edge_detection(frames, n_samples, method='canny', track=False):
    """
    To detect the edges of the wells, fill and label them to
    determine their centroids.

    Parameters
    -----------
    frames : Array
        The frames to be processed and determine the
        sample temperature from.
    n_samples : Int
        The number of samples in the input video.
    method : String
        Edge detection algorithm to be used
    track : Boolean
        Whether to enable spatial tracking (real-time support is planned for the future)

    Returns
    --------
    labeled_samples : Array
        All the samples in the frame are labeled
        so that they can be used as props to get pixel data.
    """

    # when enable spatial tracking
    if track:
        # type cast to ndarray
        if not isinstance(frames, np.ndarray):
            frames_array = np.array(frames)
        else:
            frames_array = frames

        video_length = len(frames_array)
        video_with_label = np.empty(frames_array.shape, dtype=int)
        background = frames_array.mean(0)
        alpha = 2  # intensity threshold
        counter = 0
        missing = 0
        boolean_mask = None
        for time in range(video_length):
            # remove background proportional to time in video
            img_lin_bg = frames_array[time] - background * time / (
                video_length - 1)
            # apply sobel filter
            edges_lin_bg = filters.sobel(img_lin_bg)
            #  booleanize with certain threshold alpha
            edges_lin_bg = edges_lin_bg > edges_lin_bg.mean() * alpha
            # erode edges, fill in holes
            edges_lin_bg = ndimage.binary_erosion(edges_lin_bg,
                                                  mask=boolean_mask)
            edges_lin_bg = binary_fill_holes(edges_lin_bg)

            # find progressive background
            if time == 0:
                progressive_background = 0
            else:
                progressive_background = frames_array[0:time].mean(0)
            # remove background
            img_prog_bg = frames_array[time] - progressive_background
            # apply sobel filter
            edges_prog_bg = filters.sobel(img_prog_bg)
            #  booleanize with certain threshold alpha
            edges_prog_bg = edges_prog_bg > edges_prog_bg.mean() * alpha
            # erode edges, fill in holes
            edges_prog_bg = ndimage.binary_erosion(edges_prog_bg,
                                                   mask=boolean_mask)
            edges_prog_bg = binary_fill_holes(edges_prog_bg)

            # combining
            combined_samples = edges_lin_bg + edges_prog_bg
            #  make the boolean mask for the for frame
            if time == 0:
                boolean_mask = ~ndimage.binary_erosion(combined_samples)
                # boolean_mask = ~combined_samples

            # labeled_samples = ndimage.binary_erosion(labeled_samples, mask=boolean_mask)
            # labeled_samples = binary_fill_holes(labeled_samples, structure=np.ones((2,2)))

            # remove stray pixels and label
            combined_samples = remove_small_objects(combined_samples,
                                                    min_size=2)
            labeled_samples = label(combined_samples)

            # confirm matching labels vs n_samples
            unique, counts = np.unique(labeled_samples, return_counts=True)
            label_dict = dict(zip(unique, counts))

            #  in case of missing label
            if len(label_dict) < n_samples + 1:
                trial = 0
                # keep eroding to separate the samples
                while len(label_dict) < n_samples + 1 and trial < 10:
                    labeled_samples = ndimage.binary_erosion(labeled_samples,
                                                             mask=boolean_mask)
                    labeled_samples = label(labeled_samples)
                    unique, counts = np.unique(labeled_samples,
                                               return_counts=True)
                    label_dict = dict(zip(unique, counts))
                    trial += 1
                # print('missing:', time)
                missing += 1

            # in case of extra label identify
            if len(label_dict) > n_samples + 1:
                trial = 0
                # keep removing smaller labels until matching with n_samples
                while len(label_dict) > n_samples + 1 and trial < 10:
                    temp = min(label_dict.values())
                    labeled_samples = remove_small_objects(labeled_samples,
                                                           min_size=temp + 1)
                    unique, counts = np.unique(labeled_samples,
                                               return_counts=True)
                    label_dict = dict(zip(unique, counts))
                    trial += 1

                # print('excess:', time, val)
                counter += 1

            video_with_label[time] = labeled_samples
        # print(counter)
        # print(missing)
        return video_with_label

    # when disable spatial tracking (default)
    else:
        labeled_samples = None
        size = None
        thres = None
        props = None

        # use canny edge detection method
        if method == 'canny':
            for size in range(15, 9, -1):
                for thres in range(1500, 900, -100):
                    edges = feature.canny(frames[0] / thres)

                    # fig = plt.figure(2)  # for debugging
                    # plt.imshow(edges)
                    # plt.show()

                    filled_samples = binary_fill_holes(edges)
                    cl_samples = remove_small_objects(filled_samples,
                                                      min_size=size)
                    labeled_samples = label(cl_samples)
                    props = regionprops(labeled_samples,
                                        intensity_image=frames[0])

                    # fig = plt.figure(3)
                    # plt.imshow(filled_samples)  # for debugging

                    if len(props) == n_samples:
                        break
        #             if thres == 1000 and len(props) != n_samples:
        #                 print('Not all the samples are being recognized with
        #                 the set threshold range for size ',size)
                if len(props) == n_samples:
                    break
            if size == 10 and thres == 1000 and len(props) != n_samples:
                print('Not all the samples are being recognized with the set \
                    minimum size and threshold range')
            # plt.show()  # for debugging
            return labeled_samples

        # use sobel edge detection method
        if method == 'sobel':
            for size in range(15, 9, -1):
                # use sobel
                edges = filters.sobel(frames[0])
                edges = edges > edges.mean() * 3  # booleanize data

                # fig = plt.figure(2)  # for debugging
                # plt.imshow(edges)
                # plt.colorbar()

                #  fill holes and remove noise
                filled_samples = binary_fill_holes(edges)
                cl_samples = remove_small_objects(filled_samples,
                                                  min_size=size)
                labeled_samples = label(cl_samples)
                props = regionprops(labeled_samples, intensity_image=frames[0])

                # fig = plt.figure(3)
                # plt.imshow(filled_samples)  # for debugging

                if len(props) == n_samples:
                    break
            if size == 10 and len(props) != n_samples:
                print('Not all the samples are being recognized with the set \
                    minimum size and threshold range')
            # plt.show()  # for debugging
            return labeled_samples
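A hedged usage sketch for edge_detection (the frame stack below is a random placeholder, not real IR video): the labeled output feeds regionprops to recover per-sample centroids, as the docstring describes.

# Hedged sketch: label wells in a synthetic frame stack and read out centroids.
import numpy as np
from skimage.measure import regionprops

frames = np.random.rand(5, 120, 160) * 2000        # placeholder "video"
labeled = edge_detection(frames, n_samples=3, method='canny')
if labeled is not None:
    centroids = [r.centroid for r in regionprops(labeled)]
    print(centroids)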
Example #38
import numpy as np
import matplotlib.pyplot as plt

from skimage import data, color
from skimage.transform import hough_circle, hough_circle_peaks
from skimage.feature import canny
from skimage.draw import circle_perimeter
from skimage.util import img_as_ubyte

# Load picture and detect edges
image = img_as_ubyte(data.coins()[160:230, 70:270])
edges = canny(image, sigma=3, low_threshold=10, high_threshold=50)

# Detect two radii
hough_radii = np.arange(20, 35, 2)
hough_res = hough_circle(edges, hough_radii)

# Select the most prominent 3 circles
accums, cx, cy, radii = hough_circle_peaks(hough_res,
                                           hough_radii,
                                           total_num_peaks=3)

# Draw them
fig, ax = plt.subplots(ncols=1, nrows=1, figsize=(10, 4))
image = color.gray2rgb(image)
for center_y, center_x, radius in zip(cy, cx, radii):
    circy, circx = circle_perimeter(center_y, center_x, radius)
    image[circy, circx] = (220, 20, 20)

ax.imshow(image, cmap=plt.cm.gray)
plt.show()
Example #39
def keep_tile(tile, tile_size, tissue_threshold):
    """
    Determine if a tile should be kept.

    This filters out tiles based on size and a tissue percentage
    threshold, using a custom algorithm. If a tile has height &
    width equal to (tile_size, tile_size), and its tissue coverage
    is greater than or equal to the given percentage threshold,
    then it will be kept; otherwise it will be filtered out.

    Args:
     tile: tile is a 3D NumPy array of shape (tile_size, tile_size, channels).
     tile_size: The width and height of a square tile to be generated.
     tissue_threshold: Tissue percentage threshold.

    Returns:
     A Boolean indicating whether or not a tile should be kept for
     future usage.
    """
    if tile.shape[0:2] == (tile_size, tile_size):
        tile_orig = tile

        # Check 1
        # Convert 3D RGB image to 2D grayscale image, from
        # 0 (dense tissue) to 1 (plain background).
        tile = rgb2gray(tile)
        # 8-bit depth complement, from 1 (dense tissue)
        # to 0 (plain background).
        tile = 1 - tile
        # # Canny edge detection with hysteresis thresholding.
        # # This returns a binary map of edges, with 1 equal to
        # # an edge. The idea is that tissue would be full of
        # # edges, while background would not.
        tile = canny(tile)
        # Binary closing, which is a dilation followed by
        # an erosion. This removes small dark spots, which
        # helps remove noise in the background.
        tile = binary_closing(tile, disk(10))
        # Binary dilation, which enlarges bright areas,
        # and shrinks dark areas. This helps fill in holes
        # within regions of tissue.
        tile = binary_dilation(tile, disk(10))
        # Fill remaining holes within regions of tissue.
        tile = binary_fill_holes(tile)
        # Calculate percentage of tissue coverage.
        percentage = tile.mean()
        check1 = percentage >= tissue_threshold

        # Check 2
        # Convert to optical density values
        tile = optical_density(tile_orig)
        # Threshold at beta
        beta = 0.15
        tile = np.min(tile, axis=2) >= beta
        # Apply morphology for same reasons as above.
        tile = binary_closing(tile, disk(2))
        tile = binary_dilation(tile, disk(2))
        percentage = tile.mean()
        check2 = percentage >= tissue_threshold
        #set the tile back to the original tile
        tile = tile_orig
        return check1 and check2

    else:
        return False
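keep_tile relies on an optical_density helper that is not shown in this snippet; a minimal sketch assuming the common OD = -log((I + 1) / 240) convention for 8-bit RGB tiles (an assumption, not necessarily the author's exact formula):

# Hedged sketch of an optical_density helper for 8-bit RGB tiles.
import numpy as np

def optical_density(tile):
    tile = tile.astype(np.float64)
    return -np.log((tile + 1) / 240.0)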
    def Fix_Cam_Height_Analysis(self):
        '''
        step 1 : Line detection using canny edge detection
        '''
        if self.isPlaying == False:

            self.current_Frame = int(self.cap.get(cv2.CAP_PROP_POS_FRAMES))

            self.Current_Frame.set(self.current_Frame)

            success, Height_Analysis_Img = self.cap.read()
            '''
            step 1 : Line detection using canny edge detection
            
            Uses canny edge detection and then finds (small) lines using probabilistic
            hough transform as edgelets.
            Parameters
            ----------
            image: ndarray
                Image for which edgelets are to be computed.
            sigma: float
                Smoothing to be used for canny edge detection.
            Returns
            -------
            locations: ndarray of shape (n_edgelets, 2)
                Locations of each of the edgelets.
            directions: ndarray of shape (n_edgelets, 2)
                Direction of the edge (tangent) at each of the edgelet.
            strengths: ndarray of shape (n_edgelets,)
                Length of the line segments detected for the edgelet.
            '''
            #==============================================================================
            #             #compute median of singel channel pixel intensities
            #             median_intensity = np.median(Height_Analysis_Img)
            #
            #             #apply cutomatic thresholding
            #             sigma = 0.33
            #             lower = int (max(0, (1.0 - sigma) * median_intensity))
            #             upper = int (min(255, (1.0 + sigma) * median_intensity))
            #
            #             Height_Analysis_Img_Gray = cv2.cvtColor(Height_Analysis_Img, cv2.COLOR_BGR2GRAY)
            #
            #             Height_Analysis_Img_blurred = cv2.GaussianBlur(Height_Analysis_Img_Gray, (3, 3),0)
            #
            #             Canny_edge = cv2.Canny(Height_Analysis_Img_blurred, lower, upper)
            #==============================================================================
            Height_Analysis_Img_Gray = color.rgb2gray(Height_Analysis_Img)

            sigma = 3

            Canny_edge = feature.canny(Height_Analysis_Img_Gray, sigma)

            lines = transform.probabilistic_hough_line(Canny_edge,
                                                       line_length=3,
                                                       line_gap=2)

            locations = []
            directions = []
            strengths = []

            for p0, p1 in lines:
                p0, p1 = np.array(p0), np.array(p1)
                locations.append((p0 + p1) / 2)
                directions.append(p1 - p0)
                strengths.append(np.linalg.norm(p1 - p0))

            #convert to numpy arrays and normalize
            locations = np.array(locations)
            directions = np.array(directions)
            strengths = np.array(strengths)

            directions = np.array(directions) / \
                np.linalg.norm(directions, axis=1)[:, np.newaxis]
            """
            Step 2 : Compute lines in homogeneous system for edgelets.
            Parameters
            ----------
            edgelets: tuple of ndarrays
                (locations, directions, strengths) as computed by `compute_edgelets`.
            Returns
            -------
            lines: ndarray of shape (n_edgelets, 3)
                Lines at each of edgelet locations in homogeneous system.
            """

            cv2.imshow("canny edge", Canny_edge.astype(np.uint8) * 255)
            cv2.waitKey(5000)

            cv2.destroyWindow("canny edge")
Example #41
def compute(addr):
    loc = getgeocodfromadd(addr)
    image = getpic(loc)

    #image = io.imread('staticmap3.png')

    plt.imshow(image)

    img2 = image.copy()
    mask = image[:, :, 0] > 80
    img2[mask] = [0, 0, 0]

    mask = image[:, :, 2] > 80
    img2[mask] = [0, 0, 0]
    #plt.imshow(img2)

    img3 = rgb2gray(img2)
    plt.imshow(img3, 'gray')
    plt.hist(img3.ravel(), bins=256, histtype='step', color='black')
    elevation = sobel(img3)
    plt.imshow(elevation)
    img3_denoised = filters.median(img3, selem=np.ones((7, 7)))

    f, (ax0, ax1) = plt.subplots(1, 2, figsize=(15, 5))
    ax0.imshow(img3)
    ax1.imshow(img3_denoised)

    masks = np.zeros_like(img3_denoised)
    masks[img3_denoised > 0.2] = 1
    plt.imshow(masks)

    edges = feature.canny(img3_denoised, sigma=3)

    dt = distance_transform_edt(~edges)
    plt.imshow(dt)

    local_max = feature.peak_local_max(dt, indices=False, min_distance=5)
    plt.imshow(local_max, cmap='gray')

    peak_idx = feature.peak_local_max(dt, indices=False, min_distance=5)
    peak_idx[:5]

    plt.plot(peak_idx[:, 1], peak_idx[:, 0], 'r.')
    plt.imshow(dt)

    markers = measure.label(local_max)
    labels = morphology.watershed(-dt, markers)
    plt.imshow(segmentation.mark_boundaries(image, labels))
    plt.imshow(color.label2rgb(labels, image))
    plt.imshow(color.label2rgb(labels, image, kind='avg'))
    regions = measure.regionprops(labels, intensity_image=img3)
    region_means = [r.mean_intensity for r in regions]
    plt.hist(region_means, bins=20)

    model = KMeans(n_clusters=2)
    region_means = np.array(region_means).reshape(-1, 1)

    model.fit(region_means)
    print(model.cluster_centers_)

    bg_fg_labels = model.predict(region_means)
    #bg_fg_labels

    classified_labels = labels.copy()
    for bg_fg, region in zip(bg_fg_labels, regions):
        classified_labels[tuple(region.coords.T)] = bg_fg

    plt.imshow(color.label2rgb(classified_labels, image))

    fr_cvg = for_cvr(masks)
    print(fr_cvg)
Example #42
def edge_based_segmentation(image):
    im = cv2.cvtColor(image[0], cv2.COLOR_BGR2GRAY)
    edges = canny(im / 255.)
    fill = ndi.binary_fill_holes(edges)
    plt.imshow(fill.astype('float'))
for im_idx in np.r_[0:num_imgs]:
    print("Starting img %u/%u" % (im_idx, num_imgs))
    filename = img_paths[im_idx][-12:]
    img = pyvips.Image.new_from_file(img_paths[im_idx], access="sequential")
    this_img = np.ndarray(
        buffer=img.write_to_memory(),
        dtype=format_to_dtype[img.format],
        shape=[img.height, img.width, img.bands],
    )

    # this_img = img_imports[im_idx]
    im_greyscale = ski.color.rgb2gray(this_img)

    # Detect the edges of the lunar disk
    edges = feature.canny(im_greyscale, sigma=sm_sigma)
    edge_pts = np.nonzero(edges)
    my = int(np.mean(edge_pts[0])) + np.r_[-winsize, winsize]
    mx = int(np.mean(edge_pts[1])) + np.r_[-winsize, winsize]
    print("Edges detected")
    # Find the centre by minimizing the variance of the radius function
    x0 = np.mean(edge_pts, axis=1)
    opt_res = scp.optimize.minimize(
        lambda x: cost_fn(x, edge_pts),
        x0,
        method="nelder-mead",
        options={
            "xtol": 1e-8,
            "disp": opt_verbose
        },
    )
Example #44
import skimage
import skimage.io
import skimage.color
import skimage.measure
import skimage.filters
import matplotlib.pyplot as plt
from skimage import feature

# Read in CT scan from
# https://www.yxlon.com/Yxlon/media/Content/Applications/Aerospace/Turbine%20blades/CT-feanbeame_Aerospace_Turbine_blade_001_144.jpg?ext=.jpg
filename = 'istockphoto-683494078-612x612.jpg'
img = skimage.io.imread(filename)

# Convert to greyscale
img_grey = skimage.color.rgb2grey(img)

# Compute contours
contours = skimage.measure.find_contours(img_grey, 0.8)

# Compute Canny filter
edges1 = feature.canny(img_grey, sigma=1)
edges2 = feature.canny(img_grey, sigma=3)

# Compute Edge Operators
edge_roberts = skimage.filters.roberts(img_grey)
edge_sobel = skimage.filters.sobel(img_grey)

# Show original
fig, ax = plt.subplots(nrows=3, ncols=2)
ax[0, 0].imshow(img)
ax[0, 0].set_title('Original')

# Show contour
ax[0, 1].imshow(img)
ax[0, 1].set_title('With Contours')
for n, contour in enumerate(contours):
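    # (snippet truncated here; a plausible completion, not the original code:
    # overlay each contour on the image shown in ax[0, 1])
    ax[0, 1].plot(contour[:, 1], contour[:, 0], linewidth=1)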
Example #45
import matplotlib.pyplot as plt
import numpy as np
from skimage.feature import canny
from scipy import misc
from scipy.ndimage import label
from scipy.ndimage import center_of_mass
from scipy.spatial import distance

from skimage.feature import corner_fast, corner_peaks, corner_orientations

img1 = misc.imread("155c012t4.tif", mode='L')
labels1, num_features1 = label(img1)
location1 = (center_of_mass(img1, labels1, 1))
print("Object {} center of mass at {}".format(1, location1))
center_1_x, center_1_y = location1[1], location1[0]
edges = canny(img1, sigma=8.8)

corner_response = corner_fast(edges, threshold=0.5)
corner_pos = corner_peaks(corner_response)

fig, axes = plt.subplots(ncols=2, figsize=(8, 4.5))
ax = axes.ravel()
ax[0] = plt.subplot(1, 2, 1)
ax[1] = plt.subplot(1, 2, 2)

ax[0].imshow(img1)
ax[0].plot(center_1_x, center_1_y, 'r^', markersize=20)
radius_array = np.zeros(len(corner_pos))

for k in range(0, len(corner_pos)):
    y, x = corner_pos[k]
import numpy as np
import matplotlib.pyplot as plt
from scipy import ndimage as ndi

from skimage import feature

# Generate noisy image of a square
im = np.zeros((128, 128))
im[32:-32, 32:-32] = 1

im = ndi.rotate(im, 15, mode='constant')
im = ndi.gaussian_filter(im, 4)
im += 0.2 * np.random.random(im.shape)

# Compute the Canny filter for two values of sigma
edges1 = feature.canny(im)
edges2 = feature.canny(im, sigma=3)

# display results
fig, (ax1, ax2, ax3) = plt.subplots(nrows=1,
                                    ncols=3,
                                    figsize=(8, 3),
                                    sharex=True,
                                    sharey=True)

ax1.imshow(im, cmap=plt.cm.jet)
ax1.axis('off')
ax1.set_title('noisy image', fontsize=20)

ax2.imshow(edges1, cmap=plt.cm.gray)
ax2.axis('off')
# Hough transform

from skimage import data, feature
from skimage.transform import hough_circle
from skimage.draw import circle_perimeter

image = data.coins()[0:95, 180:370]
edges = feature.canny(image, sigma=3, low_threshold=10, high_threshold=60)

hough_radii = np.arange(15, 30, 2)
hough_response = hough_circle(edges, hough_radii)

centers = []
likelihood = []
radii = []
# Your code here

for idx, resp in enumerate(hough_response):
    # Find the local peaks:
    peaks = feature.peak_local_max(resp)
    centers.extend(peaks)
    # The likelihood for each of these peaks is given by the Hough response:
    likelihood.extend(resp[peaks[:, 0], peaks[:, 1]])
    # Radius is given by the array index, but we need to save the radius for
    # every peak we added to the list above. So we create an array of equal
    # length (`peaks.shape[0]`)
    radii.extend(np.ones(peaks.shape[0]) * hough_radii[idx])

# Make a copy of the image so we can draw on it without messing up the original
im = image.copy()
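The snippet above stops right after copying the image; a hedged completion sketch (not the original code) that draws the strongest circles collected above:

# Hedged completion sketch: overlay the top three circles on the copied image,
# reusing the centers/likelihood/radii lists built in the loop above.
from skimage import color

im_rgb = color.gray2rgb(im)
for idx in np.argsort(likelihood)[::-1][:3]:
    r, c = centers[idx]
    rr, cc = circle_perimeter(int(r), int(c), int(radii[idx]), shape=im_rgb.shape[:2])
    im_rgb[rr, cc] = (220, 20, 20)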
Example #48
import numpy as np
import matplotlib.pyplot as plt
import imageio
from skimage import color
from skimage.transform import hough_circle, hough_circle_peaks
from skimage.feature import canny
from skimage.draw import circle_perimeter
from skimage.util import img_as_ubyte

# Load picture and detect edges
image = imageio.imread("results_L3.jpg")

#convert to grayscale
image = color.rgb2gray(image)

#sx = ndimage.sobel(image, axis=0, mode='constant')
#sy = ndimage.sobel(image, axis=1, mode='constant')
#edges = np.hypot(sx, sy)

edges = canny(image, sigma=1.7)  # this works better
plt.imshow(edges)

# Detect two radii
hough_radii = np.arange(70, 120, 2)
hough_res = hough_circle(edges, hough_radii)

# Select the most prominent 10 circles
accums, cx, cy, radii = hough_circle_peaks(hough_res,
                                           hough_radii,
                                           total_num_peaks=10)

# Draw them
fig, ax = plt.subplots(ncols=1, nrows=1, figsize=(25, 20))
image = color.gray2rgb(image)
for center_y, center_x, radius in zip(cy, cx, radii):
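    # (snippet truncated here; a plausible continuation, mirroring the coins
    # example earlier in this collection, not necessarily the original code)
    circy, circx = circle_perimeter(center_y, center_x, int(radius), shape=image.shape[:2])
    image[circy, circx] = (220, 20, 20)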
Example #49
    def determine_skew(self, img_file):

        img = io.imread(img_file, as_grey=True)
        edges = canny(img, sigma=self.sigma)
        h, a, d = hough_line(edges)
        _, ap, _ = hough_line_peaks(h, a, d, num_peaks=self.num_peaks)

        if len(ap) == 0:
            return {"Image File": img_file, "Message": "Bad Quality"}

        absolute_deviations = [self.calculate_deviation(k) for k in ap]
        average_deviation = np.mean(np.rad2deg(absolute_deviations))
        ap_deg = [np.rad2deg(x) for x in ap]

        bin_0_45 = []
        bin_45_90 = []
        bin_0_45n = []
        bin_45_90n = []

        for ang in ap_deg:

            deviation_sum = int(90 - ang + average_deviation)
            if self.compare_sum(deviation_sum):
                bin_45_90.append(ang)
                continue

            deviation_sum = int(ang + average_deviation)
            if self.compare_sum(deviation_sum):
                bin_0_45.append(ang)
                continue

            deviation_sum = int(-ang + average_deviation)
            if self.compare_sum(deviation_sum):
                bin_0_45n.append(ang)
                continue

            deviation_sum = int(90 + ang + average_deviation)
            if self.compare_sum(deviation_sum):
                bin_45_90n.append(ang)

        angles = [bin_0_45, bin_45_90, bin_0_45n, bin_45_90n]
        lmax = 0

        for j in range(len(angles)):
            l = len(angles[j])
            if l > lmax:
                lmax = l
                maxi = j

        if lmax:
            ans_arr = self.get_max_freq_elem(angles[maxi])
            ans_res = np.mean(ans_arr)

        else:
            ans_arr = self.get_max_freq_elem(ap_deg)
            ans_res = np.mean(ans_arr)

        data = {
            "Image File": img_file,
            "Average Deviation from pi/4": average_deviation,
            "Estimated Angle": ans_res,
            "Angle bins": angles
        }

        if self.display_output:
            self.display(data)

        if self.plot_hough:
            self.display_hough(h, a, d)
        return data
Example #50
def _process_image(image_in: Image) -> Image:
    grayscale_data = np.array(image_in.convert('L'))
    edges = feature.canny(grayscale_data, sigma=3)
    image_out = Image.fromarray(edges)
    image_out.format = image_in.format
    return image_out
Example #51
def DetectImplant(array, pixel, area):
    """Takes a grey-scale image (e.g. a DICOM pixel array) and calculates the percentage
    of pixels that are above an intensity threshold within the most circular object in the image.

    Returns True if more than x percent of the image is greater than y percent of maximum intensity."""
    image = array.copy()
    original = image.copy()

    
    #percentile for pixels to be in to be considered "implant bright"
    pixel_threshold=pixel

    #percentage of the image that needs to be "implant bright" to trigger implant detection
    area_threshold=area
    
    global detected_implant_count, dicom_implant_count

    rows, cols = image.shape
    #Cuts image in half (taking left or right depending on image
    try:
        if re.match(r"L.*MLO",ds.SeriesDescription) or re.match(r"L.*CC", ds.SeriesDescription):
            image=image[:,:int(cols/2)]    
        elif re.match(r"R.*MLO",ds.SeriesDescription) or re.match(r"R.*CC", ds.SeriesDescription):
            image=image[:,int(cols/2):]
        elif "L" in ds.PatientOrientation[1]:
            image=image[:,:int(cols/2)]
        elif "R" in ds.PatientOrientation[1]:
            image=image[:,int(cols/2):]
    except:
        pass
    
    
    cropped=image.copy()
    #resets dimensions for the new, cropped, image
    rows, cols = image.shape
    #Calculates true area of the image, based on pixels that are not zero
    AOI=np.count_nonzero(image)
    #resets image if AOI is too low (e.g. DICOM tag mis-entered for Left/Right)
    if AOI<75000:
        image=original.copy()
        cropped=original.copy()
        rows, cols = image.shape
        AOI=np.count_nonzero(image)
    #calculates minimum pixel intensity required for a pixel to be "implant bright"
    pixels_intensity=round(image.max()*pixel_threshold)

    try:
        #'pre-mask' to filter out tissue/breast outline etc. (as the breast outline is
        #also roughly circular it causes inconsistency in the RANSAC fit)
        mask = image<round(image.max()*0.80)
        image[mask]=0
        #creates a circular mask around the most circular object; helps limit the intensity search to the implant only.
        #Also, for magviews the circle is the middle of the magview, so all the white metal
        #around the circle gets blacked out (just using intensity, some magviews came out implant positive)
        edges = feature.canny(image, sigma=10, low_threshold=10, high_threshold=500)
        points = np.array(np.nonzero(edges)).T
        model_robust, inliers = ransac(points, CircleModel, min_samples=5,
                                       residual_threshold=1, max_trials=100)
        cy, cx, r = model_robust.params
        y,x=np.ogrid[-cy:rows-cy, -cx:cols-cx]
        mask = x*x + y*y <= r*r
        #as mask is the circle, ~mask inverts it
        #Sometimes mask is 1-2 pixels larger than image (not sure why)
        #[:rows, :cols] only applies mask where there is image
        image[~mask[:rows,:cols]]=0
    except Exception as e:
        pass
    #zeros out all pixels below the threshold
    mask = image < pixels_intensity
    image[mask]=0


    #gets area of pixels that are above threshold
    IMP_area=np.count_nonzero(image)

    try:
        percent=IMP_area/AOI
    except Exception as e:
        print(e)
        pdb.set_trace()
    if show==True:
        f, (ax0, ax1, ax2, ax3) = plt.subplots(1, 4, figsize=(15, 8))
        ax0.imshow(original, cmap=plt.cm.bone)
        ax0.set_title("ORIGINAL")

        ax1.imshow(cropped>(round(image.max()*0.90)), cmap=plt.cm.bone)
        ax1.set_title("FIRST THRESH")
        
        ax2.imshow(cropped, cmap=plt.cm.bone)
        circle = plt.Circle((cx, cy), radius=r, facecolor='r', linewidth=2)
        ax2.add_patch(circle)
        ax2.set_title("CIRCLE MASK")

        ax3.imshow(image, cmap=plt.cm.bone)
        ax3.set_title("PIXELS COUTNING FOR 'IMPLANT'")

        plt.show()
    if percent>area_threshold:
        return True
    return False
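# Hedged usage sketch: DetectImplant() relies on module-level globals (`ds` for
# the DICOM dataset and `show` for the diagnostic plots). The pydicom call and
# the 0.95/0.05 threshold values below are illustrative assumptions only.
import pydicom

def check_file_for_implant(dicom_path):
    global ds, show
    ds = pydicom.dcmread(dicom_path)
    show = False  # set True to display the diagnostic plots
    array = ds.pixel_array.astype(float)
    # "implant bright" = above 95% of the maximum intensity; flag the image if
    # more than 5% of the breast area is that bright
    return DetectImplant(array, pixel=0.95, area=0.05) is True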
Exemple #52
     sample_saving_path,
     file_name + image_name + channel_color + file_suffix)
 mm = np.load(file_reading)
 mmm = np.copy(mm)
 total_pixel_y, total_pixel_x = mmm.shape
 ### reading mmm_array ---
 ###
 ### filtering_noise_particle ---
 #from skimage import filters
 #mmm = filters.median(mmm)
 #mmm = (mmm - mmm.min()) / (mmm.max() - mmm.min())
 ### filtering_noise_particle +++
 ###
 from skimage import feature
 #bool_mmm = feature.canny(mmm, sigma = 2.5, low_threshold = 0.1, high_threshold = 0.94, use_quantiles = True)
 bool_mmm = feature.canny(mmm)
 ### boundary_masking +++
 bool_mmm[:, :2] = False
 bool_mmm[:, -2:] = False
 bool_mmm[:2, :] = False
 bool_mmm[-2:, :] = False
 ### boundary_masking ---
 ###
 new_mmm = np.array(bool_mmm, dtype=np.int64)
 file_suffix = '.png'
 saving_data_file = file_name + section_name_in
 mpl.image.imsave(
     os.path.join(sample_saving_path, saving_data_file + file_suffix),
     new_mmm)
 ###
 file_suffix = '.npy'
def main():
    imgLoc = sys.argv[1]
    imgDir, exactImageLoc = os.path.split(imgLoc)
    img = io.imread(imgLoc)
    imgray = color.rgb2gray(img)
    edge = canny(imgray)
    #show_img(rescale(edge,0.5))
    print(edge.shape)
    #show_img(edge)
    print("edge found")
    s = [[1, 1, 1], [1, 1, 1], [1, 1, 1]]
    labeled_array, num_features = label(edge, structure=s)
    print "labeling done"
    temp = []
    print "started with", num_features, "connected components"
    for i in range(1, num_features + 1):
        label_i_indices = [(labeled_array == i).nonzero()]
        if getBoundingBoxfromPixels(label_i_indices,
                                    (img.shape[1], img.shape[0])):
            temp.append(
                getBoundingBoxfromPixels(label_i_indices,
                                         (img.shape[1], img.shape[0])))

    print "started with", len(temp), "bounding boxes"
    imgarea = (img.shape[0] * img.shape[1])
    print imgarea
    interimBoundingboxes = []
    for item in temp:
        if not (item[2] - item[0]) * (item[3] - item[1]) > 0.01 * imgarea:
            if (item[2] - item[0]) > 5 or (item[3] - item[1]) > 5:
                interimBoundingboxes.append(item)

    interimBoundingboxesDict = {}
    for item in interimBoundingboxes:
        interimBoundingboxesDict[','.join([str(i) for i in item])] = True

    for item1 in interimBoundingboxes:
        toberemoved = []
        for item2 in interimBoundingboxes:
            if (item1 != item2):
                result = boundingBoxiConsumedbyj(item1, item2)
                if result:
                    toberemoved.append(item1)
        for item in toberemoved:
            interimBoundingboxesDict[','.join([str(i) for i in item])] = False

    finalBoundingboxes = []
    for key in interimBoundingboxesDict.keys():
        if interimBoundingboxesDict[key]:
            item = [int(x) for x in key.split(',')]
            finalBoundingboxes.append(item)

    #starting to merge
    finalBoundingboxesFiltered = finalBoundingboxes
    print "starting to merge"
    edges = sp.zeros(edge.shape)
    labeldict = {}
    colornumber = [0]
    for i in range(0, len(finalBoundingboxesFiltered)):
        item1 = finalBoundingboxesFiltered[i]
        stritem1 = ','.join([str(x) for x in item1])
        currentcolors = [labeldict[item] for item in labeldict.keys()]

        if currentcolors:
            for color1 in currentcolors:
                colornumber.append(color1)

        currentcolornumber = max(colornumber) + 1
        if not stritem1 in labeldict:
            labeldict[stritem1] = currentcolornumber
        else:
            currentcolornumber = labeldict[stritem1]
        edges[item1[0]:item1[2], item1[1]:item1[3]] = currentcolornumber
        for j in range(i + 1, len(finalBoundingboxesFiltered)):
            item2 = finalBoundingboxesFiltered[j]
            stritem2 = ','.join([str(x) for x in item2])
            if rectmanDist(item1, item2) < 1:
                #if rectmanDistHorizontal(item1,item2)<10:
                #print item1,"merged with",item2,"with",currentcolornumber
                if stritem2 in labeldict:
                    currentcolornumber = labeldict[stritem2]
                    edges[item1[0]:item1[2],
                          item1[1]:item1[3]] = currentcolornumber
                    labeldict[stritem1] = currentcolornumber
                else:
                    labeldict[stritem2] = currentcolornumber
                edges[item2[0]:item2[2],
                      item2[1]:item2[3]] = currentcolornumber

    labeled_array = edges.copy()
    num_features = max(colornumber) + 1

    temp = []
    for i in range(1, num_features + 1):
        label_i_indices = [(labeled_array == i).nonzero()]
        #print i,len(label_i_indices[0][0])
        if len(label_i_indices[0][0]) > 0:
            temp.append(
                getBoundingBoxfromPixelsLarge(label_i_indices,
                                              (img.shape[1], img.shape[0])))

    print "after merging boundingboxes", len(temp)

    interimBoundingboxes = list(temp)
    interimBoundingboxesDict = {}
    for item in interimBoundingboxes:
        interimBoundingboxesDict[','.join([str(i) for i in item])] = True

    for item1 in temp:
        toberemoved = []
        for item2 in temp:
            if (item1 != item2):
                result = boundingBoxiConsumedbyj(item1, item2)
                if result:
                    toberemoved.append(item1)
        for item in toberemoved:
            interimBoundingboxesDict[','.join([str(i) for i in item])] = False

    finalBoundingboxes = []
    for key in interimBoundingboxesDict.keys():
        if interimBoundingboxesDict[key]:
            item = [int(x) for x in key.split(',')]
            finalBoundingboxes.append(item)

    #finalBoundingboxes=interimBoundingboxes
    pilImg = Image.fromarray(img)
    draw = ImageDraw.Draw(pilImg)
    font = ImageFont.truetype("Berylium.ttf", 16)
    for item in finalBoundingboxes:
        draw.rectangle([item[1] - 2, item[0] - 2, item[3] + 2, item[2] + 2],
                       outline="red")
        cutimg = Image.fromarray(img[item[0] - 2:item[2] + 2,
                                     item[1] - 2:item[3] + 2])
        #cutimg=pilImg.crop((item[1],item[0],item[3],item[2]))
        #cutimg.show()
        text = pytesseract.image_to_string(cutimg)
        print(item, text)
        #draw.text((item[1], item[0]),text,(0,0,255),font=font)
        #print item,((item[2]-item[0])*(item[3]-item[1]))/imgarea
    pilImg.show()

    #pilImg.save(os.path.join(imgDir,"textextraction/ccsaurabhonepercentmerged",exactImageLoc[:-4]+"-ccsaurabh.png"))
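# The helper functions used by main() (getBoundingBoxfromPixels, rectmanDist,
# boundingBoxiConsumedbyj, ...) are not shown in this example. The two sketches
# below are hypothetical reconstructions, consistent only with how main() calls
# them: boxes are [min_row, min_col, max_row, max_col].

def boundingBoxiConsumedbyj(i, j):
    # True when box i lies entirely inside box j (i is then dropped above)
    return i[0] >= j[0] and i[1] >= j[1] and i[2] <= j[2] and i[3] <= j[3]

def rectmanDist(a, b):
    # Manhattan-style gap between two axis-aligned boxes; 0 when they touch or
    # overlap, so "rectmanDist(a, b) < 1" above merges adjacent/overlapping boxes
    dy = max(0, max(a[0], b[0]) - min(a[2], b[2]))
    dx = max(0, max(a[1], b[1]) - min(a[3], b[3]))
    return dx + dy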
Exemple #54
def detectionAlgorithm(BL_Image, FU_Image):
    """
    This function detects the changes between the base-line and follow-up images.
    :param BL_Image: base-line image (path)
    :param FU_Image: follow-up image (path)
    :return:
    """
    fu_im = io.imread(FU_Image, as_gray=True)[:BOTTOM_CAPTION, :]
    registered_bl = firstRegistrationAlgorithm(FU_Image,
                                               BL_Image)  # used for case3
    # registered_bl = secondRegistrationAlgorithm(FU_filename, BL_filename)  # used for case4

    # Normalize both images to [0, 1] by their maximum:
    normalized_bl = registered_bl / np.amax(registered_bl)
    normalized_fu = fu_im / np.amax(fu_im)

    # Subtract FU from BL after registration:
    new_im = normalized_bl - normalized_fu
    above_zero = new_im > 0
    new_im[above_zero] = 0
    new_im = np.abs(new_im)

    fig = plt.figure()
    fig.add_subplot(221)
    plt.imshow(new_im, cmap='gray')
    plt.title("After stages 1-3")

    # remove some of the noise:
    th = 0.2
    above_th = new_im > th
    underneath_th = new_im <= th
    new_im[above_th] = 1
    new_im[underneath_th] = 0

    fig.add_subplot(222)
    plt.imshow(new_im, cmap='gray')
    plt.title("After stage 4")

    # clean noise touching the boundary:
    new_im = segmentation.clear_border(new_im)

    # remove small object:
    new_im = findingBiggestComponent(new_im)

    fig.add_subplot(223)
    plt.imshow(new_im, cmap='gray')
    plt.title("After stage 5-6")

    # Remove Blood Vessels:
    fu_seg = SegmentBloodVessel(fu_im)
    bl_seg = SegmentBloodVessel(registered_bl)
    new_im = np.bitwise_or(fu_seg, bl_seg).astype(int) - new_im
    new_im[new_im < 1] = 0

    fig.add_subplot(224)
    plt.imshow(new_im, cmap='gray')
    plt.title("After stage 7")
    plt.show()

    # Morphologically close:
    new_im = morphology.dilation(new_im)

    # Draw the borders/polygons:
    new_im = canny(new_im)
    new_im = morphology.dilation(new_im)

    fu_im[np.nonzero(new_im)] = 0
    registered_bl[np.nonzero(new_im)] = 0
    fig = plt.figure()
    fig.add_subplot(121)
    plt.imshow(fu_im, cmap='gray')
    plt.title("final output Follow-Up scan")

    fig.add_subplot(122)
    plt.imshow(registered_bl, cmap='gray')
    plt.title("final output Base-Line scan")
    plt.show()
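# The helpers used above (firstRegistrationAlgorithm, SegmentBloodVessel,
# findingBiggestComponent) are not shown. As one plausible reading of the
# "remove small object" step, findingBiggestComponent could keep only the
# largest connected component; a minimal sketch:
import numpy as np
from skimage import measure

def findingBiggestComponent(binary_image):
    labels = measure.label(binary_image > 0, connectivity=2)
    if labels.max() == 0:              # nothing survived the threshold
        return np.zeros_like(binary_image)
    counts = np.bincount(labels.ravel())
    counts[0] = 0                      # ignore the background label
    biggest = counts.argmax()
    return (labels == biggest).astype(binary_image.dtype)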
Exemple #55
def getCoordMask(mask):
    edge = feature.canny(np.copy(mask), sigma=6)
    pixelpoints = np.transpose(np.nonzero(np.copy(edge)))
    X = pixelpoints[:, 0]
    Y = pixelpoints[:, 1]
    return pixelpoints
    block_size = 15
    #image = threshold_local(image_unthresholded, block_size, offset=10)
    #image_647 = threshold_local(image_647_unthresholded, block_size, offset=10)

    radius = 5
    selem = disk(radius)

    #thresholding both files (getting rid of this because it should not be necessary!)
    #image = rank.otsu(image_unthresholded, selem)
    #image_647 = rank.otsu(image_647_unthresholded, selem)

    image = image_unthresholded
    image_647 = image_647_unthresholded

    #perfoming edge detection and morphological filling
    edges_open = canny(image, 3, 1, 25)  #originally 2,1,25
    #edges_open = canny(image, 2) #originally 2,1,25
    selem = disk(5)
    edges = closing(edges_open, selem)
    fill_tubes = ndi.binary_fill_holes(edges)
    io.imsave(cy3_file + "fill_tubes.png",
              img_as_uint(fill_tubes),
              cmap=cm.gray)
    cy3_endpoint_mask = make_endpoints_mask(fill_tubes)

    edges_open_647 = canny(image_647, 2, 1, 25)
    selem = disk(2)
    edges_647 = closing(edges_open_647, selem)
    fill_tubes_647 = ndi.binary_fill_holes(edges_647)
    io.imsave(atto647_file + "fill_tubes.png",
              img_as_uint(fill_tubes_647),
Exemple #57
    curr_filtered_coord, curr_filtered_coord_x, curr_filtered_coord_y, curr_filtered_2d,curr_filtered_1d= curr_thresh_filter(X,Y,Z,curr_thresh_factor)

    #plot filtered current
    # fig = plt.figure()
    # plt.contourf(X, Y, curr_filtered_2d, 30, cmap=cm.coolwarm)
    # plt.show()

    #apply filter to smoothen the image. makes edge detection better
    curr_filtered_2d=ndimage.median_filter(curr_filtered_2d, size=10)
    # plot smoothened data
    # fig = plt.figure()
    # plt.contourf(X, Y, curr_filtered_2d, 30, cmap=cm.coolwarm)
    # plt.show()

    Current_edge= feature.canny(1000.0*curr_filtered_2d)
    Curr_edge_shape= Current_edge.shape
    #plot image edges
    # fig = plt.figure()
    # plt.contourf(X, Y, Current_edge, 30, cmap=cm.coolwarm)
    # ax = fig.add_subplot(111, projection='3d')
    # ax.plot_surface(VplgR, VplgL, Current_edge, cmap=cm.coolwarm)
    # plt.show()

    # put the points on edges into curr_filtered_coord_x and _y
    curr_filtered_coord_x=[]
    curr_filtered_coord_y=[]
    for r in range(0,Curr_edge_shape[0]):
        for s in range(0, Curr_edge_shape[1]):
            if Current_edge[r][s]==True:
                curr_filtered_coord_x+=[X[r][s]]
Exemple #58
ax1.set_ylabel('Distance (pixels)')
ax1.axis('image')

ax2.imshow(image, cmap=cm.gray)
row1, col1 = image.shape
for _, angle, dist in zip(*hough_line_peaks(h, theta, d)):
    y0 = (dist - 0 * np.cos(angle)) / np.sin(angle)
    y1 = (dist - col1 * np.cos(angle)) / np.sin(angle)
    ax2.plot((0, col1), (y0, y1), '-r')
ax2.axis((0, col1, row1, 0))
ax2.set_title('Detected lines')
ax2.set_axis_off()

# Line finding using the Probabilistic Hough Transform.
image = data.camera()
edges = canny(image, 2, 1, 25)
lines = probabilistic_hough_line(edges,
                                 threshold=10,
                                 line_length=5,
                                 line_gap=3)

# Generating figure 2.
fig, (ax0, ax1, ax2) = plt.subplots(1,
                                    3,
                                    figsize=(16, 6),
                                    sharex=True,
                                    sharey=True)
plt.tight_layout()

ax0.imshow(image, cmap=cm.gray)
ax0.set_title('Input image')
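# The snippet is cut off here; a plausible continuation drawing the remaining
# two panels (the panel titles are assumptions, not the original text):
ax1.imshow(edges, cmap=cm.gray)
ax1.set_title('Canny edges')

ax2.imshow(edges * 0)
for line in lines:
    p0, p1 = line
    ax2.plot((p0[0], p1[0]), (p0[1], p1[1]))
ax2.set_xlim((0, image.shape[1]))
ax2.set_ylim((image.shape[0], 0))
ax2.set_title('Probabilistic Hough')

for a in (ax0, ax1, ax2):
    a.set_axis_off()

plt.show()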
Exemple #59
def extract_roi_mask(image: np.ndarray,
                     min_hull_ratio: float = 0.3) -> Tuple[np.ndarray, float]:
    """
    Extract region of interest (ROI) for the given image.

    Parameters
    ----------
    image : np.ndarray
        Input document image covering the entire ROI.
    min_hull_ratio : float, optional
        Minimum ROI-to-image area ratio required to count as "success", by default 0.3.

    Returns
    -------
    mask_fullsize : np.ndarray
        Binary image representing the ROI. White pixels (1) = ROI.
    mask_ratio : float
        Pixel ratio ROI / (width * height).

    Raises
    ------
    Exception
        If the minimum desired ratio could not be achieved, an error is raised.
    """
    # Scale image to fixed size
    size = 1000
    height, width, _ = image.shape  # numpy shape is (rows, cols, channels)
    image_resized = cv2.resize(image, (size, size))

    # Search for edges with canny filter on each channel of HSV image
    image_hsv = cv2.cvtColor(image_resized, cv2.COLOR_RGB2HSV)
    image_canny = np.empty_like(image_hsv)
    for channel in range(3):
        # Apply adaptive histogram equalization
        channel_eq = equalize_adapthist(image_hsv[:, :, channel],
                                        kernel_size=200)
        # Apply canny filter
        channel_canny = canny(channel_eq, sigma=3)
        image_canny[:, :, channel] = channel_canny

    # Perform segmentation using the Felzenszwalb-Huttenlocher algorithm
    # and sort segments by size in descending order
    image_segmented = felzenszwalb(image_canny,
                                   scale=1000,
                                   sigma=0.3,
                                   min_size=50)

    segment_sizes = np.bincount(image_segmented.flatten())
    segments = np.argsort(-segment_sizes)

    # Iterate over segments, starting from largest
    for s in segments:
        # Get segment and fill all holes
        segment = image_segmented == s
        hull = ndimage.binary_fill_holes(segment)

        # Exit if hull_ratio is sufficient
        hull_ratio = np.sum(hull) / (size**2)
        if hull_ratio >= min_hull_ratio:
            break

    # Raise error if hull_ratio criterion could not be met
    if hull_ratio < min_hull_ratio:
        raise Exception('ROI could not be computed')

    ### Postprocessing

    # Removes areas that are only connected by few pixels to the hull
    hull_opened = cv2.morphologyEx(hull.astype(np.uint8),
                                   cv2.MORPH_OPEN,
                                   kernel=disk(20))

    # Take center blob
    blobs_segmented = measure.label(hull_opened)
    center_blob_label = blobs_segmented[size // 2, size // 2]
    mask = blobs_segmented == center_blob_label

    # Resize mask back to original image size
    mask_fullsize = cv2.resize(mask.astype(np.uint8), (width, height))  # cv2 dsize is (cols, rows)
    mask_ratio = np.sum(mask) / (size**2)

    return mask_fullsize, mask_ratio
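# Hedged usage sketch (the file-path handling is an illustrative assumption):
# load a document photo, compute the ROI mask, and zero out everything else.
import cv2
import numpy as np

def crop_to_roi(path):
    bgr = cv2.imread(path)
    rgb = cv2.cvtColor(bgr, cv2.COLOR_BGR2RGB)   # extract_roi_mask expects RGB
    mask, ratio = extract_roi_mask(rgb, min_hull_ratio=0.3)
    roi_only = rgb * mask[:, :, np.newaxis]      # keep only ROI pixels
    return roi_only, ratio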
Exemple #60
 def canny(self, image):
     return feature.canny(image, sigma=self.sigma_spinbox.value())