def test_apply_parallel_lazy(): import dask.array as da # data a = np.arange(144).reshape(12, 12).astype(float) d = da.from_array(a, chunks=(6, 6)) # apply the filter expected1 = threshold_local(a, 3) result1 = apply_parallel(threshold_local, a, chunks=(6, 6), depth=5, extra_arguments=(3,), extra_keywords={'mode': 'reflect'}, compute=False) # apply the filter on a Dask Array result2 = apply_parallel(threshold_local, d, depth=5, extra_arguments=(3,), extra_keywords={'mode': 'reflect'}, compute=False) assert isinstance(result1, da.Array) assert_array_almost_equal(result1.compute(), expected1) assert isinstance(result2, da.Array) assert_array_almost_equal(result2.compute(), expected1)
def test_apply_parallel(): import dask.array as da # data a = np.arange(144).reshape(12, 12).astype(float) # apply the filter expected1 = threshold_local(a, 3) result1 = apply_parallel(threshold_local, a, chunks=(6, 6), depth=5, extra_arguments=(3,), extra_keywords={'mode': 'reflect'}) assert_array_almost_equal(result1, expected1) def wrapped_gauss(arr): return gaussian(arr, 1, mode='reflect') expected2 = gaussian(a, 1, mode='reflect') result2 = apply_parallel(wrapped_gauss, a, chunks=(6, 6), depth=5) assert_array_almost_equal(result2, expected2) expected3 = gaussian(a, 1, mode='reflect') result3 = apply_parallel( wrapped_gauss, da.from_array(a, chunks=(6, 6)), depth=5, compute=True ) assert isinstance(result3, np.ndarray) assert_array_almost_equal(result3, expected3)
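# A standalone sketch of the pattern the two tests above exercise: run
# `threshold_local` over an image in overlapping chunks via
# `skimage.util.apply_parallel` (requires dask), then binarize against the
# returned threshold surface. The 12x12 array and the chunk/depth values
# simply mirror the test data; they are not meaningful defaults.
import numpy as np
from skimage.filters import threshold_local
from skimage.util import apply_parallel

img = np.arange(144).reshape(12, 12).astype(float)
thresh = apply_parallel(threshold_local, img, chunks=(6, 6), depth=5,
                        extra_arguments=(3,),
                        extra_keywords={'mode': 'reflect'})
binary = img > thresh  # True where a pixel exceeds its local threshold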
def preview(self): if g.win is None or g.win.closed: return win = g.win value = self.getValue('value') block_size = self.getValue('block_size') preview = self.getValue('preview') darkBackground = self.getValue('darkBackground') nDim = len(win.image.shape) if nDim > 3: g.alert("You cannot run this function on an image of dimension greater than 3. If your window has color, convert to a grayscale image before running this function") return None if preview: if nDim == 3: # if the image is 3d testimage=np.copy(win.image[win.currentIndex]) elif nDim == 2: testimage=np.copy(win.image) testimage = threshold_local(testimage, block_size, offset=value) if darkBackground: testimage = np.logical_not(testimage) testimage = testimage.astype(np.uint8) win.imageview.setImage(testimage, autoLevels=False) win.imageview.setLevels(-.1, 1.1) else: win.reset() if nDim == 3: image = win.image[win.currentIndex] else: image = win.image win.imageview.setLevels(np.min(image), np.max(image))
def scanner(image):
    # Edge Detection
    # load the image, compute the ratio of the old height to the new height,
    # clone it, and resize it
    image = cv2.imread(image)
    ratio = image.shape[0] / 500.0
    orig = image.copy()
    image = imutils.resize(image, height=500)

    # convert the image to grayscale, blur it, and find edges in the image
    gray = cv2.cvtColor(image, cv2.COLOR_BGR2GRAY)
    gray = cv2.GaussianBlur(gray, (5, 5), 0)
    edged = cv2.Canny(gray, 75, 200)
    #cv2.imshow("Image", image)
    #cv2.imshow("Edged", edged)
    #cv2.waitKey(0)
    #cv2.destroyAllWindows()

    # Finding Contours
    # Assume that the largest contour in the image with exactly four points is
    # the piece of paper to be scanned
    cnts = cv2.findContours(edged.copy(), cv2.RETR_LIST, cv2.CHAIN_APPROX_SIMPLE)
    cnts = imutils.grab_contours(cnts)
    # keep only the five largest contours by area
    cnts = sorted(cnts, key=cv2.contourArea, reverse=True)[:5]
    for c in cnts:
        # approximate the contour
        peri = cv2.arcLength(c, True)
        approx = cv2.approxPolyDP(c, 0.02 * peri, True)
        # the document outline is the first contour approximated by 4 points
        if len(approx) == 4:
            screenCnt = approx
            break
    cv2.drawContours(image, [screenCnt], -1, (0, 255, 0), 2)
    #cv2.imshow("Outline", image)
    #cv2.waitKey(0)
    #cv2.destroyAllWindows()

    # orig: original image
    # screenCnt: document contour, scaled back up by `ratio` to match the
    # original image size
    warped = four_point_transform(orig, screenCnt.reshape(4, 2) * ratio)
    # this is for the black and white feel
    warped = cv2.cvtColor(warped, cv2.COLOR_BGR2GRAY)
    T = threshold_local(warped, 11, offset=10, method="gaussian")
    warped = (warped > T).astype("uint8") * 255
    #cv2.imshow("Original", imutils.resize(orig, height=650))
    #cv2.imshow("Scanned", imutils.resize(warped, height=650))
    #cv2.waitKey(0)
    return warped
async def imgscan(event): ok = await event.get_reply_message() if not (ok and (ok.media)): await eor(event, "`Reply The pdf u Want to Download..`") return ultt = await ok.download_media() if not ultt.endswith(("png", "jpg", "jpeg", "webp")): await eor(event, "`Reply to a Image only...`") os.remove(ultt) return xx = await eor(event, "`Processing...`") image = cv2.imread(ultt) original_image = image.copy() ratio = image.shape[0] / 500.0 image = imutils.resize(image, height=500) image_yuv = cv2.cvtColor(image, cv2.COLOR_BGR2YUV) image_y = np.zeros(image_yuv.shape[0:2], np.uint8) image_y[:, :] = image_yuv[:, :, 0] image_blurred = cv2.GaussianBlur(image_y, (3, 3), 0) edges = cv2.Canny(image_blurred, 50, 200, apertureSize=3) contours, hierarchy = cv2.findContours( edges, cv2.RETR_EXTERNAL, cv2.CHAIN_APPROX_SIMPLE, ) polygons = [] for cnt in contours: hull = cv2.convexHull(cnt) polygons.append( cv2.approxPolyDP(hull, 0.01 * cv2.arcLength(hull, True), False)) sortedPoly = sorted(polygons, key=cv2.contourArea, reverse=True) cv2.drawContours(image, sortedPoly[0], -1, (0, 0, 255), 5) simplified_cnt = sortedPoly[0] if len(simplified_cnt) == 4: cropped_image = four_point_transform( original_image, simplified_cnt.reshape(4, 2) * ratio, ) gray_image = cv2.cvtColor(cropped_image, cv2.COLOR_BGR2GRAY) T = threshold_local(gray_image, 11, offset=10, method="gaussian") ok = (gray_image > T).astype("uint8") * 255 if len(simplified_cnt) != 4: ok = cv2.detailEnhance(original_image, sigma_s=10, sigma_r=0.15) cv2.imwrite("o.png", ok) image1 = PIL.Image.open("o.png") im1 = image1.convert("RGB") scann = f"Scanned {ultt.split('.')[0]}.pdf" im1.save(scann) await event.client.send_file(event.chat_id, scann, reply_to=event.reply_to_msg_id) await xx.delete() os.remove(ultt) os.remove("o.png") os.remove(scann)
def local(fname):
    # read in image as grayscale
    adata = sm.imread(fname, flatten=True)
    # apply local (adaptive) thresholding
    block_size = 35
    val = filters.threshold_local(adata, block_size, offset=10)
    bdata = adata > val
    edg = bdata + 0.0
    # apply binary fill holes
    #shape = nd.binary_fill_holes(edg) + 0.0
    return edg
def local_thresholding(x_img, ext:int=200): gray = np.mean(x_img, axis=2) x_threshold = filters.threshold_local(gray, block_size=ext*2+1) # Debugging: if 1: from plotting import concurrent concurrent([gray, x_threshold]) return np.greater_equal(gray, x_threshold)
def get_component_props(img_stack, output_file=None, verbose=False):
    z_dim = img_stack.shape[0]
    y_dim = img_stack.shape[1]
    x_dim = img_stack.shape[2]
    z_props = []
    bin_stack = np.zeros(img_stack.shape)
    for k in range(0, z_dim):
        z_slice = img_stack[k, :, :]
        if (not np.any(z_slice)):
            # If all values in the slice are 0, then continue to the next slice
            continue
        # Initial thresholding: set all voxels with intensity below half of Otsu's threshold to 0.
        thresh_value = threshold_otsu(z_slice) / 2
        initial_thresholding = np.zeros(z_slice.shape)
        for i in range(0, x_dim):
            for j in range(0, y_dim):
                initial_thresholding[j, i] = 0 if z_slice[j, i] < thresh_value else z_slice[j, i]
        # Adaptive thresholding. After this step, images should be binary.
        block_size = 35
        local_thresh = threshold_local(initial_thresholding, block_size)
        otsu_thresh_value = threshold_otsu(local_thresh)
        otsu_thresh = local_thresh > otsu_thresh_value
        # Morphological transformations using a disk-shaped kernel of radius 5 pixels
        erosion = binary_erosion(otsu_thresh, disk(5))
        opening = binary_opening(erosion, disk(5))
        bin_stack[k, :, :] = opening
        # Labelling connected components
        components, num_components = label(opening, return_num=True, connectivity=2)
        if (verbose):
            print('%d detected component(s) for z=%d' % (num_components, k))
        props = regionprops(components)
        z_props.append(props)
    # imsave('bin_stack.tif', bin_stack)
    image = sitk.GetImageFromArray(bin_stack)
    sitk.WriteImage(sitk.Cast(image, sitk.sitkUInt16), 'bin_stack.tif')
    if (output_file != None):
        with open(output_file + '.pkl', 'wb') as f:
            pickle.dump(z_props, f)
    return z_props
def adaptive_threshold(directory, image_name, block_size):
    # read the image as grayscale and preview it
    np_array = misc.imread(directory, flatten=True)
    plt.imshow(np_array)
    plt.show()
    # binarize against a local mean threshold and save the result
    adaptive_threshold = skif.threshold_local(np_array, block_size,
                                              method='mean', offset=0)
    binary_threshold = np_array > adaptive_threshold
    misc.imsave(image_name, binary_threshold)
def _deskew(self): image = self.image ratio = image.shape[0] / 500.0 orig = image.copy() image = imutils.resize(image, height=500) # convert the image to grayscale, blur it, and find edges # in the image gray = image gray = cv2.GaussianBlur(gray, (5, 5), 0) edged = cv2.Canny(gray, 75, 200) # find the contours in the edged image, keeping only the # largest ones, and initialize the screen contour cnts = cv2.findContours(edged.copy(), cv2.RETR_LIST, cv2.CHAIN_APPROX_SIMPLE) cnts = imutils.grab_contours(cnts) cnts = sorted(cnts, key=cv2.contourArea, reverse=True)[:10] # loop over the contours approx = [] screenCnt = None # print('=') for c in cnts: # print(c.shape, cv2.contourArea(c)) # approximate the contour peri = cv2.arcLength(c, True) approx.append(cv2.approxPolyDP(c, 0.02 * peri, True)) # if our approximated contour has four points, then we # can assume that we have found our screen # print(approx[-1]) if screenCnt is None and len(approx[-1]) == 4: screenCnt = approx[-1] # print(screenCnt) cv2.drawContours(image, [screenCnt], -1, (0, 255, 0), 2) # apply the four point transform to obtain a top-down # view of the original image warped: np.array = four_point_transform( orig, screenCnt.reshape(4, 2) * ratio) # convert the warped image to grayscale, then threshold it # to give it that 'black and white' paper effect # warped = cv2.cvtColor(warped, cv2.COLOR_BGR2GRAY) T = threshold_local(warped, self.block_size, offset=self.offset, method="gaussian") warped = (warped > T).astype("uint8") * 255 return warped.astype("uint8")
def _apply_transformation(self, ctr, blackwhite=False): wrp = four_point_transform(self.original, ctr.reshape(4, 2) * self.ratio) # convert the warped image to grayscale, then threshold it # to give it that 'black and white' paper effect if blackwhite: wrp = cvtColor(wrp, COLOR_BGR2GRAY) t = threshold_local(wrp, 11, offset=10, method="gaussian") wrp = (wrp > t).astype("uint8") * 255 return wrp
def rotate_image(self): warped = four_point_transform(self.orig, self.get_contours().reshape(4, 2)) warped = cv2.cvtColor(warped, cv2.COLOR_BGR2GRAY) T = threshold_local(warped, 11, offset=10, method='gaussian') warped = (warped > T).astype('uint8') * 255 cv2.imshow('warped', imutils.resize(warped, height=1000)) cv2.waitKey(0)
def __call__(self, value, block_size, darkBackground=False, keepSourceWindow=False): self.start(keepSourceWindow) if self.tif.dtype == np.float16: g.alert("Local Threshold does not support float16 type arrays") return newtif = np.copy(self.tif) if self.oldwindow.nDims == 2: newtif = threshold_local(newtif, block_size, offset=value) elif self.oldwindow.nDims == 3: for i in np.arange(len(newtif)): newtif[i] = threshold_local(newtif[i], block_size, offset=value) else: g.alert("You cannot run this function on an image of dimension greater than 3. If your window has color, convert to a grayscale image before running this function") return None if darkBackground: newtif = np.logical_not(newtif) self.newtif = newtif.astype(np.uint8) self.newname = self.oldname + ' - Thresholded ' + str(value) return self.end()
def getCenterAndR_adap(self, blockSize=33):
    """Calculate the weighting center and radius of circle based on the
    adaptive threshold.

    Parameters
    ----------
    blockSize : int, optional
        Block size for adaptive threshold. This value should be odd.
        (the default is 33.)

    Returns
    -------
    float
        Weighting center x.
    float
        Weighting center y.
    float
        Radius.
    numpy.ndarray[int]
        Binary image.
    """

    # Adaptive threshold
    delta = 1
    times = 0
    while (delta > 1e-2) and (times < 10):
        img = self.getImg().copy()
        imgBinary = (img > threshold_local(img, blockSize)).astype(float)

        # Calculate the weighting radius
        realR = np.sqrt(np.sum(imgBinary) / np.pi)

        # Calculate the nearest odd number to the radius for the blockSize
        if (int(realR) % 2 == 0):
            oddRearR = int(realR + 1)
        else:
            oddRearR = int(realR)

        # Criteria check of the while loop
        delta = abs(blockSize - oddRearR)
        times += 1

        # New value of blockSize
        blockSize = oddRearR

    # Calculate the center of mass
    realcy, realcx = center_of_mass(imgBinary)

    # The values of (realcx, realcy, realR) will be (nan, nan, 0.0) for an
    # invalid image.
    if (not np.isfinite([realcx, realcy]).any()):
        print("Can not fit donut to circle.")

    return realcx, realcy, realR, imgBinary
def get_string(): global cap global ctr global flag ctr = 0 flag = 0 while 1: ret, image = cap.read() if ret == False: return "Vibrate 2" else: rows, cols, x = image.shape M = cv2.getRotationMatrix2D((cols / 2, rows / 2), 90, 1) image = cv2.warpAffine(image, M, (cols, rows)) if ctr >= 1: print("Capturing Image.....") orig = image.copy() gray = cv2.cvtColor(image, cv2.COLOR_BGR2GRAY) gray = cv2.GaussianBlur(gray, (5, 5), 0) edged = cv2.Canny(gray, 75, 200) cnts = cv2.findContours(edged.copy(), cv2.RETR_LIST, cv2.CHAIN_APPROX_SIMPLE) cnts = imutils.grab_contours(cnts) cnts = sorted(cnts, key=cv2.contourArea, reverse=True)[:5] for c in cnts: peri = cv2.arcLength(c, True) approx = cv2.approxPolyDP(c, 0.02 * peri, True) if len(approx) == 4: screenCnt = approx break else: flag = 1 break if flag == 1: cv2.imwrite('capture.jpg', image) return "Vibrate" else: #print(flag) print("Getting OCR....") warped = four_point_transform(orig, screenCnt.reshape(4, 2)) warped = cv2.cvtColor(warped, cv2.COLOR_BGR2GRAY) T = threshold_local(warped, 11, offset=10, method="gaussian") #warped = (warped > T).astype("uint8") * 255 #cv2.imshow("Scanned", warped) #cv2.imshow("pers", image) cv2.imwrite('capture.jpg', image) cv2.imwrite('text.jpg', warped) cv2.waitKey(0) cv2.destroyAllWindows() return pytesseract.image_to_string(warped)
def convertImages(images): i = cv2.imread(images) i = cv2.cvtColor(i,cv2.COLOR_BGR2GRAY) T = threshold_local(i,999,offset=10,method="gaussian") i = (T - i).astype("uint8")*255 img = Image.fromarray(i).resize((28,28)) im2arr = np.array(img)/255.0 #squeeze - removes unwanted dimensions. (28*28,1 - n^2,1 (1D)) im2arr = np.squeeze(im2arr.reshape(1,28*28,1,1)) im2arr = im2arr.reshape(1,28*28) return im2arr
def subtract_rolling_ball(image, radius): """Subtracts background from image using the Rolling Ball algorithm.""" subtract = SubtractBall(radius) new_radius = subtract.ball.width small_image = pyramid_reduce(image, downscale=subtract.ball.shrink_factor) background = threshold_local(small_image, new_radius, method='generic', param=subtract.bg) background = resize(background, image.shape) return image - background
def intensity_object_features(im, threshold=None, adaptive_t_radius=51, sample_size=None, random_seed=None): """Segment objects based on intensity threshold and compute properties. Parameters ---------- im : 2D np.ndarray of float or uint8. The input image. threshold : float, optional A threshold for the image to determine objects: connected pixels above this threshold will be considered objects. If ``None`` (default), the threshold will be automatically determined with both Otsu's method and a locally adaptive threshold. adaptive_t_radius : int, optional The radius to calculate background with adaptive threshold. sample_size : int, optional Sample this many objects randomly, rather than measuring all objects. random_seed: int, or numpy RandomState instance, optional An optional random number generator or seed from which to draw samples. Returns ------- f : 1D np.ndarray of float The feature vector. names : list of string The list of feature names. """ if threshold is None: tim1 = im > filters.threshold_otsu(im) f1, names1 = object_features(tim1, im, sample_size=sample_size, random_seed=random_seed) names1 = ['otsu-threshold-' + name for name in names1] tim2 = im > filters.threshold_local(im, adaptive_t_radius) f2, names2 = object_features(tim2, im, sample_size=sample_size, random_seed=random_seed) names2 = ['adaptive-threshold-' + name for name in names2] f = np.concatenate([f1, f2]) names = names1 + names2 else: tim = im > threshold f, names = object_features(tim, im, sample_size=sample_size, random_seed=random_seed) return f, names
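# A minimal sketch of calling the feature extractor above; it assumes that
# `object_features` and the `filters`/`np` imports used by the function are
# available in the same module, and uses a built-in skimage sample image
# purely for illustration.
from skimage import data, img_as_float

coins = img_as_float(data.coins())
feats, feat_names = intensity_object_features(coins, adaptive_t_radius=51,
                                              sample_size=50, random_seed=0)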
def img_thresholding(img, type): show_image(img) img_grayscale = color.rgb2gray(img) if type == 'global': thresh = threshold_otsu(img_grayscale) else: thresh = threshold_local(img_grayscale, block_size=35, offset=10) img_binary = img_grayscale > thresh img_binary2 = img_grayscale < thresh show_image(img_binary) show_image(img_binary2)
def create_photos(self): tempPhotos = self.photos[:] for item in tempPhotos: oldpath = r'' + oldPhotosPath + "\\" + item image = cv2.imread(oldpath) ratio = image.shape[0] / 500.0 orig = image.copy() image = imutils.resize(image, height=500) # convert photos to gray and find edges gray = cv2.cvtColor(image, cv2.COLOR_BGR2GRAY) gray = cv2.GaussianBlur(gray, (5, 5), 0) edged = cv2.Canny(gray, 75, 200) # find the contours in the edged image, keeping only the # largest ones, and initialize the screen contour cnts = cv2.findContours(edged.copy(), cv2.RETR_LIST, cv2.CHAIN_APPROX_SIMPLE) cnts = imutils.grab_contours(cnts) cnts = sorted(cnts, key=cv2.contourArea, reverse=True)[:5] # loop over the contours for c in cnts: peri = cv2.arcLength(c, True) approx = cv2.approxPolyDP(c, 0.02 * peri, True) if len(approx) == 4: screenCnt = approx break newPath = r'' + newPhotosPath + '\\' + item + '.jpg' if len(approx) != 4: self.add_photos_left(item) save_photo = self.no_scan(oldpath) if save_photo: self.save_photo(newPath, image) self.del_photo(item) else: self.del_photo(item) continue else: cv2.drawContours(image, [screenCnt], -1, (0, 255, 0), 2) warped = four_point_transform.four_point_transform( orig, screenCnt.reshape(4, 2) * ratio) warped = cv2.cvtColor(warped, cv2.COLOR_BGR2GRAY) T = threshold_local(warped, 11, offset=10, method="gaussian") warped = (warped > T).astype("uint8") * 255 self.save_photo(newPath, warped) self.del_photo(item)
def show_adaptive_thresholding(image, blurred_image): cv_thresh = cv2.adaptiveThreshold( blurred_image, 255, cv2.ADAPTIVE_THRESH_MEAN_C, cv2.THRESH_BINARY_INV, blockSize=25, C=15) sk_t = threshold_local( blurred_image, block_size=29, offset=5, method="gaussian") # bitwise_not equivalent sk_thresh = (blurred_image < sk_t).astype("uint8") * 255 cv2.imshow("OpenCV Mean Adaptive Thresholding", cv_thresh) cv2.imshow("Scikit Mean Adaptive Thresholding", sk_thresh) cv2.imshow("Original", image) cv2.waitKey(0)
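# A small self-contained check of the "bitwise_not equivalent" comment above:
# taking "pixel < threshold" directly produces the same inverted binary image
# as thresholding with ">=" and then applying cv2.bitwise_not. Synthetic data
# and a constant stand-in threshold surface are used so the snippet runs on
# its own.
import cv2
import numpy as np

rng = np.random.default_rng(0)
img8 = rng.integers(0, 256, size=(64, 64), dtype=np.uint8)
t_surface = np.full(img8.shape, 128, dtype=np.uint8)
inv_direct = (img8 < t_surface).astype("uint8") * 255
inv_via_not = cv2.bitwise_not((img8 >= t_surface).astype("uint8") * 255)
assert np.array_equal(inv_direct, inv_via_not)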
def ocr(image, median): gray = get_grayscale(image) if(median): gray = cv2.medianBlur(gray, 3) warped = thresholding(gray) warped = cv2.cvtColor(image, cv2.COLOR_BGR2GRAY) T = threshold_local(warped, 11, offset=10, method='gaussian') warped = (warped > T).astype('uint8') * 255 pytesseract.pytesseract.tesseract_cmd = TESSERACT_PATH custom_config = r'--oem 3 --psm 6' text = pytesseract.image_to_string(warped, config=custom_config) return text
def image_to_df(image_name): angle_indicator = int(image_name.split('_')[1]) image = misc.imread('train_sample/' + image_name + '.jpg',flatten = True).astype(float) image_rgb = misc.imread('train_sample/' + image_name + '.jpg') image_float = image_rgb.astype(float) image_mask = misc.imread('train_masks/' + image_name + '_mask.gif',flatten = True) image_mask = image_mask/255 #io.imshow(image_mask) image_index = np.where(image >= 0) sobel = filters.sobel(image) # working #io.imshow(sobel) sobel_blurred = filters.gaussian(sobel,sigma=1) # Working #io.imshow(sobel_blurred) canny_filter_image = canny(image/255.) #io.imshow(canny_filter_image) # threshold_niblack_11 = filters.threshold_niblack(sobel_blurred,201) #io.imshow(threshold_niblack) threshold_li = filters.threshold_li(image) mask_li = image > threshold_li #io.imshow(mask) sobel_h = filters.sobel_h(image) sobel_v = filters.sobel_v(image) laplace = filters.laplace(image) threshold_local_51 = filters.threshold_local(image,51) mask_local_51 = image > threshold_local_51 #io.imshow(mask) df = pd.DataFrame() df['l1_dist_y'] = abs(image_index[0] - 639.5)/639.5 df['l1_dist_x'] = abs(image_index[1] - 958.5)/958.5 df['l2_dist'] = np.sqrt((df.l1_dist_y)**2 + (df.l1_dist_x)**2)/np.sqrt(2) df['grey_values'] = image.reshape((1,1918*1280))[0]/255. df['red_values'] = image_rgb.reshape((3,1918*1280))[0]/255. df['blue_values'] = image_rgb.reshape((3,1918*1280))[1]/255. df['green_values'] = image_rgb.reshape((3,1918*1280))[2]/255. df['red_float'] = image_float.reshape((3,1918*1280))[0]/255. df['blue_float'] = image_float.reshape((3,1918*1280))[1]/255. df['green_float'] = image_float.reshape((3,1918*1280))[2]/255. df['sobel_blurred'] = sobel_blurred.reshape((1,1918*1280))[0]/255. df['canny_filter_image'] = canny_filter_image.reshape((1,1918*1280))[0].astype(int) df['sobel_h'] = sobel_h.reshape((1,1918*1280))[0]/255. df['sobel_v'] = sobel_v.reshape((1,1918*1280))[0]/255. df['laplace'] = laplace.reshape((1,1918*1280))[0]/511. df['threshold_local_51'] = mask_local_51.reshape((1,1918*1280))[0].astype(int) # df['threshold_niblack_11'] = threshold_niblack_11.reshape((1,1918*1280))[0]#/255. df['threshold_li'] = mask_li.reshape((1,1918*1280))[0].astype(int) for i in range(1,17): if i == angle_indicator: df['angle_indicator_' + str(i)] = 1 else: df['angle_indicator_' + str(i)] = -1 df['mask'] = image_mask.reshape((1,1918*1280))[0] df['mask'] = df['mask'].astype('category') return df
def preprocess_mobile_image(image): # convert the image to grayscale, blur it, and find edges gray = cv2.cvtColor(image, cv2.COLOR_BGR2GRAY) gray = cv2.GaussianBlur(gray, (5, 5), 0) edged = cv2.Canny(gray, 100, 200) T = threshold_local(edged, 11, offset=10, method="gaussian") edged = (edged > T).astype("uint8") * 255 output = Image.fromarray(edged) output.save("temp/mobile_output.jpg") return True
def test(): files = [ f.path for f in os.scandir("../sroie-data/task1/data_bordered/") if f.name.endswith(".jpg") ] for f in files: print(f) im = numpy.array(Image.open(f).convert("L")) im_bin = threshold_local(im, block_size=9).astype(numpy.uint8) # print(im_bin) Image.fromarray(im_bin).save(os.path.splitext(f)[0] + "-bin.png")
def denoising(): listImagesLocal = [] listImagesBinary = [] for i in range(143): listImagesLocal.append( threshold_local(images[i], block_size=35, offset=40)) listImagesBinary.append(images[i] > listImagesLocal[i]) return listImagesBinary
def preprocess_image(image, i): gray = image.copy() element = np.ones((1, 2)) #bright images are processed more accurately with mean method and smaller range if (np.mean(gray) < 190): T = threshold_local(gray, 15, offset=8, method="median") #generic, mean, median, gaussian else: T = threshold_local(gray, 7, offset=8, method="mean") #generic, mean, median, gaussian thresholded = (gray > T).astype("uint8") * 255 cv2.imwrite("staffs/staffs" + repr(i) + "_thr.png", thresholded) thresholded = cv2.erode(thresholded, element) cv2.imwrite("staffs/staffs" + repr(i) + "_erode.png", thresholded) edges = cv2.Canny(thresholded, 10, 100, apertureSize=3) cv2.imwrite("staffs/staffs" + repr(i) + "_canny.png", edges) return edges, thresholded
def findmembranes(arr_raw): '''Morphological operations to find cell membranes from dystrophin channel, or similar''' kernel = cv2.getStructuringElement(cv2.MORPH_ELLIPSE, (5, 5)) kernelsm = cv2.getStructuringElement(cv2.MORPH_ELLIPSE, (3, 3)) # Need to check here that the values in arr lie between 0 and 255! arr = np.array(arr_raw, dtype=np.uint8) recipe = [ (cv2.dilate, kernelsm), (cv2.dilate, kernelsm), (cv2.dilate, kernelsm), #(cv2.dilate,kernel), #(cv2.dilate,kernel), #(cv2.erode,kernel), (cv2.erode, kernelsm), (cv2.erode, kernelsm), (cv2.erode, kernelsm) ] #arrf = ndimage.gaussian_filter(arr,0.3) #arrf = cv2.GaussianBlur(arr, ksize=(3,3),sigmaX=0,sigmaY=0) #arrf = cv2.medianBlur(arr,3) #arrf = cv2.bilateralFilter(arr,d=9,sigmaColor=1555,sigmaSpace=1555) #ret,thresh = cv2.threshold(arrf.astype(np.uint8),1,255,cv2.THRESH_BINARY) arrf = restoration.denoise_bilateral(arr, 11, sigma_color=3, sigma_spatial=3, multichannel=False) Image.fromarray(makepseudo(arrf)).show() #glob_thresh = filters.threshold_otsu(arrf) #thresh = np.array(255*(arrf > glob_thresh/2.0),dtype=np.uint8) locthresh = filters.threshold_local(arrf, block_size=21, offset=0) thresh = arrf > (locthresh) Image.fromarray(makepseudo(255 * thresh)).show() threshclean = morphology.remove_small_objects(thresh, 600) Image.fromarray(makepseudo(255 * threshclean)).show() #thresh = morphology.skeletonize(thresh) #Image.fromarray(makepseudo(255*thresh)).show() #thresh = morphology.binary_dilation(thresh) #Image.fromarray(makepseudo(255*thresh)).show() thresh = np.array(255 * thresh, dtype=np.uint8) comb0 = threshorig(arr, thresh) for func, kern in recipe: thresh = func(thresh, kern) Image.fromarray(makepseudo(thresh)).show() ithresh = cv2.bitwise_not(thresh) return ((ithresh, comb0, threshorig(arr, thresh)))
def run(self): while True: ret, img = self.cam.read() if ret: block_size = 35 bw = cv2.cvtColor(img, cv2.COLOR_RGB2GRAY) adaptive_thresh = threshold_local(bw, block_size, offset=10) binary_adaptive = bw > adaptive_thresh img[binary_adaptive] = (0,0,0) image = QtGui.QImage(img.data, self.width, self.height, QtGui.QImage.Format_RGB888) self.signal.emit(image)
def local_threshold(imagePath, outputPath):
    warnings.filterwarnings("ignore")
    imagePath = "" + imagePath
    color = io.imread(imagePath)
    img = rgb2gray(color)
    image = img_as_ubyte(img)
    # binarize against a local threshold and save the result
    block_size = 35
    adaptive_thresh = threshold_local(image, block_size, offset=10)
    binary_local = image > adaptive_thresh
    local_out = img_as_ubyte(binary_local)
    imsave('' + outputPath, local_out)
def detectCharacterCandidates(self,region): plate = perspective.four_point_transform(self.image,region) V = cv2.split(cv2.cvtColor(plate,cv2.COLOR_BGR2HSV))[2] T = threshold_local(V,29,offset=15,method="gaussian") thresh = (V>T).astype("uint8") *255 thresh = cv2.bitwise_not(thresh) plate = imutils.resize(plate,width=400) thresh = imutils.resize(thresh,width=400) labels = measure.label(thresh,neighbors=8,background=0) charCandidates = np.zeros(thresh.shape,dtype="uint8") for label in np.unique(labels): if label ==0: continue labelMask = np.zeros(thresh.shape, dtype="uint8") labelMask[labels == label] = 255 cnts = cv2.findContours(labelMask, cv2.RETR_EXTERNAL, cv2.CHAIN_APPROX_SIMPLE) cnts = imutils.grab_contours(cnts) if len(cnts) > 0: c = max(cnts, key=cv2.contourArea) (boxX, boxY, boxW, boxH) = cv2.boundingRect(c) aspectRatio = boxW / float(boxH) solidity = cv2.contourArea(c) / float(boxW * boxH) heightRatio = boxH / float(plate.shape[0]) keepAspectRatio = aspectRatio < 1.0 keepSolidity = solidity > 0.15 keepHeight = heightRatio > 0.4 and heightRatio < 0.95 if keepAspectRatio and keepSolidity and keepHeight: hull = cv2.convexHull(c) cv2.drawContours(charCandidates, [hull], -1, 255, -1) charCandidates = segmentation.clear_border(charCandidates) cnts = cv2.findContours(charCandidates.copy(), cv2.RETR_EXTERNAL,cv2.CHAIN_APPROX_SIMPLE) cnts = imutils.grab_contours(cnts) if len(cnts) > self.minChars: (charCandidates, cnts) = self.pruneCandidates(charCandidates, cnts) thresh = cv2.bitwise_and(thresh, thresh, mask=charCandidates) cv2.imshow("Char Threshold", thresh) return LicensePlate(success=True,plate=plate,thresh=thresh,candidates=charCandidates)
def eucledean_distance_map(img, thresh_block_size=21, denoise_level=9):
    '''Creates a Euclidean distance map for an input image.'''
    img_gray = color.rgb2gray(img)  # convert to grayscale
    adaptive_thresh = filters.threshold_local(
        img_gray, block_size=thresh_block_size, offset=0
    )  # compute the local threshold values
    img_gray_thres = img_gray > adaptive_thresh  # apply the threshold
    img_denoise = median_filter(
        img_gray_thres, size=denoise_level)  # reduce image noise by despeckling
    img_EDM = distance_transform_edt(
        img_denoise)  # estimate the Euclidean distance to the closest dark pixel
    return img_EDM, img_denoise
def warp(img): # compute the ratio of the old height # to the new height, clone it, and resize it ratio = img.shape[0] / 500.0 orig = img.copy() img = imutils.resize(img, height=500) # convert the image to grayscale, blur it, and find edges # in the image gray = cv2.cvtColor(img, cv2.COLOR_BGR2GRAY) gray = cv2.GaussianBlur(gray, (5, 5), 0) edged = cv2.Canny(gray, 75, 200) # find the contours in the edged image, keeping only the # largest ones, and initialize the screen contour cnts = cv2.findContours(edged.copy(), cv2.RETR_LIST, cv2.CHAIN_APPROX_SIMPLE) cnts = imutils.grab_contours(cnts) cnts = sorted(cnts, key=cv2.contourArea, reverse=True)[:5] # loop over the contours for c in cnts: # approximate the contour peri = cv2.arcLength(c, True) approx = cv2.approxPolyDP(c, 0.02 * peri, True) # if our approximated contour has four points, then we # can assume that we have found our screen ### todo: incomplete document with more than 4 edges if len(approx) == 4: screenCnt = approx break # apply the four point transform to obtain a top-down # view of the original image warped = four_point_transform(orig, screenCnt.reshape(4, 2) * ratio) # convert the warped image to grayscale, then threshold it # to give it that 'black and white' paper effect warped = cv2.cvtColor(warped, cv2.COLOR_BGR2GRAY) T = threshold_local(warped, 11, offset=10, method="gaussian") warped = (warped > T).astype("uint8") * 255 warped = cv2.cvtColor(warped, cv2.COLOR_GRAY2RGB) # show the original and scanned images #print("STEP 3: Apply perspective transform") cv2.imshow("Original", imutils.resize(orig, height=650)) cv2.imshow("Scanned", imutils.resize(warped, height=650)) cv2.waitKey(0) cv2.destroyAllWindows() return warped
def segmentation(self, LpRegion, name): # apply thresh to extracted licences plate V = cv2.split(cv2.cvtColor(LpRegion, cv2.COLOR_BGR2HSV))[2] # adaptive threshold T = threshold_local(V, 15, offset=10, method="gaussian") thresh = (V > T).astype("uint8") * 255 cv2.imwrite("output/lp/{}_step2_1.png".format(name), thresh) # convert black pixel of digits to white pixel thresh = cv2.bitwise_not(thresh) cv2.imwrite("output/lp/{}_step2_2.png".format(name), thresh) thresh = imutils.resize(thresh, width=400) thresh = cv2.medianBlur(thresh, 5) cv2.imwrite("output/lp/{}_step2_3.png".format(name), thresh) # connected components analysis labels = measure.label(thresh, connectivity=2, background=0) # loop over the unique components for label in np.unique(labels): # if this is background label, ignore it if label == 0: continue # init mask to store the location of the character candidates mask = np.zeros(thresh.shape, dtype="uint8") mask[labels == label] = 255 # find contours from mask _, contours, hierarchy = cv2.findContours(mask, cv2.RETR_EXTERNAL, cv2.CHAIN_APPROX_SIMPLE) if len(contours) > 0: contour = max(contours, key=cv2.contourArea) (x, y, w, h) = cv2.boundingRect(contour) # rule to determine characters aspectRatio = w / float(h) solidity = cv2.contourArea(contour) / float(w * h) heightRatio = h / float(LpRegion.shape[0]) if 0.1 < aspectRatio < 1.0 and solidity > 0.1 and 0.35 < heightRatio < 2.0: # extract characters candidate = np.array(mask[y:y + h, x:x + w]) square_candidate = convert2Square(candidate) square_candidate = cv2.resize(square_candidate, (28, 28), cv2.INTER_AREA) cv2.imwrite( './characters/' + str(x) + "_" + str(y) + ".png", cv2.resize(square_candidate, (56, 56), cv2.INTER_AREA)) square_candidate = square_candidate.reshape((28, 28, 1)) self.candidates.append((square_candidate, (y, x)))
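# A minimal standalone sketch of the binarization step used in `segmentation`
# above: take the V channel of the plate crop, threshold it locally, and
# invert so the characters come out white. The file name is a placeholder.
import cv2
from skimage.filters import threshold_local

plate_bgr = cv2.imread("plate_crop.png")  # hypothetical input crop
V = cv2.split(cv2.cvtColor(plate_bgr, cv2.COLOR_BGR2HSV))[2]
T = threshold_local(V, 15, offset=10, method="gaussian")
char_mask = cv2.bitwise_not((V > T).astype("uint8") * 255)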
def threshold_local(image, *args, **kwargs):
    '''
    skimage changed threshold_adaptive to threshold_local. This wraps both to
    ensure the same behaviour with old and new versions.
    '''
    try:
        from skimage.filters import threshold_local
        mask = image > threshold_local(image, *args, **kwargs)
    except ImportError:
        from skimage.filters import threshold_adaptive
        mask = threshold_adaptive(image, *args, **kwargs)
    return mask
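# A minimal sketch of calling the compatibility wrapper above. Unlike
# skimage's own `threshold_local`, which returns a threshold surface, this
# wrapper returns a boolean mask of pixels above their local threshold on
# both old and new skimage versions. The sample image and block size are
# arbitrary choices for illustration.
from skimage import data

page = data.page()
mask = threshold_local(page, 35, offset=10)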
def preprocess_cheque(infile, outfile):
    # Open image in grayscale mode
    image = np.array(Image.open(infile).convert("L"))

    # Apply local adaptive thresholding (Gaussian-weighted neighborhood mean minus offset)
    block_size = 25
    adaptive_thresh = threshold_local(image, block_size, offset=15)
    binarized = image > adaptive_thresh
    binarized = binarized.astype(float) * 255
    binarized = Image.fromarray(binarized).convert("L")

    # Save binarized file
    binarized.save(outfile)
def get_bin_threshold(self, percent, high=True, adaptive=False, binary=True, img=False): if adaptive: if binary: return self.pixels > threshold_local(self.pixels, percent) return threshold_local(self.pixels, percent) mi = np.min(self.pixels) norm = (self.pixels-mi)/(np.max(self.pixels)-mi) if high: r = norm > percent else: r = norm < percent if not img: if binary: return r return np.ones(self.pixels.shape)*r else: I = copy.deepcopy(self) I.channel = "Threshold from "+I.channel if binary: I.pixels = r else: I.pixels = np.ones(self.pixels.shape)*r return I
def intensity_object_features(im, threshold=None, adaptive_t_radius=51, sample_size=None, random_seed=None): """Segment objects based on intensity threshold and compute properties. Parameters ---------- im : 2D np.ndarray of float or uint8. The input image. threshold : float, optional A threshold for the image to determine objects: connected pixels above this threshold will be considered objects. If ``None`` (default), the threshold will be automatically determined with both Otsu's method and a locally adaptive threshold. adaptive_t_radius : int, optional The radius to calculate background with adaptive threshold. sample_size : int, optional Sample this many objects randomly, rather than measuring all objects. random_seed: int, or numpy RandomState instance, optional An optional random number generator or seed from which to draw samples. Returns ------- f : 1D np.ndarray of float The feature vector. names : list of string The list of feature names. """ if threshold is None: tim1 = im > imfilter.threshold_otsu(im) f1, names1 = object_features(tim1, im, sample_size=sample_size, random_seed=random_seed) names1 = ['otsu-threshold-' + name for name in names1] tim2 = im > imfilter.threshold_local(im, adaptive_t_radius) f2, names2 = object_features(tim2, im, sample_size=sample_size, random_seed=random_seed) names2 = ['adaptive-threshold-' + name for name in names2] f = np.concatenate([f1, f2]) names = names1 + names2 else: tim = im > threshold f, names = object_features(tim, im, sample_size=sample_size, random_seed=random_seed) return f, names
def _optimal_w(image, p=0.05):
    # Calculate the optimal window size for the image segmentation given a quantile.
    # It expands the radius until it reaches the best segmentation.
    # radiusMin, radiusMax and inc are in percentages of the image size; p is a
    # value in [0, 1]; image is the original version.
    radiusMin = 5
    radiusMax = 40
    inc = 1
    f = (image - np.min(image)) / (np.max(image) - np.min(image))
    dims = f.shape
    rows = dims[0]
    cols = dims[1]
    maxsize = np.max([rows, cols])
    imagesize = cols * rows
    radius_thresh = np.round(np.min([rows, cols]) / 4.)
    unit = np.round(maxsize / 100.)
    radiusMin = radiusMin * unit
    radiusMax = radiusMax * unit
    radiusMax = int(np.min([radiusMax, radius_thresh]))
    radius = radiusMin
    inc = inc * unit
    bg = np.percentile(f, p * 100)
    fg = np.percentile(f, (1 - p) * 100)
    min_ov = imagesize
    while (radius <= radiusMax):
        tt = int(radius * radius)
        if tt % 2 == 0:
            tt += 1
        adaptive_threshold = threshold_local(f, tt, method='mean', offset=0)
        g = f > adaptive_threshold
        ov = _bg_fg(f, g, bg, fg)
        if (ov < min_ov):
            w = radius
            min_ov = ov
        radius += inc
    return w
def test_apply_parallel(): # data a = np.arange(144).reshape(12, 12).astype(float) # apply the filter expected1 = threshold_local(a, 3) result1 = apply_parallel(threshold_local, a, chunks=(6, 6), depth=5, extra_arguments=(3,), extra_keywords={'mode': 'reflect'}) assert_array_almost_equal(result1, expected1) def wrapped_gauss(arr): return gaussian(arr, 1, mode='reflect') expected2 = gaussian(a, 1, mode='reflect') result2 = apply_parallel(wrapped_gauss, a, chunks=(6, 6), depth=5) assert_array_almost_equal(result2, expected2)
def equalize_exposure(image, iterations=1, kernel_size=None, min_object_size=500, dark_objects=True, stretch=False): """ Filter a grayscale image with uneven brightness across it, such as you might see in a microscope image. Removes large objects using adaptive thresholding based on `min_object_size`, then calculates the mean in a circular neighborhood of diameter `kernel_size`. Smooths this mean, then subtracts the background variation from the original image (including large objects). Run twice of best results, though once should give satisfactory results. As a bonus, this often enhances white balance and colors. For color images, run on each band separately and then combine into a [dim_x, dim_y, 3] numpy array. When run on color images with `stretch=True`, this function improves white balance and colors. Essential for filtering candidate objects by color. Slow; could be optimized with `opencv_python`, though this function doesn't support masking when calculating means. Parameters ---------- image : ndarray (float, int) Grayscale image or band. kernel_size : int Passes to `skimage.morphology.disk` to create a kernel of diameter kernel_size in pixels. If `None`, defaults to `max(image.shape)/10`. min_object_size : int Passes to `skimage.morphology.remove_small_holes`. Area of objects to ignore when averaging background values, in pixels. dark_objects : bool Are objects dark against a light background? stretch : bool Stretch values to cover entire colorspace? Enhances colors. Largely aesthetic. Not recommended for batch analyses. Returns ------- An ndarray of type float [0:1]. See Also -------- `skimage.filters.rank.mean` """ # Housekeeping img = img_as_float(image.copy()) if stretch is True: img = img/img.max() if dark_objects is False: img = 1-img # invert img_in = img.copy() # for use later if kernel_size is None: kernel_size = np.int(max(image.shape[0], image.shape[1])/10) # mean filter kernel kernel = morphology.disk(int(kernel_size/2)) # identify objects to ignore if kernel_size % 2 is 0: block_size = kernel_size + 1 else: block_size = kernel_size #objects = ~filters.threshold_adaptive(img, block_size, offset = 0.01*img.max()) # deprecated function objects = img > filters.threshold_local(img, block_size, offset = 0.01*img.max()) objects = morphology.remove_small_objects(objects, min_size = min_object_size) # Correct Exposure x times i = 0 while i < iterations: # Global mean img_mean = np.ma.masked_array(img, mask=objects).mean() # global means local_means = filters.rank.mean(img, selem=kernel, mask=~objects) local_means = filters.gaussian(local_means, kernel_size) # Correct Image img += (img_mean - local_means) img[img>1] = 1 # for compatibilty with img_as_float img[img<0] = 0 # for compatibilty with img_as_float i += 1 out = img_as_float(img) return(out)
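# A minimal sketch of the per-band usage described in the docstring above:
# run `equalize_exposure` on each channel of an RGB image and re-stack the
# results into a [dim_x, dim_y, 3] array. It assumes the imports used by the
# function are in scope; the astronaut sample image is for illustration only.
import numpy as np
from skimage import data, img_as_float

rgb = img_as_float(data.astronaut())
corrected = np.dstack([
    equalize_exposure(rgb[:, :, band], iterations=2, stretch=True)
    for band in range(3)
])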
def frangi_segmentation(image, colors, frangi_args, threshold_args, separate_objects=True, contrast_kernel_size='skip', color_args_1='skip', color_args_2='skip', color_args_3='skip', neighborhood_args='skip', morphology_args_1='skip', morphology_args_2='skip', hollow_args='skip', fill_gaps_args='skip', diameter_args='skip', diameter_bins='skip', image_name='image', verbose=False): """ Possible approach to object detection using frangi filters. Selects colorbands for analysis, runs frangi filter, thresholds to identify candidate objects, then removes spurrious objects by color and morphology characteristics. See frangi_approach.ipynb. Unless noted, the dictionaries are called by their respective functions in order. Parameters ---------- image : ndarray RGB image to analyze colors : dict or str Parameters for picking the colorspace. See `pyroots.band_selector`. frangi_args : list of dict or dict Parameters to pass to `skimage.filters.frangi` threshold_args : list of dict or dict Parameters to pass to `skimage.filters.threshold_adaptive` contrast_kernel_size : int, str, or None Kernel size for `skimage.exposure.equalize_adapthist`. If `int`, then gives the size of the kernel used for adaptive contrast enhancement. If `None`, uses default (1/8 shortest image dimension). If `skip`, then skips. color_args_1 : dict Parameters to pass to `pyroots.color_filter`. color_args_2 : dict Parameters to pass to `pyroots.color_filter`. Combines with color_args_1 in an 'and' statement. color_args_3 : dict Parameters to pass to `pyroots.color_filter`. Combines with color_args_1, 2 in an 'and' statement. neighborhood_args : dict Parameters to pass to 'pyroots.neighborhood_filter'. morphology_args_1 : dict Parameters to pass to `pyroots.morphology_filter` morphology_args_2 : dict Parameters to pass to `pyroots.morphology_filter`. Happens after fill_gaps_args in the algorithm. hollow_args : dict Parameters to pass to `pyroots.hollow_filter` fill_gaps_args : dict Paramaters to pass to `pyroots.fill_gaps` diameter_bins : list To pass to `pyroots.bin_by_diameter` image_name : str Identifier of image for summarizing Returns ------- A dictionary containing: 1. `"geometry"` summary `pandas.DataFrame` 2. `"objects"` binary image 3. `"length"` medial axis image 4. `"diameter"` medial axis image """ # Pull band from colorspace working_image = band_selector(image, colors) # expects dictionary (lazy coding) nbands = len(working_image) if verbose is True: print("Color bands selected") ## Count nubmer of dictionaries in threshold_args and frangi_args. Should equal number of bands. 
Convert to list if necessary try: len(threshold_args[0]) except: threshold_args = [threshold_args] if nbands != len(threshold_args): raise ValueError( """Number of dictionaries in `threshold_args` doesn't equal the number of bands in `colors['band']`!""" ) pass try: len(frangi_args[0]) except: frangi_args = [frangi_args] if nbands != len(frangi_args): raise ValueError( """Number of dictionaries in `frangi_args` doesn't equal the number of bands in `colors['band']`!""" ) pass working_image = [img_as_float(i) for i in working_image] # Contrast enhancement try: for i in range(nbands): temp = exposure.equalize_adapthist(working_image[i], kernel_size = contrast_kernel_size) working_image[i] = img_as_float(temp) if verbose: print("Contrast enhanced") except: if contrast_kernel_size is not 'skip': warn('Skipping contrast enhancement') pass # invert if necessary for i in range(nbands): if not colors['dark_on_light'][i]: working_image[i] = 1 - working_image[i] # Identify smoothing sigma for edges and frangi thresholding # simultaneously detect edges (computationally cheaper than multiple frangi enhancements) edges = [np.ones_like(working_image[0]) == 1] * nbands # all True sigma_val = [0.125] * nbands # step is 0, 0.25, 0.5, 1, 2, 4, 8, 16 for i in range(nbands): edge_val = 1 while edge_val > 0.1 and sigma_val[i] < 10: sigma_val[i] = 2*sigma_val[i] temp = filters.gaussian(working_image[i], sigma=sigma_val[i]) temp = filters.scharr(temp) temp = temp > filters.threshold_otsu(temp) edge_val = np.sum(temp) / np.sum(np.ones_like(temp)) edges_temp = temp.copy() if sigma_val[i] == 0.25: # try without smoothing temp = filters.scharr(working_image[i]) temp = temp > filters.threshold_otsu(temp) edge_val = np.sum(temp) / np.sum(np.ones_like(temp)) if edge_val <= 0.1: sigma_val[i] = 0 edges_temp = temp.copy() if separate_objects: edges[i] = morphology.skeletonize(edges_temp) if verbose: print("Sigma value: {}".format(sigma_val)) if separate_objects: print("Edges found") # Frangi vessel enhancement for i in range(nbands): temp = filters.gaussian(working_image[i], sigma=sigma_val[i]) temp = filters.frangi(temp, **frangi_args[i]) temp = 1 - temp/np.max(temp) temp = temp < filters.threshold_local(temp, **threshold_args[i]) working_image[i] = temp.copy() frangi = working_image.copy() if verbose: print("Frangi filter, threshold complete") # Combine bands, separate objects combined = working_image[0] * ~edges[0] for i in range(1, nbands): combined = combined * working_image[i] * ~edges[i] working_image = combined.copy() # Filter candidate objects by color try: color1 = color_filter(image, working_image, **color_args_1) #colorspace, target_band, low, high, percent) if verbose: print("Color filter 1 complete") except: if color_args_1 is not 'skip': warn("Skipping Color Filter 1") color1 = np.ones(working_image.shape) # no filtering try: color2 = color_filter(image, working_image, **color_args_2) # nesting equates to an "and" statement. if verbose: print("Color filter 2 complete") except: if color_args_2 is not 'skip': warn("Skipping Color Filter 2") color2 = np.ones(working_image.shape) # no filtering try: color3 = color_filter(image, working_image, **color_args_3) # nesting equates to an "and" statement. 
if verbose: print("Color filter 3 complete") except: if color_args_3 is not 'skip': warn("Skipping Color Filter 3") color3 = np.ones(working_image.shape) # no filtering # Combine bands working_image = color1 * color2 * color3 del color1 del color2 del color3 # Re-expand to area if separate_objects: # find edges removed temp = [frangi[i] * edges[i] for i in range(nbands)] rm_edges = temp[0].copy() for i in range(1, nbands): rm_edges = rm_edges * temp[i] # filter by color per criteria above try: color1 = color_filter(image, rm_edges, **color_args_1) except: color1 = np.ones(rm_edges.shape) try: color2 = color_filter(image, rm_edges, **color_args_2) except: color2 = np.ones(rm_edges.shape) try: color3 = color_filter(image, rm_edges, **color_args_3) except: color3 = np.ones(rm_edges.shape) # Combine color filters expanded = color1 * color2 * color3 else: expanded = np.zeros(colorfilt.shape) == 1 # evaluate to false working_image = expanded ^ working_image # bitwise or try: # remove little objects (for computational efficiency) working_image = morphology.remove_small_objects( working_image, min_size=morphology_args_1['min_size'] ) except: pass if verbose: print("Edges re-added") # Filter candidate objects by morphology try: working_image = morphology_filter(working_image, **morphology_args_1) if verbose: print("Morphology filter 1 complete") except: if morphology_args_1 is not 'skip': warn("Skipping morphology filter 1") pass # Filter objects by neighborhood colors try: working_image = neighborhood_filter(image, working_image, **neighborhood_args) if verbose: print("Neighborhood filter complete") except: if neighborhood_args is not 'skip': warn("Skipping neighborhood filter") pass # Filter candidate objects by hollowness if hollow_args is not 'skip': temp = morphology.remove_small_holes(working_image, min_size=10) try: if np.sum(temp) > 0: working_image = hollow_filter(temp, **hollow_args) if verbose: print("Hollow filter complete") except: warn("Skipping hollow filter") pass # Close small gaps and holes in accepted objects try: working_image = fill_gaps(working_image, **fill_gaps_args) if verbose: print("Gap filling complete") except: if fill_gaps_args is not 'skip': warn("Skipping filling gaps") pass # Filter candidate objects by morphology try: working_image = morphology_filter(working_image, **morphology_args_2) if verbose: print("Morphology filter 2 complete") except: if morphology_args_2 is not 'skip': warn("Skipping morphology filter 2") pass # Skeletonize. Now working with a dictionary of objects. skel = skeleton_with_distance(working_image) if verbose: print("Skeletonization complete") # Diameter filter try: diam = diameter_filter(skel, **diameter_args) if verbose: print("Diameter filter complete") except: diam = skel.copy() if diameter_args is not 'skip': warn("Skipping diameter filter") pass # Summarize if diameter_bins is None or diameter_bins is 'skip': summary_df = summarize_geometry(diam['geometry'], image_name) else: diam_out, summary_df = bin_by_diameter(diam['length'], diam['diameter'], diameter_bins, image_name) diam['diameter'] = diam_out out = {'geometry' : summary_df, 'objects' : diam['objects'], 'length' : diam['length'], 'diameter' : diam['diameter']} if verbose is True: print("Done") return(out)
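# An illustrative (untested) call of the frangi pipeline above. The argument
# dictionaries are placeholders: the keys accepted by `colors`, `frangi_args`,
# and `threshold_args` depend on pyroots' `band_selector`,
# `skimage.filters.frangi`, and `skimage.filters.threshold_local` respectively,
# and the input file name is hypothetical.
from skimage import io

rgb = io.imread("root_scan.png")
colors = {'colorspace': 'lab', 'band': [2], 'dark_on_light': [False]}
frangi_args = [{'sigmas': range(1, 8, 2)}]
threshold_args = [{'block_size': 101, 'offset': 0}]
result = frangi_segmentation(rgb, colors, frangi_args, threshold_args,
                             image_name="root_scan")
result['geometry']  # summary pandas.DataFrame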
# Here, we binarize an image using the `threshold_local` function, which # calculates thresholds in regions with a characteristic size `block_size` surrounding # each pixel (i.e. local neighborhoods). Each threshold value is the weighted mean # of the local neighborhood minus an offset value. # from skimage.filters import threshold_otsu, threshold_local image = data.page() global_thresh = threshold_otsu(image) binary_global = image > global_thresh block_size = 35 adaptive_thresh = threshold_local(image, block_size, offset=10) binary_adaptive = image > adaptive_thresh fig, axes = plt.subplots(nrows=3, figsize=(7, 8)) ax = axes.ravel() plt.gray() ax[0].imshow(image) ax[0].set_title('Original') ax[1].imshow(binary_global) ax[1].set_title('Global thresholding') ax[2].imshow(binary_adaptive) ax[2].set_title('Adaptive thresholding')
def thresholding_segmentation(image, threshold_args, image_name='Default Image', colors='dark', contrast_kernel_size='skip', mask_args='skip', noise_removal_args='skip', morphology_filter_args='skip', fill_gaps_args='skip', lw_filter_args='skip', diam_filter_args='skip', diameter_bins=None, verbose=False): """ Full analysis of an image for length of objects based on thresholding. Performs the following steps: 1. Colorspace conversion and selecting analysis bands 2. Contrast enhancement 3. Adaptive thresholding bands to binary images 4. Combining multiplicatively (i.e. kept if `True` in all, if multiple bands) 5. Filter objects by size, length:width ratio, and diameter (all optional) 6. Smoothing (optional) 7. Measuring medial axis length and diameter along the length 8. Summarizing by diameter class or the entire image Methods that are optional are set as 'skip' for default. Most arguments require dictionaries of arguments for the subfunctions. The easiest way to generate these dictionaries is to use the thresholding-based segmentation notebook. See the pyroots functions for more information. Parameters ---------- image : array An RGB or black and white image for analysis threshold_args : list of dicts Dictionaries contain options for adaptive thresholding. See skimage.filters.threshold_adaptive(). At minimum, requires 'block_size', for example, threshold_args = [{'block_size':101}]. image_name : str What do you want to call your image? colors : dict or string See `pyroots.band_selector` For color analysis: Currently only supports one colorspace, but you can choose multiple bands. A dictionary containing: - colorspace: string. Colorspace in which to run the analysis (ex. RGB, LAB, HSV). See scikit-image documentation for all options. - band: list of integers Specifying which bands of `colorspace` on which to run the analysis (ex. 2 in RGB gives Blue, 0 gives Red). - dark_on_light: list of boolean Are the objects dark objects on a light background? Length must match length of `colors['band']`. For black and white analysis: A string of either: `'dark'` for dark roots on a light background `'light'` for light roots on a dark background contrast_kernel_size : int or None Dimension of kernel for adaptively enhancing contrast. Calls `skimage.exposure.equalize_adapthist()`. If `None`, will use a default of 1/8 height by 1/8 width. mask_args : dict Used for masking the image with an ellipse. Useful for photomicroscopy. See `pr.ellipse_mask`. noise_removal_args : dict Smooths and despeckles the image, and also separates loosely connected objects for easier filtering. Contains arguments for `pyroots.noise_removal()`. morphology_filter_args : dict Filters objects by shape, size, and solidity. See `pyroots.morphology_filter()`. fill_gaps_args : dict Removes small holes and gaps between objects, now that most noise is removed. See `pyroots.fill_gaps()`. lw_filter_args : dict Removes objects based on medial axis length:mean width ratios. See `pyroots.length_width_filter()`. diam_filter_args : dict Removes entire objects or parts of objects based on diameters. See `pyroots.diameter_filter()`. diameter_bins : list of float Bin cutoffs for summarizing object length by diameter class. Defaults to `None`, which returns total length and average diameter for all objects in the image. verbose : bool Give feedback showing the step working on? Returns ------- A dictionary containing: 1. 
'geometry' : a `pandas` dataframe describing either: - image name, total length, mean diameter, and the number of objects (if `diameter_bins` is `None`) - image name, length by diameter class, and diameter class (otherwise) 2. 'objects' : a binary image of kept objects 3. 'length' : a 2D image array of object medial axes with values indicating the length at that axis 4. 'diameter' : a 2D image array of object medial axes with values indicating either: - the diameter at that pixel (if `diameter_bins` is `None`) - the diameter bin to which a pixel belongs (otherwise) 3) skeleton pixel lengths; 4) skeleton pixel diameters. Notes ----- Most functions within this method are attempted. If they receive an unuseable argument, e.g. the dictionary contains a formatting error or a bad keyword, then the method will be skipped with a warning. If you see such a warning ("Skipping (function)..."), check the formatting of your argument and re-create a parameters file with a jupyter notebook. See Also -------- For example parameter dictionaries, see example_thresholding_analysis_parameters.py. """ # Begin ## Convert Colorspace, enhance contrast # Pull band from colorspace working_image = band_selector(image, colors) nbands = len(working_image) if verbose is True: print("Color bands selected") ## Count nubmer of dictionaries in threshold_args. Should equal number of bands. Make sure is list. try: len(threshold_args[0]) except: threshold_args = [threshold_args] if nbands != len(threshold_args): raise ValueError("Number of dictionaries in `threshold_args` doesn't\ equal the number of bands in `colors['band']`!") pass try: for i in range(nbands): temp = exposure.equalize_adapthist(working_image[i], kernel_size = contrast_kernel_size) working_image[i] = img_as_ubyte(temp) if verbose is True: print("Contrast enhanced") except: if contrast_kernel_size is not 'skip': warn("Skipping contrast enhancement") pass ## threshold for i in range(nbands): working_image[i] = working_image[i] > filters.threshold_local(working_image[i], **threshold_args[i]) for i in range(nbands): if len(colors) == 3: if colors['dark_on_light'][i] is True: working_image[i] = ~working_image[i] else: if colors == 'dark': working_image[i] = ~working_image[i] ## Combine bands. 
As written, keeps all 'TRUE' combined = working_image[0].copy() for i in range(1, nbands): combined = combined * working_image[i] working_image = combined.copy() if verbose is True: print("Thresholding complete") ## Mask, filtering, smoothing try: working_image = working_image * draw_mask(working_image, **mask_args) if verbose is True: print("Image masked") except: if mask_args is not 'skip': warn("Skipping mask") pass try: working_image = noise_removal(working_image, **noise_removal_args) if verbose is True: print("Smoothing and noise removal complete") except: if noise_removal_args is not 'skip': warn("Skipping noise removal") pass try: working_image = morphology_filter(working_image, **morphology_filter_args) if verbose is True: print("Morphology filtering complete") except: if morphology_filter_args is not 'skip': warn("Skipping morphology filter") pass try: working_image = fill_gaps(working_image, **fill_gaps_args) if verbose is True: print("Smoothing and gap filling complete") except: if fill_gaps_args is not 'skip': warn("Skipping gap filling and smoothing") pass ## skeleton, length-width, diameter filters skel_dict = skeleton_with_distance(working_image) if verbose is True: print("Skeletonization complete") try: lw_dict = length_width_filter(skel_dict, **lw_filter_args) if verbose is True: print("Length:width filtering complete") except: lw_dict = skel_dict.copy() if lw_filter_args is not 'skip': warn("Skipping length-width filter") pass try: diam_dict = diameter_filter(lw_dict, **diam_filter_args).copy() if verbose is True: print("Diameter filter complete") except: if diam_filter_args is not 'skip': warn("Skipping diameter filter") pass ## Summarize if diameter_bins is None or diameter_bins is 'skip': summary_df = summarize_geometry(skel_dict['geometry'], image_name) else: diam_out, summary_df = bin_by_diameter(skel_dict['length'], skel_dict['diameter'], diameter_bins, image_name) skel_dict['diameter'] = diam_out out = {'geometry' : summary_df, 'objects' : skel_dict['objects'], 'length' : skel_dict['length'], 'diameter' : skel_dict['diameter']} if verbose is True: print("Done") return(out)
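# An illustrative call of the thresholding pipeline above. The parameter
# values are placeholders (the docstring recommends generating them with the
# accompanying jupyter notebook), and the input file name is hypothetical.
from skimage import io

rgb = io.imread("root_scan.png")
result = thresholding_segmentation(
    rgb,
    threshold_args=[{'block_size': 101, 'offset': 10}],
    colors={'colorspace': 'rgb', 'band': [2], 'dark_on_light': [True]},
    image_name="root_scan",
)
result['geometry']  # pandas.DataFrame with total length and mean diameter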