def thinning(self):
    self.img = cv2.imread('{}/01-last_result.png'.format(self.output_path), 0)
    ret, img = cv2.threshold(self.img, 90, 255, 0)
    import thinning
    self.img = thinning.guo_hall_thinning(img)
    self.save_img(self.img, '03-skeletonize_result.png', self.output_path)
def mask_thinning(img):
    """ returns the skeleton (thinned image) of a mask.

    This uses `thinning.guo_hall_thinning` if available and otherwise falls
    back to a slow python implementation taken from
    http://opencvpython.blogspot.com/2012/05/skeletonization-using-opencv-python.html
    """
    try:
        import thinning
    except ImportError:
        # thinning module was not available and we use a python implementation
        size = np.size(img)
        skel = np.zeros(img.shape, np.uint8)
        kernel = cv2.getStructuringElement(cv2.MORPH_CROSS, (3, 3))
        while True:
            eroded = cv2.erode(img, kernel)
            temp = cv2.dilate(eroded, kernel)
            cv2.subtract(img, temp, temp)
            cv2.bitwise_or(skel, temp, skel)
            img = eroded
            zeros = size - cv2.countNonZero(img)
            if zeros == size:
                break
    else:
        # use the imported thinning algorithm
        skel = thinning.guo_hall_thinning(img)
    return skel
def skeletonize(road_network: np.ndarray, path: str = "workshop/vectorized.png", largest_component: bool = False):
    '''
    Thinning/skeletonization of the road network image to a wired model.

    Input(s):
        road_network: black and white image of the road network (streets in white)
        path: path where the skeletonized image should be saved
        largest_component: if True, only the largest road network component will be kept

    Output(s):
        vectorized: skeletonized image
    '''
    assert len(road_network.shape) == 2, 'ERROR: road_network must be grayscale image'
    img = cv2.resize(road_network, (road_network.shape[1] // 2, road_network.shape[0] // 2))
    vectorized = thinning.guo_hall_thinning(img)
    vectorized[vectorized > 100] = 255
    vectorized[vectorized <= 100] = 0
    if largest_component:
        try:
            _, labels, stats, _ = cv2.connectedComponentsWithStats(vectorized.copy(), connectivity=8)
            stats = stats[1:]  # drop the background component
            main_component = (np.argmax(stats[:, 4]) + 1).astype('int32')  # column 4 is CC_STAT_AREA
            vectorized = (labels == main_component).astype('uint8') * 255
        except Exception:
            print('Warning: Skeletonization failed to apply largest_component = True param. Skipping.')
    cv2.imwrite(path, vectorized)
    return vectorized
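A minimal usage sketch for the `skeletonize` helper above. The input file name and the argument choices are illustrative assumptions, not part of the original code; any single-channel black-and-white road mask (streets in white) will do:

import cv2
import numpy as np
import thinning

# Hypothetical input file: a binary road mask loaded as grayscale.
road_mask = cv2.imread("road_mask.png", cv2.IMREAD_GRAYSCALE)

# Keep only the largest connected road component and save the result.
skeleton = skeletonize(road_mask, path="workshop/vectorized.png", largest_component=True)
print(skeleton.shape, np.count_nonzero(skeleton))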
def skeletonisation(img, remove_borders=False):
    skeleton = thinning.guo_hall_thinning(img.copy())
    if remove_borders:
        skeleton[0, :] = 0
        skeleton[:, 0] = 0
        skeleton[-1, :] = 0
        skeleton[:, -1] = 0
    return skeleton
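A short usage sketch for `skeletonisation`, assuming the `thinning` module is installed; the synthetic mask below is illustrative and not from the original code:

import numpy as np
import thinning

# Synthetic binary mask: a filled block that touches the top image border.
mask = np.zeros((64, 64), dtype=np.uint8)
mask[0:20, 10:50] = 255

skeleton = skeletonisation(mask, remove_borders=True)
# With remove_borders=True the outermost rows/columns are zeroed,
# so no skeleton pixel remains on the image edge.
print(np.count_nonzero(skeleton[0, :]), np.count_nonzero(skeleton))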
def process(self, args):
    """ Guo Hall Thinning.

    Args:
        | *args* : a list of arguments, e.g. image ndarray

    """
    # create a skeleton
    skeleton = guo_hall_thinning(args[0].copy())
    # skeleton = cv2.cvtColor(skeleton, cv2.COLOR_GRAY2BGR)
    self.result['skeleton'] = skeleton
    self.result['img'] = args[0]
def process(self, args):
    """ Guo Hall Thinning.

    Args:
        | *args* : a list of arguments, e.g. image ndarray

    """
    # create a skeleton
    skeleton = thinning.guo_hall_thinning(args[0].copy())
    # skeleton = cv2.cvtColor(skeleton, cv2.COLOR_GRAY2BGR)
    self.result['skeleton'] = skeleton
    self.result['img'] = args[0]
def features_to_skel(joints, ends, edges, simplify=False):
    if not simplify:
        return joints + ends + edges
    else:
        # dilate to prevent extra mini spurs and help connect gaps
        kernel = cv2.getStructuringElement(cv2.MORPH_ELLIPSE, (3, 3))
        dilated = cv2.dilate(joints + ends, kernel, iterations=1)
        edges_no_overlap = edges.copy()
        edges_no_overlap[dilated > 0] = 0
        skel = cv2.dilate(dilated + edges_no_overlap, kernel, iterations=1)
        return thinning.guo_hall_thinning(skel)
def mask_thinning(img, method='auto'):
    """ returns the skeleton (thinned image) of a mask.

    This uses `thinning.guo_hall_thinning` if available and otherwise falls
    back to a slow python implementation taken from
    http://opencvpython.blogspot.com/2012/05/skeletonization-using-opencv-python.html
    Note that the fallback implementation is not equivalent to the guo_hall
    implementation.
    """
    # try importing the thinning module
    try:
        import thinning
    except ImportError:
        thinning = None

    # determine the method to use if automatic method is requested
    if method == 'auto':
        if thinning is None:
            method = 'python'
        else:
            method = 'guo-hall'

    # do the thinning with the requested method
    if method == 'guo-hall':
        if thinning is None:
            raise ImportError('Using the `guo-hall` method for thinning '
                              'requires the `thinning` module, which could not '
                              'be imported.')
        skel = thinning.guo_hall_thinning(img)

    elif method == 'python':
        # pure-python morphological skeletonization
        size = np.size(img)
        skel = np.zeros(img.shape, np.uint8)
        kernel = cv2.getStructuringElement(cv2.MORPH_CROSS, (3, 3))
        while True:
            eroded = cv2.erode(img, kernel)
            temp = cv2.dilate(eroded, kernel)
            cv2.subtract(img, temp, temp)
            cv2.bitwise_or(skel, temp, skel)
            img = eroded
            zeros = size - cv2.countNonZero(img)
            if zeros == size:
                break

    else:
        raise ValueError('Unknown thinning method `%s`' % method)

    return skel
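A short usage sketch for `mask_thinning` showing the two method paths. The synthetic mask is illustrative, and `cv2` and `numpy as np` are assumed to be imported as in the surrounding snippets:

import numpy as np

# Synthetic binary mask: a thick diagonal band.
mask = np.zeros((80, 80), dtype=np.uint8)
for k in range(80):
    mask[k, max(0, k - 5):min(80, k + 5)] = 255

# 'auto' prefers the compiled `thinning` module and falls back to the Python loop.
skel_auto = mask_thinning(mask.copy(), method='auto')

# Forcing the pure-Python fallback gives a (generally different) skeleton.
skel_py = mask_thinning(mask.copy(), method='python')
print(np.count_nonzero(skel_auto), np.count_nonzero(skel_py))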
def colSegmentation(img):
    original_imgage = preprocessing(img)  # keep the preprocessed image: background white, words black
    thining_image = p.guo_hall_thinning(original_imgage.copy())  # thin the image: background black, words white
    Boundries = []  # column indices used for cropping
    height, width = thining_image.shape

    for colIndex in range(width):  # width (number of columns)
        whitekPixel = 0
        current_cell = colIndex  # track the current column while following a (possibly slanted) separator line
        for rowIndex in range(height):  # height (number of rows in the column)
            if thining_image[rowIndex, current_cell] == 255:  # pixel directly below (highest priority)
                whitekPixel += 1
            elif (current_cell < width - 1 and thining_image[rowIndex, current_cell + 1] == 255):  # pixel below-right (stay in bounds)
                whitekPixel += 1
                current_cell += 1
            elif (current_cell != 0 and thining_image[rowIndex, current_cell - 1] == 255):  # pixel below-left (stay in bounds)
                whitekPixel += 1
                current_cell -= 1
        if whitekPixel >= round(height * .90):  # ~90% white rows: treat this column as a separator
            Boundries.append(colIndex)

    # crop the thinned image between consecutive separator columns
    for i in range(0, len(Boundries) - 1):
        thining_croped_image = thining_image[0:height, Boundries[i]:Boundries[i + 1]]
        original_croped_image = original_imgage[0:height, Boundries[i]:Boundries[i + 1]]
        imgNumber = str(counter())
        if len(thining_croped_image[0]) > 10 and len(thining_croped_image) > 10:  # skip crops smaller than 10 px
            # cv2.imwrite("output\\cols\\" + imgNumber + ".png", original_croped_image)
            wordSegmentaion(thining_croped_image, original_croped_image)
def process(self, args):
    """ Guo Hall thinning.

    Use ```zhang_suen_node_detection()``` for node detection.
    Use ```breadth_first_edge_detection()``` for edge detection.

    Args:
        | *args* : a list of arguments, e.g. image ndarray

    """
    # create a skeleton
    skeleton = thinning.guo_hall_thinning(args[0].copy())
    # detect nodes
    graph = zhang_suen_node_detection(skeleton)
    # detect edges
    # graph = breadth_first_edge_detection(skeleton, gray_img, graph)
    graph = breadth_first_edge_detection(skeleton, args[0], graph)
    skeleton = cv2.cvtColor(skeleton, cv2.COLOR_GRAY2BGR)
    self.result['graph'], self.result['img'] = graph, skeleton
def test():
    i = 0
    for w in range(1, 5):
        for h in range(1, 5):
            arr = numpy.empty(w * h, dtype=numpy.ubyte)
            for o in it.product([0, 1], repeat=w * h):
                arr.flat = o
                arr = numpy.reshape(arr, [h, w])
                print(i)
                # print(arr)
                i += 1
                # print(arr)
                # arr[1][1] = 0
                brr = thinning.guo_hall_thinning(arr.copy())
                arr2 = guo_hall_thinning(arr)
                if not all((arr2 == brr).flat):
                    print("original:", arr)
                    print("c", brr)
                    print("python", arr2)
                    return
def col_segmentation(self, img):
    original_imgage = self.preprocessing(img)  # keep the preprocessed image: background white, words black
    thining_image = p.guo_hall_thinning(original_imgage.copy())  # thin the image: background black, words white
    height, width = thining_image.shape
    Boundries = self.get_boundaries_indices(thining_image)
    colIndices = []

    # crop the thinned image between consecutive boundary columns
    for col in range(0, len(Boundries) - 1):
        thining_croped_image = thining_image[0:height, Boundries[col]:Boundries[col + 1]]
        original_croped_image = original_imgage[0:height, Boundries[col]:Boundries[col + 1]]
        # imgNumber = str(counter())
        if len(thining_croped_image[0]) > 10 and len(thining_croped_image) > 10:  # skip crops smaller than 10 px
            colIndices.append((thining_croped_image, original_croped_image))
            # cv2.imwrite("output\\cols\\" + imgNumber + ".png", original_croped_image)
            # wordSegmentaion(thining_croped_image, original_croped_image)
    return colIndices
cap = cv2.VideoCapture(0)

while True:
    ret, img = cap.read()
    gray_img = cv2.cvtColor(img, cv2.COLOR_BGR2GRAY)
    gray = cv2.equalizeHist(gray_img)
    horizontal_grad = cv2.Scharr(gray_img, cv2.CV_32F, 1, 0)
    vertical_grad = cv2.Scharr(gray_img, cv2.CV_32F, 0, 1)  # , ksize=3)
    gradient = np.sqrt(np.power(horizontal_grad, 2) + np.power(vertical_grad, 2))
    _, th = cv2.threshold(gradient, 200, 255, cv2.THRESH_BINARY)
    # binary = local_thresholding(th)
    kernel = np.ones((3, 3), np.uint8)
    dilated = cv2.dilate(th, kernel, 1)
    erosion = cv2.erode(dilated, kernel, iterations=1)
    closed = cv2.morphologyEx(erosion, cv2.MORPH_CLOSE, kernel, iterations=1)
    thinned = thinning.guo_hall_thinning(closed.astype(np.uint8))
    # _, binary = cv2.threshold(gray, 200, 255, cv2.THRESH_BINARY)
    edges = cv2.Canny(gray, 50, 150, apertureSize=3)
    lines = cv2.HoughLinesP(thinned, 1, np.pi / 180, 100, 5)
    if lines is not None:
        for x1, y1, x2, y2 in lines[0]:
            cv2.line(img, (x1, y1), (x2, y2), (0, 0, 255), 2)
    corners = cv2.goodFeaturesToTrack(gray, 10, 0.01, 25)
    corners = np.float32(corners)
    # print("corners", corners.shape)
    for item in corners:
        x, y = item[0]
def detect(img, negate=False, robot=False, thin=False):
    if robot:
        MEDIAN_LINE_THRESHOLD = 30  # Robot
    else:
        MEDIAN_LINE_THRESHOLD = 10  # Kitty
    if thin:
        MEDIAN_LINE_THRESHOLD = 1

    left = None
    right = None
    width = img.shape[1]
    height = img.shape[0]

    mask = np.zeros((img.shape[0], img.shape[1]), dtype=np.uint8)
    left_mask = np.zeros((img.shape[0], img.shape[1]), dtype=np.uint8)
    right_mask = np.zeros((img.shape[0], img.shape[1]), dtype=np.uint8)

    if not negate:
        gray = np.zeros((height, width, 1), dtype=np.uint8)
    else:
        gray = np.full((height, width, 1), 255, dtype=np.uint8)
    cv2.cvtColor(img, cv2.COLOR_RGB2GRAY, gray, 1)
    if negate:
        gray = abs(255 - gray)

    # V2 implementation use equalization of histogram
    # FIXME bad on robot
    # clahe = cv2.createCLAHE(clipLimit=2.0, tileGridSize=(8, 8))
    # gray = clahe.apply(gray)

    # Use adaptive thresholding because it is better for difficult lighting condition
    # if robot:
    th2 = cv2.adaptiveThreshold(gray, 255, cv2.ADAPTIVE_THRESH_MEAN_C, cv2.THRESH_BINARY, 71, -15)  # TODO values for ROBOT this should be ok
    # else:
    #     th2 = cv2.adaptiveThreshold(gray, 255, cv2.ADAPTIVE_THRESH_MEAN_C, cv2.THRESH_BINARY, 41, -35)  # TODO values for KITTY maybe use a little bias

    if DEBUG:
        cv2.imshow("No erosion", th2)
        cv2.moveWindow("No erosion", 800, 600)

    th2 = cv2.erode(th2, kernel=(11, 11, 1), iterations=4)  # TODO test
    # th2 = cv2.threshold(gray, 0, 255, cv2.THRESH_OTSU)  # TODO values for KITTY maybe use a little bias

    # =============================== Calculate base histogram =============================
    # hist_time_start = time.time()
    hist = compute_base_hist(th2)
    if PLOT:
        plt.plot(range(0, th2.shape[1]), hist)
        plt.show()

    # get highest values
    half_width = (width / 2)
    left_max = np.argpartition(hist[0:int(half_width - 1)], -2)[-1:]
    right_max = np.argpartition(hist[int(half_width):int(width - 1)], -2)[-1:] + int(half_width)
    # hist_time_stop = time.time()
    print(left_max, right_max)
    # print("Base Hist time:" + str(hist_time_stop - hist_time_start))

    # =============================== THRESHOLD ========================================
    LINE_THRESHOLD = 0.3  # 10000 # 0.3
    left_line = None
    right_line = None
    WINDOW_WIDTH = 100
    WINDOW_HEIGHT = height / 12

    if hist[left_max].squeeze() >= LINE_THRESHOLD:
        left_line = left_max
    if hist[right_max].squeeze() >= LINE_THRESHOLD:
        right_line = right_max
    if left_line is not None and right_line is not None and right_line - left_line < 200:  # TODO search better method
        if hist[left_line] > hist[right_line]:
            right_line = None
        else:
            left_line = None

    # mask_time_start = time.time()
    # ======================= LEFT LINE
    if left_line is not None:
        update_mask_for_line(th2, left_line, left_mask, WINDOW_WIDTH, WINDOW_HEIGHT, debug_img=img)
    else:
        print("LEFT LINE NONE!!!")

    # ======================= RIGHT LINE
    if right_line is not None:
        update_mask_for_line(th2, right_line, right_mask, WINDOW_WIDTH, WINDOW_HEIGHT, debug_img=img)
    else:
        print("RIGHT LINE NONE!!!")

    if DEBUG:
        cv2.imshow("th2m", th2)
        cv2.moveWindow("th2m", 10, 700)

    # ================================ MASKING REGIONS ================================
    mask = cv2.bitwise_or(mask, left_mask)
    mask = cv2.bitwise_or(mask, right_mask)
    # if DEBUG:
    #     cv2.imshow("Mask", mask)
    #     cv2.moveWindow("Mask", 200, 10)

    # th2 = cv2.bitwise_and(th2, mask)
    # mask_time_end = time.time()
    # print("Mask time:" + str(mask_time_end - mask_time_start))

    if DEBUG:
        cv2.imshow("th2m", th2)
        cv2.moveWindow("th2m", 10, 700)
        cv2.imshow("left_mask", left_mask)
        cv2.moveWindow("left_mask", 10, 700)
        cv2.imshow("right_mask", right_mask)
        cv2.moveWindow("right_mask", 650, 700)
        cv2.imshow("mask", mask)
        cv2.moveWindow("mask", 1500, 700)

    # ================================ POLYNOMIAL FIT ================================
    # thin_time_start = time.time()
    if thin:
        th2 = thinning.guo_hall_thinning(th2)  # TODO FIXME faster but bad quality
    # thin_time_stop = time.time()
    # print("Thin time:" + str(thin_time_stop - thin_time_start))

    if DEBUG:
        cv2.imshow("Thinning", th2)
        cv2.moveWindow("Adapt mean", 100, 100)

    # TODO FIX ME
    # x_values_left, y_values_left = find_median_line(th2, from_x=0, to_x=int((width/2)-1))
    # x_values_right, y_values_right = find_median_line(th2, from_x=int(width/2), to_x=int(width-1))

    # median_time_start = time.time()
    x_values_left, y_values_left = find_median_line(th2, mask=left_mask, threshold=MEDIAN_LINE_THRESHOLD)
    x_values_right, y_values_right = find_median_line(th2, mask=right_mask, threshold=MEDIAN_LINE_THRESHOLD)
    # median_time_stop = time.time()
    # print("Median time:" + str(median_time_stop - median_time_start))

    if DEBUG:
        for i in range(0, len(x_values_left) - 1):
            cv2.circle(img, (int(y_values_left[i]), int(x_values_left[i])), 1, (255, 0, 0), thickness=1)
        for i in range(0, len(x_values_right) - 1):
            cv2.circle(img, (int(y_values_right[i]), int(x_values_right[i])), 1, (255, 0, 0), thickness=1)

    # fit_time_start = time.time()
    if len(x_values_left) > FIT_POINTS_THRESHOLD:  # TODO fix custom threshold for reliable line
        left_fit, residuals, rank, singular_values, rcond = np.polyfit(x_values_left, y_values_left, 2, full=True)
        # TODO check residuals for quality
        if residuals[0] / len(x_values_left) < RESIDUALS_THRESHOLD:
            left = left_fit
        print("Residuals: " + str(residuals) + "RES/n_points: " + str(residuals / len(x_values_left)))
        if DEBUG:
            for i in range(0, img.shape[0] - 1):
                y_fit = left_fit[0] * (i ** 2) + left_fit[1] * i + left_fit[2]
                cv2.circle(img, (int(y_fit), i), 1, (0, 0, 255), thickness=1)

    if len(x_values_right) > FIT_POINTS_THRESHOLD:  # TODO fix custom threshold for reliable line
        right_fit, residuals, rank, singular_values, rcond = np.polyfit(x_values_right, y_values_right, 2, full=True)
        # TODO check residuals for quality
        if residuals[0] / len(x_values_right) < RESIDUALS_THRESHOLD:
            right = right_fit
        print("Residuals: " + str(residuals) + "RES/n_points: " + str(residuals / len(x_values_right)))
        if DEBUG:
            for i in range(0, img.shape[0] - 1):
                y_fit = right_fit[0] * (i ** 2) + right_fit[1] * i + right_fit[2]
                cv2.circle(img, (int(y_fit), i), 1, (0, 0, 255), thickness=1)
    # fit_time_stop = time.time()
    # print("Fit time:" + str(fit_time_stop - fit_time_start))

    if DEBUG:
        pass
        # cv2.circle(img, (int(np.round(left)), img.shape[0] - INTERSECTION_LINE), 5, (0, 0, 255), thickness=2)
        # cv2.circle(img, (int(np.round(right)), img.shape[0] - INTERSECTION_LINE), 5, (0, 0, 255), thickness=2)
        #
        # # center
        # lines_range = right - left
        # mid = left + lines_range / 2
        # cv2.circle(img, (int(np.round(mid)), img.shape[0] - INTERSECTION_LINE), 7, (0, 255, 255), thickness=2)
        #
        # # car position
        # cv2.circle(img, (int(np.round(car_position)), img.shape[0] - INTERSECTION_LINE), 5, (14, 34, 255), thickness=5)
        #
        # # cv2.imshow("Gray", gray)
        # cv2.imshow("Otzu", thr)
        # cv2.imshow("Img", img)
        # cv2.imshow("Adapt gaussian", th3)
        # cv2.imshow("Canny", edges)
        # cv2.imshow("CannyDilated", dilate)
        # cv2.imshow("Adapt mean erosion", th2erosion)
        # cv2.imshow("Adapt gaussian erosion", th3erosion)
        # cv2.imshow("erosion", erosion)

    cv2.imshow("Adapt mean", th2)
    cv2.moveWindow("Adapt mean", 1500, 100)
    cv2.waitKey(1)
    print(left)
    print(right)

    # TODO understand if this is good
    # if (left is None and right is not None) or (right is None and left is not None):
    #     left, right = compute_lines_based_on_available(left, right)

    return left, right
import cv2
import thinning

img = cv2.imread("./example.png")
img_gray = cv2.cvtColor(img, cv2.COLOR_BGR2GRAY)
thinned = thinning.guo_hall_thinning(img_gray)
cv2.imwrite("./thinned.png", thinned)
def get_window_3_stroke(im, j, i, window_shape=(100, 100), scale_factor=10, show=False):
    """ a windowed function for the 3-stroke conversion

    params:
        lines           get_curves(im)
        im              image
        j, i            window coordinates
        window_shape    window dimensions (H, W)
        scale_factor    scale the strokes
        show            display the image and path

    method:
        - use Guo-Hall thinning to reduce to a skeleton
        - get all non-zero points in skeleton
        - use DBSCAN clustering to find clusters of neighbors
        - interpret each cluster as a "stroke"
        - use get_opt_path on each cluster to find best-fit line
        - simplify best-fitting lines with the Ramer-Douglas-Peucker algorithm
        - convert lines to strokes

    returns:
        strokes         pen-stroke format
    """
    # preprocess window with Guo-Hall thinning
    window = im[j:j + window_shape[0], i:i + window_shape[1]]
    _, th = cv2.threshold(window, 127, 255, cv2.THRESH_BINARY_INV)
    window = guo_hall_thinning(th)
    points = np.argwhere(window.T != 0)
    # points = np.flip(points, 1)
    if len(points) == 0:
        return

    # segment graph into clusters using DBSCAN algorithm
    db = DBSCAN(eps=5)
    labels = db.fit_predict(points)

    lines = []
    if show:
        plt.imshow(window, 'gray')

    # for each cluster, get the optimal path
    for label in set(labels):
        cluster = points[labels == label]
        if len(cluster) < 3:
            continue
        path = get_opt_path(cluster)
        line = rdp(cluster[path], epsilon=1)
        # line = cluster[path]
        if show:
            x, y = line.T
            plt.plot(x, y)
        # add line to lines
        lines.append(line)

    if show:
        plt.show()

    strokes = lines_to_strokes(lines)

    # normalize strokes
    if len(strokes) > 0:
        strokes[:, 0:2] /= scale_factor
        strokes[0] = [0, 0, 0]

    return strokes
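A minimal, self-contained sketch of the thinning-plus-DBSCAN clustering step used above, without the project-specific helpers (`get_opt_path`, `rdp`, `lines_to_strokes`). The synthetic image and the `eps` value are illustrative assumptions:

import numpy as np
import thinning
from sklearn.cluster import DBSCAN

# Synthetic window: two separate thick strokes on a black background.
window = np.zeros((100, 100), dtype=np.uint8)
window[20:30, 10:90] = 255   # horizontal bar
window[60:90, 45:55] = 255   # vertical bar

skeleton = thinning.guo_hall_thinning(window)
points = np.argwhere(skeleton.T != 0)  # (x, y) coordinates of skeleton pixels

# Neighbouring skeleton pixels fall into the same cluster; eps=5 mirrors the snippet above.
labels = DBSCAN(eps=5).fit_predict(points)
print("stroke candidates:", len(set(labels) - {-1}))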
for file_name in files:
    img = cv2.imread(file_name, cv2.COLOR_BGR2GRAY)
    # print(np.min(img))
    img2 = img < 120
    img2 = img2.astype(np.uint8) * 255
    print(img2.shape)
    # print(file_name)
    cv2.imshow("Orig", img)
    # blur = cv2.GaussianBlur(img, (5, 5), 0)
    # ret3, th3 = cv2.threshold(img, 0, 255, cv2.THRESH_BINARY_INV + cv2.THRESH_OTSU)
    adap = cv2.adaptiveThreshold(img, 255, cv2.ADAPTIVE_THRESH_GAUSSIAN_C, cv2.THRESH_BINARY_INV, 5, 2)
    kernel = np.ones((2, 2), np.uint8)
    prev = adap.copy()
    cv2.imshow("Previo cierre", adap)  # "before closing"
    # for _ in range(3):
    adap = cv2.morphologyEx(adap, cv2.MORPH_DILATE, kernel)
    thin = adap.copy()
    thin = thinning.guo_hall_thinning(thin)
    cv2.imshow("Otsu", adap)
    cv2.imshow("skel guo and hall", thin)
    k = cv2.waitKey(0)
    if k & 0xFF == ord('q'):
        break

cv2.destroyAllWindows()
def preprocessing(img):
    gray_img = cv2.cvtColor(img, cv2.COLOR_BGR2GRAY)
    gray_img = cv2.equalizeHist(gray_img)
    horizontal_grad = cv2.Scharr(gray_img, cv2.CV_32F, 1, 0)
    vertical_grad = cv2.Scharr(gray_img, cv2.CV_32F, 0, 1)  # , ksize=3)
    gradient = np.sqrt(np.power(horizontal_grad, 2) + np.power(vertical_grad, 2))
    _, th = cv2.threshold(gradient, 200, 255, cv2.THRESH_BINARY)
    binary = local_thresholding(th)
    kernel = np.ones((3, 3), np.uint8)
    dilated = cv2.dilate(binary, kernel, 1)
    erosion = cv2.erode(dilated, kernel, iterations=2)
    closed = cv2.morphologyEx(erosion, cv2.MORPH_CLOSE, kernel, iterations=1)
    thinned = thinning.guo_hall_thinning(closed.astype(np.uint8))
    contours, hierarchy = cv2.findContours(thinned.copy(), cv2.RETR_TREE, cv2.CHAIN_APPROX_SIMPLE)

    # keep only sufficiently long contours
    contours2 = []
    # arc_lengths = []
    for ele in contours:
        if len(ele) > 2:
            retval = cv2.arcLength(ele, False)
            # arc_lengths.append(retval)
            if retval > 160:
                contours2.append(ele)

    trimmed_binary = np.zeros(gray_img.shape, np.uint8)
    cv2.drawContours(trimmed_binary, contours2, -1, (255, 255, 255), 1)

    # a skeleton pixel with more than two bright 8-neighbours is a potential corner/junction
    potential_corners = []
    for i in range(1, trimmed_binary.shape[0] - 1):
        for j in range(1, trimmed_binary.shape[1] - 1):
            if trimmed_binary[i, j] > 200:
                counter = 0
                if trimmed_binary[i - 1, j - 1] > 200:
                    counter = counter + 1
                if trimmed_binary[i - 1, j + 1] > 200:
                    counter = counter + 1
                if trimmed_binary[i + 1, j - 1] > 200:
                    counter = counter + 1
                if trimmed_binary[i + 1, j + 1] > 200:
                    counter = counter + 1
                if trimmed_binary[i, j + 1] > 200:
                    counter = counter + 1
                if trimmed_binary[i, j - 1] > 200:
                    counter = counter + 1
                if trimmed_binary[i + 1, j] > 200:
                    counter = counter + 1
                if trimmed_binary[i - 1, j] > 200:
                    counter = counter + 1
                if counter > 2:
                    potential_corners.append([j, i])

    p_corners = np.array(potential_corners)
    for ele in p_corners:
        cv2.circle(img, tuple(ele), 2, (0, 0, 255), 2)

    plt.subplot(2, 2, 1)
    plt.imshow(img, cmap='gray')
    plt.subplot(2, 2, 2)
    plt.imshow(thinned, cmap='gray')
    plt.subplot(2, 2, 3)
    plt.imshow(trimmed_binary, cmap='gray')
    plt.subplot(2, 2, 4)
    plt.imshow(gradient, cmap='gray')
    plt.show()
        # (tail of the groupCols helper; the start of the function is not shown here)
            groupedList.append(tempLst)
            tempLst = []
            tempLst.append(arr[i])
        if (i == len(arr) - 1):
            groupedList.append(tempLst)
    return groupedList


# Read the image, remove the noise, threshold the image to convert it to
# binary format, and invert it
img = cv2.imread("hw.png", 0)
img = cv2.fastNlMeansDenoising(img, h=20)
img = cv2.threshold(img, 160, 255, cv2.THRESH_BINARY_INV)[1]

# Thin the image using the Guo-Hall thinning algorithm
img = thinning.guo_hall_thinning(img.copy())

# Keep track of the column indices with no white pixels and of columns with
# exactly one white pixel (ligature)
zerosLst = []
onesLst = []
for i in range(img.shape[1]):
    if (np.sum(img[:, i]) == 0):
        zerosLst.append(i)
    elif (np.sum(img[:, i]) == 255):
        onesLst.append(i)

# Group nearby columns into a list (from 1-D list to list of lists)
zerosLst = groupCols(zerosLst)
onesLst = groupCols(onesLst)

# Get the final indices of columns
import cv2
import thinning

img = cv2.imread("./input1.JPG")
# add a 2-pixel constant border, since the image content touches the existing boundary
constant = cv2.copyMakeBorder(img, 2, 2, 2, 2, cv2.BORDER_CONSTANT)
img_gray = cv2.cvtColor(constant, cv2.COLOR_BGR2GRAY)
thinned = thinning.guo_hall_thinning(img_gray)
cv2.imwrite("skelta/thinned2.png", thinned)
# this is the most suitable code for the project
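The two minimal scripts above pass a raw grayscale image directly to `thinning.guo_hall_thinning`; in practice the input is usually binarised first so that the strokes to be thinned are white on black. A short sketch of that variant, reusing the same hypothetical input file; the threshold value and the inverted-binary choice are illustrative assumptions:

import cv2
import thinning

img = cv2.imread("./input1.JPG", cv2.IMREAD_GRAYSCALE)
# Binarise so the strokes to be thinned are white (255) on a black background.
_, binary = cv2.threshold(img, 127, 255, cv2.THRESH_BINARY_INV)
# Pad so strokes touching the border are still thinned correctly.
binary = cv2.copyMakeBorder(binary, 2, 2, 2, 2, cv2.BORDER_CONSTANT, value=0)
skeleton = thinning.guo_hall_thinning(binary)
cv2.imwrite("skelta/thinned_binary.png", skeleton)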