def contourBtn(self):
    """
    Handle the "Contour" button: extract the contour of the current
    grayscale image, close break points on it, and display the result
    in the graphics scene.
    :return:
    """
    print("Contour button clicked")

    # Extract the raw contour, then close single break points on it.
    extracted = removeBreakPointsOfContour(getContourOfImage(self.image_gray))
    self.contour_gray = extracted.copy()

    # Wrap the grayscale buffer in a QImage; one byte per pixel, so the
    # bytes-per-line stride equals the image width.
    height, width = extracted.shape[0], extracted.shape[1]
    qimg = QImage(extracted.data, width, height, width, QImage.Format_Indexed8)

    self.contour_pix = QPixmap.fromImage(qimg)
    self.temp_contour_pix = self.contour_pix.copy()
    self.scene.addPixmap(self.contour_pix)
    self.scene.update()
    self.statusbar.showMessage("Contour successed!")
    del extracted, qimg
cv2.line(img_rbg, (x, y), (x + w, y + h), (0, 0, 255), 2) # cog cog_x, cog_y = getCenterOfGravity(img_bit) print(cog_x, cog_y) img_cog = cv2.circle(img_cog, (cog_x, cog_y), 2, (0, 0, 255), 10) # skeleton img_skel = cv2.imread("skel.png", 0) _, img_skel = cv2.threshold(img_skel, 220, 255, cv2.THRESH_BINARY) img_stroke = cv2.imread("stroke_2.png", 0) _, img_stroke = cv2.threshold(img_stroke, 127, 255, cv2.THRESH_BINARY) contours = getContourOfImage(img_stroke) for y in range(contours.shape[0]): for x in range(contours.shape[1]): if contours[y][x] != 255: img_skel[y][x] = contours[y][x] # stroke features img_sk_rgb = cv2.cvtColor(img_stroke, cv2.COLOR_GRAY2RGB) img_sk_mini = img_sk_rgb.copy() img_sk_ar = img_sk_rgb.copy() # img_sk_cog = img_sk_rgb.copy() x, y, w, h = getSingleMaxBoundingBoxOfImage(img_stroke) cv2.rectangle(img_sk_mini, (x, y), (x + w, y + h), (0, 255, 0), 2) img_sk_cog = img_sk_mini.copy()
def autoSmoothContoursOfComponent(component, blockSize=3, ksize=3, k=0.04):
    """
    Smooth the contours of a component image with Bezier curve fitting,
    using Harris corner detection to anchor the sub-contour end points.

    :param component: grayscale component image (black strokes on white);
                      presumably 0/255 valued — TODO confirm against callers.
    :param blockSize: Harris detector neighborhood size.
    :param ksize: Harris detector Sobel aperture size.
    :param k: Harris detector free parameter.
    :return: filled, smoothed component image, or None if component is None.
    """
    if component is None:
        return

    # 5. Using corner detection to get corner regions
    corner_component = np.float32(component)
    dst = cv2.cornerHarris(corner_component, blockSize=blockSize, ksize=ksize, k=k)
    dst = cv2.dilate(dst, None)

    # Collect every pixel whose Harris response exceeds 10% of the maximum.
    corners_area_points = []
    for y in range(dst.shape[0]):
        for x in range(dst.shape[1]):
            if dst[y][x] > 0.1 * dst.max():
                corners_area_points.append((x, y))
    print("corner area points num: %d" % len(corners_area_points))

    # 6. Determine center points of corner areas by painting the corner
    # pixels on a blank image and taking the bounding-box centers.
    blank_gray = createBlankGrayscaleImage(component)
    for pt in corners_area_points:
        blank_gray[pt[1]][pt[0]] = 0.0

    rectangles = getAllMiniBoundingBoxesOfImage(blank_gray)
    corners_area_center_points = []
    for rect in rectangles:
        corners_area_center_points.append(
            (rect[0] + int(rect[2] / 2.), rect[1] + int(rect[3] / 2.)))
    print("corner area center points num: %d" % len(corners_area_center_points))

    # Based on the distance to end points and cross points of the skeleton,
    # remove extra corner-area center points.
    component_skeleton = getSkeletonOfImage(component)
    end_points = getEndPointsOfSkeletonLine(component_skeleton)
    cross_points = getCrossPointsOfSkeletonLine(component_skeleton)

    # Detect valid corner-region center points close to end/cross points.
    valid_corners_area_center_points = []
    dist_threshold = 40
    for pt in corners_area_center_points:
        is_valid = False
        for ept in end_points:
            # BUG FIX: was (pt[1] + ept[1])**2 — a sum, not a difference, so
            # the Euclidean distance to end points was computed incorrectly.
            dist = math.sqrt((pt[0] - ept[0]) ** 2 + (pt[1] - ept[1]) ** 2)
            if dist <= dist_threshold:
                is_valid = True
                break
        if is_valid:
            valid_corners_area_center_points.append(pt)
            continue
        for cpt in cross_points:
            dist = math.sqrt((pt[0] - cpt[0]) ** 2 + (pt[1] - cpt[1]) ** 2)
            if dist <= dist_threshold:
                is_valid = True
                break
        if is_valid:
            valid_corners_area_center_points.append(pt)
    print("valid corner area center points num: %d" % len(valid_corners_area_center_points))
    del blank_gray

    # 7. Get all contours of component
    component_contours = getContourOfImage(component)
    contours = getConnectedComponents(component_contours, connectivity=8)
    print("contours num: %d" % len(contours))

    # 8. Process contours to get closed and 1-pixel width contours
    contours_processed = []
    for cont in contours:
        cont = removeBreakPointsOfContour(cont)
        contours_processed.append(cont)
    print("contours processed num: %d" % len(contours_processed))

    # 9. Find corner points of contours close to corner-region center points.
    # For each contour there is a corner-points list.
    contours_corner_points = []
    for i in range(len(contours_processed)):
        corner_points = []
        contour = contours_processed[i]
        for pt in valid_corners_area_center_points:
            x0 = target_x = pt[0]
            y0 = target_y = pt[1]
            min_dist = 10000
            # Search the nearest contour pixel in a 20x20 region centered at
            # (x0, y0). Clamp to the image so border corners cannot index out
            # of bounds (negative indices would silently wrap in NumPy).
            for y in range(max(0, y0 - 10), min(contour.shape[0], y0 + 10)):
                for x in range(max(0, x0 - 10), min(contour.shape[1], x0 + 10)):
                    if contour[y][x] == 255:
                        continue
                    dist = math.sqrt((x - x0) ** 2 + (y - y0) ** 2)
                    if dist < min_dist:
                        min_dist = dist
                        target_x = x
                        target_y = y
            if min_dist < 5:
                corner_points.append((target_x, target_y))
        contours_corner_points.append(corner_points)

    total_num = 0
    for cont in contours_corner_points:
        total_num += len(cont)
    if total_num == len(valid_corners_area_center_points):
        print("corner points not ignored")
    else:
        print("corner points be ignored")

    # 10. Separate contours into sub-contours based on the corner points
    # on different contours.
    sub_contours = []
    for i in range(len(contours_processed)):
        contour = contours_processed[i]
        corner_points = contours_corner_points[i]

        # sort the contour points in traversal order
        contour_points_sorted = sortPointsOnContourOfImage(contour)

        # sort the corner points in the same traversal order
        corner_points_sorted = []
        for pt in contour_points_sorted:
            if pt in corner_points:
                corner_points_sorted.append(pt)

        # split the contour into sub-contours between consecutive corners
        for j in range(len(corner_points_sorted)):
            start_pt = corner_points_sorted[j]
            if j == len(corner_points_sorted) - 1:
                end_pt = corner_points_sorted[0]
            else:
                end_pt = corner_points_sorted[j + 1]

            start_index = contour_points_sorted.index(start_pt)
            end_index = contour_points_sorted.index(end_pt)

            if start_index <= end_index:
                if end_index == len(contour_points_sorted) - 1:
                    sub_contour = contour_points_sorted[start_index:len(contour_points_sorted)]
                    sub_contour.append(contour_points_sorted[0])
                else:
                    sub_contour = contour_points_sorted[start_index:end_index + 1]
            else:
                # wrap around the end of the ordered point list
                sub_contour = contour_points_sorted[start_index:len(contour_points_sorted)] + \
                              contour_points_sorted[0:end_index + 1]
            sub_contours.append(sub_contour)
    print("sub contours num: %d" % len(sub_contours))

    # 11. Bezier-curve fit all sub-contours under the maximal error
    max_error = 100
    sub_contours_smoothed = []
    for id in range(len(sub_contours)):
        sub_contour = np.array(sub_contours[id])
        if len(sub_contour) < 2:
            continue
        beziers = fitCurve(sub_contour, maxError=max_error)
        sub_contour_smoothed = []
        for bez in beziers:
            bezier_points = draw_cubic_bezier(bez[0], bez[1], bez[2], bez[3])
            sub_contour_smoothed += bezier_points
        sub_contours_smoothed.append(sub_contour_smoothed)

    # 12. Merge sub-contours together onto one blank image
    img_smoothed_gray = createBlankGrayscaleImage(component)
    for sub in sub_contours_smoothed:
        for pt in sub:
            img_smoothed_gray[pt[1]][pt[0]] = 0.0

    # process smoothed contours to get closed and 1-pixel width lines
    img_smoothed_gray = getSkeletonOfImage(img_smoothed_gray)
    cv2.imshow("img_smoothed_gray", img_smoothed_gray)

    contours_smoothed = getConnectedComponents(img_smoothed_gray)
    if len(contours_smoothed) == 1:
        # no hole exists: directly fill black inside the contour
        cont = contours_smoothed[0]
        cont_points = sortPointsOnContourOfImage(cont)
        cont_points = np.array([cont_points], "int32")
        fill_contour_smooth = np.ones_like(component) * 255
        fill_contour_smooth = np.array(fill_contour_smooth, dtype=np.uint8)
        fill_contour_smooth = cv2.fillPoly(fill_contour_smooth, cont_points, 0)
        return fill_contour_smooth
    else:
        # holes exist: fill each contour separately, keep the ones that
        # overlap the original component, treat the rest as holes.
        print("there are holes!")
        fill_img_list = []
        hole_points = []
        for cont in contours_smoothed:
            cont_points = sortPointsOnContourOfImage(cont)
            cont_points = np.array([cont_points], "int32")
            fill_contour_smooth = np.ones_like(component) * 255
            fill_contour_smooth = np.array(fill_contour_smooth, dtype=np.uint8)
            fill_contour_smooth = cv2.fillPoly(fill_contour_smooth, cont_points, 0)

            # overlap ratio between the filled contour and the component
            valid_num = same_num = 0
            for y in range(component.shape[0]):
                for x in range(component.shape[1]):
                    if component[y][x] == 0.0:
                        valid_num += 1
                        if fill_contour_smooth[y][x] == 0.0:
                            same_num += 1
            # Guard valid_num == 0 (blank component) to avoid ZeroDivisionError;
            # such a fill carries no component pixels, so treat it as a hole.
            if valid_num > 0 and 1.0 * same_num / valid_num > 0.8:
                fill_img_list.append(fill_contour_smooth)
                print("ratio: %f" % (1.0 * same_num / valid_num))
            else:
                if valid_num > 0:
                    print("ratio: %f" % (1.0 * same_num / valid_num))
                for y in range(fill_contour_smooth.shape[0]):
                    for x in range(fill_contour_smooth.shape[1]):
                        if fill_contour_smooth[y][x] == 0.0:
                            hole_points.append((x, y))

        # merge filled images
        blank_temp = np.ones_like(component) * 255
        for fl in fill_img_list:
            for y in range(fl.shape[0]):
                for x in range(fl.shape[1]):
                    if fl[y][x] == 0.0:
                        blank_temp[y][x] = fl[y][x]

        # punch the hole points back to white
        for pt in hole_points:
            blank_temp[pt[1]][pt[0]] = 255
        return blank_temp
# Experiment script: DFT of a binarized character image and its inverse,
# visualized with matplotlib.
import cv2
import numpy as np
from algorithms.RDP import rdp
from matplotlib import pyplot as plt
from utils.Functions import getContourOfImage, getSkeletonOfImage, createBlankGrayscaleImage, getConnectedComponents,\
    sortPointsOnContourOfImage, removeBreakPointsOfContour

path = "../test_images/page1_char_3.png"

# load as grayscale and binarize
img = cv2.imread(path, 0)
_, img = cv2.threshold(img, 127, 255, cv2.THRESH_BINARY)

img_contour = getContourOfImage(img)

# # dft1 = cv2.dft(np.float32(img_contour), flags=cv2.DFT_COMPLEX_OUTPUT)
# dft_shift1 = np.fft.fftshift(dft1)

# forward DFT of the whole image, with the zero-frequency component
# shifted to the center of the spectrum
dft_basic = cv2.dft(np.float32(img), flags=cv2.DFT_COMPLEX_OUTPUT)
dft_basic = np.fft.fftshift(dft_basic)

# # dft_basic += dft1

# inverse DFT: shift back and reconstruct the spatial image magnitude
dft_ift = np.fft.ifftshift(dft_basic)
img_reverse = cv2.idft(dft_ift)
img_reverse = cv2.magnitude(img_reverse[:, :, 0], img_reverse[:, :, 1])

# log-scaled magnitude spectrum (computed but only displayed if plotted)
magnitude_spectrum = 20 * np.log(
    cv2.magnitude(dft_basic[:, :, 0], dft_basic[:, :, 1]))

plt.subplot(121), plt.imshow(img_reverse, cmap='gray')
plt.title('Input Image'), plt.xticks([]), plt.yticks([])
def main():
    """
    Compare a source stroke and a target stroke: crop their bounding
    regions, extract contours and skeletons, split each stroke into
    begin/middle/end parts, and display everything in OpenCV windows.
    """
    src_path = "../strokes/src_strokes7.png"
    tag_path = "../strokes/tag_strokes7.png"

    src_img = cv2.imread(src_path, 0)
    tag_img = cv2.imread(tag_path, 0)

    # threshold
    _, src_img = cv2.threshold(src_img, 127, 255, cv2.THRESH_BINARY)
    _, tag_img = cv2.threshold(tag_img, 127, 255, cv2.THRESH_BINARY)

    # get the minimum bounding boxs
    src_box = getSingleMaxBoundingBoxOfImage(src_img)
    tag_box = getSingleMaxBoundingBoxOfImage(tag_img)

    # get the region of strokes with a 5px margin
    # NOTE(review): assumes the box is at least 5px away from the image
    # border; a box at the edge would make these indices negative — confirm.
    src_region = src_img[src_box[1] - 5:src_box[1] + src_box[3] + 5,
                         src_box[0] - 5:src_box[0] + src_box[2] + 5]
    tag_region = tag_img[tag_box[1] - 5:tag_box[1] + tag_box[3] + 5,
                         tag_box[0] - 5:tag_box[0] + tag_box[2] + 5]

    # get the contour of strokes based on the Canny algorithm
    src_edge = getContourOfImage(src_region)
    tag_edge = getContourOfImage(tag_region)
    cv2.imshow("src edge", src_edge)
    cv2.imshow("tag edge", tag_edge)

    # get the skeletons of strokes based on the thinning algorithm;
    # skeletonize works on a boolean foreground mask, so invert the
    # white-background image first, then map back to 0/255 uint8.
    src_img_ = src_region != 255
    tag_img_ = tag_region != 255
    src_skel = skeletonize(src_img_)
    tag_skel = skeletonize(tag_img_)
    src_skel = (1 - src_skel) * 255
    tag_skel = (1 - tag_skel) * 255
    src_skel = np.array(src_skel, dtype=np.uint8)
    tag_skel = np.array(tag_skel, dtype=np.uint8)

    src_end_points = getEndPointsOfSkeletonLine(src_skel)
    tag_end_points = getEndPointsOfSkeletonLine(tag_skel)
    src_cross_points = getCrossPointsOfSkeletonLine(src_skel)
    tag_cross_points = getCrossPointsOfSkeletonLine(tag_skel)

    # if len(src_cross_points) > 0:
    #     # exist branches
    #     src_skel = removeBranchOfSkeletonLine(src_skel, src_end_points, src_cross_points)
    #
    # if len(tag_cross_points) > 0:
    #     # exist branches
    #     tag_skel = removeBranchOfSkeletonLine(tag_skel, tag_end_points, tag_cross_points)

    cv2.imshow("src skel", src_skel)
    cv2.imshow("tag skel", tag_skel)

    # split the strokes based on the rule: begin, middle and the end parts.
    src_regions = splitStrokes(src_region, type="LongHeng")
    tag_regions = splitStrokes(tag_region, type="LongHeng")
    print('len src regions: %d' % len(src_regions))
    print('len tag regions: %d' % len(tag_regions))

    cv2.imshow('src begin', src_regions[0])
    cv2.imshow('src middle', src_regions[1])
    cv2.imshow('src end', src_regions[2])

    cv2.imshow("src", src_region)
    cv2.imshow("tag", tag_region)

    cv2.waitKey(0)
    cv2.destroyAllWindows()
continue for cpt in cross_points: dist = math.sqrt((pt[0] - cpt[0])**2 + (pt[1] - cpt[1])**2) if dist <= dist_threshold: is_valid = True break if is_valid: valid_corners_area_center_points.append(pt) print("valid corner area center points num: %d" % len(valid_corners_area_center_points)) del blank_gray # 7. Get all contours of component component_contours = getContourOfImage(component) contours = getConnectedComponents(component_contours, connectivity=8) print("contours num: %d" % len(contours)) # 8. Process contours to get closed and 1-pixel width contours contours_processed = [] for cont in contours: cont = removeBreakPointsOfContour(cont) contours_processed.append(cont) print("contours processed num: %d" % len(contours_processed)) # 9. Find corner points of conthours closed to corner region center points. For each contour, there is a coner points list. contours_corner_points = [] for i in range(len(contours_processed)): corner_points = [] contour = contours_processed[i]
def main():
    """
    Stroke-extraction experiment for one character image: build the
    break-point-free contour and branch-free skeleton, detect and merge
    corner points, segment the contour into sub-contours at the corners,
    and recombine selected sub-contours into two filled stroke images,
    displayed in OpenCV windows.
    """
    # 0107亻 1133壬 0554十 0427凹
    path = "0554十.jpg"

    # open image as grayscale and binarize
    img = cv2.imread(path, 0)
    _, img = cv2.threshold(img, 127, 255, cv2.THRESH_BINARY)
    img_rgb = cv2.cvtColor(img, cv2.COLOR_GRAY2RGB)

    # contour without break points
    contour = getContourOfImage(img.copy())
    contour = removeBreakPointsOfContour(contour)
    contour_rgb = cv2.cvtColor(contour, cv2.COLOR_GRAY2RGB)
    contours = splitConnectedComponents(contour)
    print("contours num: %d" % len(contours))

    # each contour as an ordered point list
    contours_sorted = []
    for cont in contours:
        points = sortPointsOnContourOfImage(cont)
        print("points num: %d" % len(points))
        contours_sorted.append(points)

    # all black (contour) pixels as (x, y) tuples
    contour_points = []
    for y in range(contour.shape[0]):
        for x in range(contour.shape[1]):
            if contour[y][x] == 0.0:
                # black points
                contour_points.append((x, y))
    print("contour points num:%d" % len(contour_points))

    # skeleton without extra branches
    skeleton = getSkeletonOfImage(img.copy())
    end_points = getEndPointsOfSkeletonLine(skeleton)
    cross_points = getCrossPointsOfSkeletonLine(skeleton)
    print("originale end: %d and cross: %d" % (len(end_points), len(cross_points)))
    skeleton_nobranches = removeBranchOfSkeletonLine(skeleton.copy(), end_points,
                                                    cross_points)
    skeleton = skeleton_nobranches

    # new end points and cross points on the pruned skeleton
    end_points = getEndPointsOfSkeletonLine(skeleton)
    cross_points = getCrossPointsOfSkeletonLine(skeleton)
    cross_points_bk = cross_points.copy()

    # merge cross points that are closer than the threshold: replace each
    # close pair with their midpoint, snapped to the nearest skeleton pixel
    # when the midpoint itself is not on the skeleton.
    cross_points_merged = []
    cross_distance_threshold = 10
    used_index = []
    for i in range(len(cross_points)):
        if i in used_index:
            continue
        pt1 = cross_points[i]
        midd_pt = None
        used_index.append(i)
        for j in range(len(cross_points)):
            if i == j or j in used_index:
                continue
            pt2 = cross_points[j]
            dist = math.sqrt((pt2[0] - pt1[0])**2 + (pt2[1] - pt1[1])**2)
            if dist < cross_distance_threshold:
                used_index.append(j)
                offset = (pt1[0] - pt2[0], pt1[1] - pt2[1])
                print(offset)
                midd_pt = (pt2[0] + int(offset[0] / 2.),
                           pt2[1] + int(offset[1] / 2.0))
                if skeleton[midd_pt[1]][midd_pt[0]] == 0.0:
                    cross_points_merged.append(midd_pt)
                else:
                    # midpoint is off the skeleton: brute-force search for
                    # the nearest skeleton pixel over the whole image
                    min_distance = 100000000
                    current_pt = None
                    for y in range(skeleton.shape[0]):
                        for x in range(skeleton.shape[1]):
                            if skeleton[y][x] == 0:
                                dist = math.sqrt((midd_pt[0] - x)**2 +
                                                 (midd_pt[1] - y)**2)
                                if dist < min_distance:
                                    min_distance = dist
                                    current_pt = (x, y)
                    if current_pt:
                        cross_points_merged.append(current_pt)
    print("After merge cross points num: %d" % len(cross_points_merged))
    cross_points = cross_points_merged
    print("After end: %d and cross: %d" % (len(end_points), len(cross_points)))

    skeleton_rgb = cv2.cvtColor(skeleton, cv2.COLOR_GRAY2RGB)
    # display all end points (red), merged cross points (green),
    # original cross points (red)
    for pt in end_points:
        skeleton_rgb[pt[1]][pt[0]] = (0, 0, 255)
    for pt in cross_points:
        skeleton_rgb[pt[1]][pt[0]] = (0, 255, 0)
    for pt in cross_points_bk:
        skeleton_rgb[pt[1]][pt[0]] = (0, 0, 255)

    # all corner points on contour via Harris corner detection
    img = np.float32(img.copy())
    dst = cv2.cornerHarris(img, 3, 3, 0.03)
    dst = cv2.dilate(dst, None)
    corners_area_points = []
    for y in range(dst.shape[0]):
        for x in range(dst.shape[1]):
            if dst[y][x] > 0.1 * dst.max():
                corners_area_points.append((x, y))

    # show the corner points: green on strokes, red on background
    for pt in corners_area_points:
        if img[pt[1]][pt[0]] == 0:
            img_rgb[pt[1]][pt[0]] = (0, 255, 0)
        else:
            img_rgb[pt[1]][pt[0]] = (0, 0, 255)

    # keep only the corner-area points that lie on the contour
    corners_lines_points = []
    for pt in corners_area_points:
        if pt in contour_points:
            corners_lines_points.append(pt)
    for pt in corners_lines_points:
        contour_rgb[pt[1]][pt[0]] = (0, 255, 0)

    # merge runs of consecutive corner points along each sorted contour,
    # keeping the middle point of each run
    corners_merged_points = []
    for contour_sorted in contours_sorted:
        i = 0
        while True:
            midd_index = -1
            pt = contour_sorted[i]
            if pt in corners_lines_points:
                # red point: walk to the end of this run of corner points
                start = i
                end = start
                while True:
                    end += 1
                    if end >= len(contour_sorted):
                        break
                    # next point
                    next_pt = contour_sorted[end]
                    if next_pt in corners_lines_points:
                        # red point
                        continue
                    else:
                        # black point
                        break
                end -= 1
                midd_index = start + int((end - start) / 2.0)
                i = end
            i += 1
            # NOTE(review): the bounds check runs before the append below,
            # so a corner run ending exactly at the list end is dropped —
            # confirm whether that is intended.
            if i >= len(contour_sorted):
                break
            if midd_index != -1:
                corners_merged_points.append(contour_sorted[midd_index])
    print("After merged, corner points num: %d" % len(corners_merged_points))
    for pt in corners_merged_points:
        contour_rgb[pt[1]][pt[0]] = (0, 0, 255)

    # remove the no-corner points: keep points near a cross point but not
    # too near an end point
    corners_points = []
    threshold_distance = 30
    for pt in corners_merged_points:
        dist_cross = min_distance_point2pointlist(pt, cross_points)
        dist_end = min_distance_point2pointlist(pt, end_points)
        if dist_cross < threshold_distance and dist_end > threshold_distance / 3.:
            corners_points.append(pt)
    print("corner pints num: %d" % len(corners_points))
    for pt in corners_points:
        contour_rgb[pt[1]][pt[0]] = (255, 0, 0)

    # segment contour to sub-contours based on the corner points
    def segmentContourBasedOnCornerPoints(contour_sorted, corner_points):
        """
        Segment an ordered contour into sub-contours, splitting at each
        corner point (the last segment wraps around to the first corner).
        :param contour_sorted: ordered (x, y) contour points
        :param corner_points: corner points lying on that contour
        :return: list of sub-contour point lists, or None on bad input
        """
        if contour_sorted is None or corner_points is None:
            return

        # sub contour index
        sub_contour_index = []
        for pt in corner_points:
            index = contour_sorted.index(pt)
            sub_contour_index.append(index)
        print("sub contour index num: %d" % len(sub_contour_index))

        sub_contours = []
        for i in range(len(sub_contour_index)):
            if i == len(sub_contour_index) - 1:
                # wrap-around segment back to the first corner
                sub_contour = contour_sorted[sub_contour_index[i]:len(
                    contour_sorted)] + contour_sorted[0:sub_contour_index[0] + 1]
            else:
                sub_contour = contour_sorted[
                    sub_contour_index[i]:sub_contour_index[i + 1] + 1]
            sub_contours.append(sub_contour)
        print("sub contours num: %d" % len(sub_contours))
        return sub_contours

    # segment contour to sub-contours
    # NOTE(review): only the result of the last contour survives the loop
    for contour in contours:
        cont_sorted = sortPointsOnContourOfImage(contour)
        sub_contours = segmentContourBasedOnCornerPoints(
            cont_sorted, corners_points)

    # cluster corner points: pair up points that are roughly collinear
    # (aligned within 20px on one axis)
    corner_points_cluster = []
    used_index = []
    colinear_couple = []
    for i in range(len(corners_points)):
        if i in used_index:
            continue
        for j in range(len(corners_points)):
            if i == j or j in used_index:
                continue
            min_offset = min(abs(corners_points[i][0] - corners_points[j][0]),
                             abs(corners_points[i][1] - corners_points[j][1]))
            if min_offset < 20:
                couple = [corners_points[i], corners_points[j]]
                colinear_couple.append(couple)
                used_index.append(j)
    print("co linear num: %d" % len(colinear_couple))
    print("sub contours num: %d" % len(sub_contours))

    # stroke 1: sub-contours 0 and 2 joined by straight lines at their ends
    # (assumes exactly 4 sub-contours for this test character — TODO confirm)
    stroke1_img = np.ones_like(contour) * 255
    stroke1_img = np.array(stroke1_img, dtype=np.uint8)
    stroke1_img_rgb = cv2.cvtColor(stroke1_img, cv2.COLOR_GRAY2RGB)
    for pt in sub_contours[0]:
        stroke1_img_rgb[pt[1]][pt[0]] = (0, 0, 0)
        stroke1_img[pt[1]][pt[0]] = 0
    for pt in sub_contours[2]:
        stroke1_img_rgb[pt[1]][pt[0]] = (0, 0, 0)
        stroke1_img[pt[1]][pt[0]] = 0
    cv2.line(stroke1_img_rgb, sub_contours[0][0], sub_contours[2][-1],
             (0, 0, 255), 1)
    cv2.line(stroke1_img_rgb, sub_contours[0][-1], sub_contours[2][0],
             (0, 0, 255), 1)
    cv2.line(stroke1_img, sub_contours[0][0], sub_contours[2][-1], 0, 1)
    cv2.line(stroke1_img, sub_contours[0][-1], sub_contours[2][0], 0, 1)

    # stroke 2: sub-contours 1 and 3 joined the same way
    stroke2_img = np.ones_like(contour) * 255
    stroke2_img = np.array(stroke2_img, dtype=np.uint8)
    stroke2_img_rgb = cv2.cvtColor(stroke2_img, cv2.COLOR_GRAY2RGB)
    for pt in sub_contours[1]:
        stroke2_img_rgb[pt[1]][pt[0]] = (0, 0, 0)
        stroke2_img[pt[1]][pt[0]] = 0
    for pt in sub_contours[3]:
        stroke2_img_rgb[pt[1]][pt[0]] = (0, 0, 0)
        stroke2_img[pt[1]][pt[0]] = 0
    cv2.line(stroke2_img_rgb, sub_contours[1][0], sub_contours[3][-1],
             (0, 0, 255), 1)
    cv2.line(stroke2_img_rgb, sub_contours[1][-1], sub_contours[3][0],
             (0, 0, 255), 1)
    cv2.line(stroke2_img, sub_contours[1][0], sub_contours[3][-1], 0, 1)
    cv2.line(stroke2_img, sub_contours[1][-1], sub_contours[3][0], 0, 1)

    # fill each closed stroke outline with black
    storke1_points = sortPointsOnContourOfImage(stroke1_img)
    stroke2_points = sortPointsOnContourOfImage(stroke2_img)

    stroke1_img = np.ones_like(stroke1_img) * 255
    stroke1_img = np.array(stroke1_img, dtype=np.uint8)
    storke1_points = np.array([storke1_points], "int32")
    cv2.fillPoly(stroke1_img, storke1_points, 0)

    stroke2_img = np.ones_like(stroke2_img) * 255
    stroke2_img = np.array(stroke2_img, dtype=np.uint8)
    storke2_points = np.array([stroke2_points], "int32")
    cv2.fillPoly(stroke2_img, storke2_points, 0)

    # find corresponding sub-contours based on the co-linear couple
    # for sub in sub_contours:
    #     pt1 = sub[0]
    #     pt2 = sub[-1]
    #
    #     couples = []
    #     for coup in colinear_couple:
    #         if pt1 in coup or pt2 in coup:
    #             # if 4 points, 2 points should be in same sub-contour
    #             if pt1 in coup and pt2 in coup:
    #                 continue
    #             couples.append(coup)
    #     print("sub couples num: %d" % len(couples))

    # cv2.imshow("img rgb", img_rgb)
    # cv2.imshow("skeleton", skeleton)
    # cv2.imshow("skeleton no branches", skeleton_nobranches )
    cv2.imshow("skeleton rgb", skeleton_rgb)
    cv2.imshow("contour rgb", contour_rgb)
    cv2.imshow("stroke 1", stroke1_img)
    cv2.imshow("stroke 2", stroke2_img)
    cv2.imshow("stroke1rgb", stroke1_img_rgb)
    cv2.imshow("stroke2rgb", stroke2_img_rgb)
    # for i in range(len(contours)):
    #     cv2.imshow("contour %d" % i, contours[i])

    cv2.waitKey(0)
    cv2.destroyAllWindows()
def main():
    """
    Contour-smoothing experiment for one stroke template: extract the
    contour, repair break points, pick key corner points, segment the
    contour at the corners, Bezier-fit each segment, and fill the smoothed
    outline, displaying every stage in OpenCV windows.
    """
    # load image as grayscale and binarize
    img_path = "../templates/stroke_dan.png"
    img = cv2.imread(img_path, 0)
    _, img = cv2.threshold(img, 127, 255, cv2.THRESH_BINARY)
    img_rgb = cv2.cvtColor(img, cv2.COLOR_GRAY2RGB)

    # get contour of image
    contour = getContourOfImage(img)
    contour_rgb = cv2.cvtColor(contour, cv2.COLOR_GRAY2RGB)

    # fix breaking points on the contour: a contour pixel with exactly one
    # valid neighbor is a break point
    break_points = []
    for y in range(1, contour.shape[0] - 1):
        for x in range(1, contour.shape[1] - 1):
            if contour[y][x] == 0.0:
                num_ = getNumberOfValidPixels(contour, x, y)
                if num_ == 1:
                    print((x, y))
                    break_points.append((x, y))
    # NOTE(review): only the first two break points are joined — assumes at
    # most one break in the contour; confirm for multi-break inputs.
    if len(break_points) != 0:
        contour = cv2.line(contour, break_points[0], break_points[1], color=0,
                           thickness=1)
    cv2.imshow("c", contour)

    # order the contour points
    contour_points_ordered = sortPointsOnContourOfImage(contour)
    # contour_points_counter_clockwise = order_points(contour, isClockwise=False)
    print("number of points in ordered contour: %d" % len(contour_points_ordered))
    # print("counter clock: %d" % len(contour_points_counter_clockwise))

    contour_rgb_clock = contour_rgb.copy()
    contour_smooth_rgb_clock = contour_rgb.copy()
    # contour_rgb_counter_clock = contour_rgb.copy()

    # get key points on contour via Shi-Tomasi corner detection, then snap
    # each detected corner to a pixel that lies on the ordered contour
    corners = cv2.goodFeaturesToTrack(contour, 6, 0.01, 10)
    corners = np.int0(corners)
    print("number of key points on contour: %d" % len(corners))

    index = 0
    corner_points_ = []
    for i in corners:
        MAX_DIST = 10000
        x, y = i.ravel()
        pt_ = None
        # try the 8-neighborhood first
        if (x, y - 1) in contour_points_ordered:
            pt_ = (x, y - 1)
        elif (x + 1, y - 1) in contour_points_ordered:
            pt_ = (x + 1, y - 1)
        elif (x + 1, y) in contour_points_ordered:
            pt_ = (x + 1, y)
        elif (x + 1, y + 1) in contour_points_ordered:
            pt_ = (x + 1, y + 1)
        elif (x, y + 1) in contour_points_ordered:
            pt_ = (x, y + 1)
        elif (x - 1, y + 1) in contour_points_ordered:
            pt_ = (x - 1, y + 1)
        elif (x - 1, y) in contour_points_ordered:
            pt_ = (x - 1, y)
        elif (x - 1, y - 1) in contour_points_ordered:
            pt_ = (x - 1, y - 1)
        else:
            # find the nearest point on the contour
            minx = 0
            miny = 0
            for cp in contour_points_ordered:
                dist = math.sqrt((x - cp[0])**2 + (y - cp[1])**2)
                if dist < MAX_DIST:
                    MAX_DIST = dist
                    minx = cp[0]
                    miny = cp[1]
            pt_ = (minx, miny)
        corner_points_.append(pt_)
        cv2.circle(contour_rgb, (pt_[0], pt_[1]), 1, (0, 0, 255), -1)
        cv2.putText(contour_rgb, str(index), (pt_[0], pt_[1]),
                    cv2.FONT_HERSHEY_SIMPLEX, 1, (0, 0, 0), 2, cv2.LINE_AA)
        index += 1
    print("orignal corner points number: %d" % len(corner_points_))

    # order the corner points in the clockwise direction (contour order)
    corner_points = []
    index = 0
    for pt in contour_points_ordered:
        if pt in corner_points_:
            corner_points.append(pt)
            cv2.circle(contour_rgb_clock, (pt[0], pt[1]), 3, (255, 0, 0), -1)
            cv2.putText(contour_rgb_clock, str(index), (pt[0], pt[1]),
                        cv2.FONT_HERSHEY_SIMPLEX, 1, (0, 0, 0), 2, cv2.LINE_AA)
            index += 1
    print("corner points len: %d" % len(corner_points))

    # contour segmentation based on the corner points; the last segment
    # wraps around to the first corner
    contour_lines = []
    for id in range(len(corner_points)):
        start_point = corner_points[id]
        end_point = start_point
        if id == len(corner_points) - 1:
            end_point = corner_points[0]
        else:
            end_point = corner_points[id + 1]

        # contour segmentation
        contour_segmentation = []
        start_index = contour_points_ordered.index(start_point)
        end_index = contour_points_ordered.index(end_point)

        if start_index <= end_index:
            # normal index
            contour_segmentation = contour_points_ordered[
                start_index:end_index + 1]
        else:
            # end is at a lower index: wrap past the start of the list
            contour_segmentation = contour_points_ordered[start_index: len(contour_points_ordered)] + \
                                   contour_points_ordered[0: end_index+1]
        contour_lines.append(contour_segmentation)
    print("number of contour segmentation: %d" % len(contour_lines))

    # use different color to show the contour segmentation
    for id in range(len(contour_lines)):
        if id % 3 == 0:
            # red lines
            for pt in contour_lines[id]:
                contour_rgb_clock[pt[1]][pt[0]] = (0, 0, 255)
        elif id % 3 == 1:
            # blue line
            for pt in contour_lines[id]:
                contour_rgb_clock[pt[1]][pt[0]] = (255, 0, 0)
        elif id % 3 == 2:
            # green line
            for pt in contour_lines[id]:
                contour_rgb_clock[pt[1]][pt[0]] = (0, 255, 0)

    # original and smooth contour
    smoothed_contour_points = []
    for id in range(len(contour_lines)):
        print("line index: %d" % id)
        # original contour in red
        for pt in contour_lines[id]:
            contour_smooth_rgb_clock[pt[1]][pt[0]] = (0, 0, 255)
        # smooth contour: cubic Bezier fit of this segment
        li_points = np.array(contour_lines[id])
        beziers = fitCurve(li_points, maxError=30)
        print("len bezier: %d" % len(beziers))
        # # print(beziers)
        for bez in beziers:
            print(len(bez))
            bezier_points = draw_cubic_bezier(bez[0], bez[1], bez[2], bez[3])
            # NOTE(review): the inner loop variable shadows the outer `id`;
            # harmless here because the outer for re-binds it, but fragile.
            for id in range(len(bezier_points) - 1):
                start_pt = bezier_points[id]
                end_pt = bezier_points[id + 1]
                cv2.line(contour_smooth_rgb_clock, start_pt, end_pt,
                         (255, 0, 0))
            smoothed_contour_points += bezier_points

    # fill color in contour with sorted smooth contour points
    print(len(smoothed_contour_points))
    smoothed_contour_points = np.array([smoothed_contour_points], "int32")
    fill_contour_smooth = np.ones(img.shape) * 255
    fill_contour_smooth = np.array(fill_contour_smooth, dtype=np.uint8)
    fill_contour_smooth = cv2.fillPoly(fill_contour_smooth,
                                       smoothed_contour_points, 0)

    cv2.imshow("src", img)
    cv2.imshow("contour", contour)
    # cv2.imshow("corners", contour_rgb)
    cv2.imshow("contour clock", contour_rgb_clock)
    cv2.imshow("smooth contour clock", contour_smooth_rgb_clock)
    # cv2.imshow("contour counter clock", contour_rgb_counter_clock)
    cv2.imshow("fill contour", fill_contour_smooth)

    cv2.waitKey(0)
    cv2.destroyAllWindows()
def autoSmoothContoursOfComponent(component, eplison=10, max_error=200):
    """
    Automatically smooth the contours of component: simplify each contour
    with RDP, fit cubic Bezier curves to the simplified segments, and
    return the re-filled component image.

    :param component: grayscale component image (black strokes on white);
                      presumably 0/255 valued — TODO confirm against callers.
    :param eplison: RDP simplification tolerance (sic: epsilon).
    :param max_error: maximal fitting error passed to fitCurve.
    :return: filled, smoothed component image, or None if component is None.
    """
    if component is None:
        return

    # 5. Get contours of this component
    component_contours = getContourOfImage(component)
    contours = getConnectedComponents(component_contours, connectivity=8)
    print("contours num: ", len(contours))

    # 6. Process contours to get closed and 1-pixel width contours by
    # removing break points
    contours_processed = []
    for cont in contours:
        cont = removeBreakPointsOfContour(cont)
        contours_processed.append(cont)
    print("contours processed num: %d" % len(contours_processed))

    # 7. Smooth contours with RDP and cubic Bezier fit curve
    contours_smoothed = []
    for cont in contours_processed:
        cont_smoothed = []

        # sorted points on contour
        cont_sorted = sortPointsOnContourOfImage(cont)

        # simplify contour with RDP
        cont_simp = rdp(cont_sorted, eplison)
        print("cont simp num: ", len(cont_simp))

        # split contour into sub-contours between consecutive simplified
        # points and Bezier-fit each one
        for i in range(len(cont_simp) - 1):
            start_pt = cont_simp[i]
            end_pt = cont_simp[i + 1]

            start_index = cont_sorted.index(start_pt)
            end_index = cont_sorted.index(end_pt)

            sub_cont_points = np.array(cont_sorted[start_index:end_index + 1])
            beziers = fitCurve(sub_cont_points, maxError=max_error)
            for bez in beziers:
                bezier_points = draw_cubic_bezier(bez[0], bez[1], bez[2],
                                                  bez[3])
                cont_smoothed += bezier_points
        contours_smoothed.append(cont_smoothed)
    print("contours smoothed num: ", len(contours_smoothed))

    # fill black color in contour area
    if len(contours_smoothed) == 1:
        # no hole exist, directly fill black in the contour
        cont = contours_smoothed[0]
        # cont_points = sortPointsOnContourOfImage(cont)
        cont_points = np.array([cont], "int32")
        fill_contour_smooth = np.ones_like(component) * 255
        fill_contour_smooth = np.array(fill_contour_smooth, dtype=np.uint8)
        fill_contour_smooth = cv2.fillPoly(fill_contour_smooth, cont_points, 0)
        return fill_contour_smooth
    else:
        # exist hole, should processed: fill each contour, keep fills that
        # overlap the component, treat the others as holes
        print("there are holes!")
        fill_img_list = []
        hole_points = []
        for cont in contours_smoothed:
            # cont_points = sortPointsOnContourOfImage(cont)
            cont_points = np.array([cont], "int32")
            fill_contour_smooth = np.ones_like(component) * 255
            fill_contour_smooth = np.array(fill_contour_smooth, dtype=np.uint8)
            fill_contour_smooth = cv2.fillPoly(fill_contour_smooth,
                                               cont_points, 0)

            # overlap ratio between the filled contour and the component
            # NOTE(review): valid_num == 0 (blank component) would raise
            # ZeroDivisionError below — confirm callers never pass one.
            valid_num = same_num = 0
            for y in range(component.shape[0]):
                for x in range(component.shape[1]):
                    if component[y][x] == 0.0:
                        valid_num += 1
                        if fill_contour_smooth[y][x] == 0.0:
                            same_num += 1
            if 1.0 * same_num / valid_num > 0.8:
                fill_img_list.append(fill_contour_smooth)
                print("ratio: %f" % (1.0 * same_num / valid_num))
            else:
                print("ratio: %f" % (1.0 * same_num / valid_num))
                for y in range(fill_contour_smooth.shape[0]):
                    for x in range(fill_contour_smooth.shape[1]):
                        if fill_contour_smooth[y][x] == 0.0:
                            hole_points.append((x, y))

        # merge filled images
        blank_temp = np.ones_like(component) * 255
        for fl in fill_img_list:
            for y in range(fl.shape[0]):
                for x in range(fl.shape[1]):
                    if fl[y][x] == 0.0:
                        blank_temp[y][x] = fl[y][x]

        # hole points: punch them back to white
        for pt in hole_points:
            blank_temp[pt[1]][pt[0]] = 255
        return blank_temp