def dewarp_book(image):
    """Dewarp a book/page photo via a four-point perspective transform.

    Parameters
    ----------
    image : numpy ndarray
        The input BGR image.

    Returns
    -------
    numpy ndarray
        The dewarped image, cropped to the detected page quadrilateral,
        at the original input resolution.

    Raises
    ------
    ValueError
        If no four-point (page) contour can be found in the image.
    """
    # Ratio between the original height and the 500px working height,
    # used later to scale the detected contour back to full resolution.
    ratio = image.shape[0] / 500.0
    # Keep an untouched full-resolution copy for the final transform.
    orig = image.copy()
    # Detect on a fixed-height copy: faster and scale-stable thresholds.
    image = imutils.resize(image, height=500)

    # Grayscale + Gaussian blur to suppress noise before edge detection.
    gray = cv2.cvtColor(image, cv2.COLOR_BGR2GRAY)
    gray = cv2.GaussianBlur(gray, (5, 5), 0)

    # Automatic Canny thresholds from the median intensity (sigma rule).
    sigma = 0.33
    # BUG FIX: the median and Canny were previously computed on the colour
    # `image`, leaving the prepared blurred grayscale unused — contradicting
    # the original comment "median of the single channel pixel intensities".
    v = np.median(gray)
    lower = int(max(0, (1.0 - sigma) * v))
    upper = int(min(255, (1.0 + sigma) * v))
    edged = cv2.Canny(gray, lower, upper)

    # Find the 5 largest contours; the page is assumed to be the largest
    # contour that approximates to a quadrilateral.
    cnts = cv2.findContours(edged.copy(), cv2.RETR_LIST,
                            cv2.CHAIN_APPROX_SIMPLE)
    cnts = imutils.grab_contours(cnts)
    cnts = sorted(cnts, key=cv2.contourArea, reverse=True)[:5]

    screenCnt = None
    for c in cnts:
        # Approximate the contour; 2% of perimeter is the usual tolerance.
        peri = cv2.arcLength(c, True)
        approx = cv2.approxPolyDP(c, 0.02 * peri, True)
        if len(approx) == 4:
            screenCnt = approx
            break
    # BUG FIX: previously an image with no 4-point contour raised an
    # opaque NameError on `screenCnt`; fail explicitly instead.
    if screenCnt is None:
        raise ValueError("Could not find a four-point page contour.")

    # Scale the contour back up and warp the full-resolution original.
    return four_point_transform(orig, screenCnt.reshape(4, 2) * ratio)
# NOTE(review): this fragment begins MID-DEFINITION — the `cv2.line(...)` calls
# and `pointIndex = pointIndex + 1` presumably belong to a `draw_circle` mouse
# callback whose `def` line is not visible here (it is registered below via
# `cv2.setMouseCallback("img", draw_circle)`) — confirm against the full file.
# `show_window` polls `cv2.imshow` until four points are collected (pointIndex
# == 4) or ESC (keycode 27) is pressed. The driver code then applies
# `four_point_transform` to the first four clicked points and saves the
# transformation matrix.
# NOTE(review): the saved filename "transormation_matrix.npy" looks like a typo
# for "transformation_matrix.npy" — it is a runtime string, so any loader
# elsewhere must use the same (misspelled) name; verify before renaming.
# NOTE(review): "Uncommment" in the trailing comment is a typo; the statements
# are collapsed onto one physical line, so the inline `# print(...)` comment
# would swallow the rest of the line as written — the original file likely had
# real newlines here.
cv2.line(image, pts[0], pts[2], (0, 255, 0), thickness=2) cv2.line(image, pts[1], pts[3], (0, 255, 0), thickness=2) cv2.line(image, pts[2], pts[3], (0, 255, 0), thickness=2) pointIndex = pointIndex + 1 def show_window(): while True: # print(pts,pointIndex-1) cv2.imshow("img", image) if pointIndex == 4: break if cv2.waitKey(20) & 0xFF == 27: break cv2.namedWindow("img") cv2.setMouseCallback("img", draw_circle) show_window() warped, temp, transformation_matrix = four_point_transform( image, np.array(pts[:4], dtype="float32"), pts[4]) np.save("transormation_matrix.npy", transformation_matrix) cv2.circle(warped, tuple(temp[0][0]), 1, (0, 255, 0), -1) # show the original and warped images cv2.imshow("Original", image) # cv2.imshow("Warped", warped) #Uncommment if you want to see the transformed image cv2.waitKey(0)
# NOTE(review): this fragment begins MID-LOOP — `peri = cv2.arcLength(c, True)`
# is the body of a `for c in cnts:` loop whose header is not visible here.
# It approximates each contour and takes the first 4-point polygon as the page
# outline (`screenCnt`), then applies `four_point_transform`, converts to
# grayscale, and binarizes with `threshold_local` (adaptive gaussian, block 11,
# offset 10) for a "scanned paper" effect before displaying the result.
# NOTE(review): if no contour approximates to 4 points, `screenCnt` is never
# assigned and the later `screenCnt.reshape(...)` raises NameError — same
# hazard as `dewarp_book` above; confirm against the full file and guard it.
# NOTE(review): the statements are collapsed onto one physical line, so the
# embedded `# ...` comments would swallow the rest of the line as written —
# the original file likely had real newlines here.
peri = cv2.arcLength(c, True) approx = cv2.approxPolyDP(c, 0.02 * peri, True) # if our approximated contour has four points, then we # can assume that we have found our screen if len(approx) == 4: screenCnt = approx break # show the contour (outline) of the piece of paper # print("STEP 2: Find contours of paper") # cv2.drawContours(image, [screenCnt], -1, (0, 255, 0), 2) # cv2.imshow("Outline", image) # cv2.waitKey(0) # cv2.destroyAllWindows() # apply the four point transform to obtain a top-down # view of the original image warped = four_point_transform(orig, screenCnt.reshape(4, 2) * ratio) # convert the warped image to grayscale, then threshold it # to give it that 'black and white' paper effect warped = cv2.cvtColor(warped, cv2.COLOR_BGR2GRAY) T = threshold_local(warped, 11, offset=10, method="gaussian") warped = (warped > T).astype("uint8") * 255 # show the original and scanned images print("STEP 3: Apply perspective transform") # cv2.imshow("Original", imutils.resize(orig, height = 650)) cv2.imshow("Scanned", imutils.resize(warped, height=650)) cv2.waitKey(0)