def solve_maze(image):
    """Solve an orthogonal maze given as a BGR image.

    The maze walls are thresholded and their two outer contours extracted;
    the band between the dilated and eroded wall mask is the corridor that
    separates the two wall halves, i.e. the solution path.  That path is
    painted green onto a copy of the input.

    Parameters
    ----------
    image : numpy.ndarray
        BGR maze image (as returned by ``cv2.imread``).

    Returns
    -------
    numpy.ndarray or None
        BGR image with the solution path painted green, or ``None`` if the
        maze could not be solved.
    """
    try:
        gray_image = cv2.cvtColor(image, cv2.COLOR_BGR2GRAY)
        thresholded_image = utils.adaptive_threshold(gray_image, cv2.THRESH_BINARY_INV)
        print('Finding Contours')
        contours, _ = cv2.findContours(thresholded_image, cv2.RETR_EXTERNAL, cv2.CHAIN_APPROX_SIMPLE)
        # An orthogonal maze must yield exactly two external contours (the
        # two wall halves) for the dilate/erode trick below to be meaningful.
        # The script version of this pipeline enforces the same invariant;
        # previously a bad contour count either drew the wrong contour or
        # raised and was silently swallowed by the except clause.
        if len(contours) != 2:
            return None
        solution_image = np.zeros(gray_image.shape, dtype=np.uint8)
        # Draw one wall half as a thick white outline on a black canvas.
        cv2.drawContours(solution_image, contours, 0, (255, 255, 255), 5)
        kernel = np.ones((15, 15), dtype=np.uint8)
        solution_image = cv2.dilate(solution_image, kernel)
        eroded_image = cv2.erode(solution_image, kernel)
        # dilated - eroded leaves only the band that grew across the corridor
        # between the wall halves: the solution path mask.
        solution_image = cv2.absdiff(solution_image, eroded_image)
        # Paint the path green: clear blue/red under the mask, set green.
        b, g, r = cv2.split(image)
        b &= ~solution_image
        g |= solution_image
        r &= ~solution_image
        solution_image = cv2.merge([b, g, r]).astype(np.uint8)
        return solution_image
    except Exception:
        # Best-effort contract: callers treat None as "unsolvable".
        # NOTE(review): this broad except also hides programming errors --
        # consider logging the exception before returning.
        return None
# NOTE(review): this chunk begins mid-statement -- the first line below is the
# tail of a `cmd_subfolder = os.path.abspath(...)` assignment whose opening
# lies outside this view (`cmd_subfolder` is consumed two statements down).
os.path.abspath(os.path.join(os.path.split(inspect.getfile(inspect.currentframe()))[0], "..", "Image_Lib")))
# Make the sibling Image_Lib directory importable and pull in the shared
# image helpers used throughout this script.
if cmd_subfolder not in sys.path:
    sys.path.insert(0, cmd_subfolder)
import image_utils as utils

# Command-line interface: one required -i/--image argument with the maze path.
ap = argparse.ArgumentParser(description="Solve orthogonal mazes")
ap.add_argument("-i", "--image", required = True, help = "Path to image file")
args = vars(ap.parse_args())

# Load the maze and binarize it inverted, so walls become white on black.
image = cv2.imread(args["image"])
gray_image = cv2.cvtColor(image, cv2.COLOR_BGR2GRAY)
# cnts = utils.get_contours(gray_image, 200)
# cnt = sorted(cnts, key=cv2.contourArea, reverse=True)[0]
thresholded_image = utils.adaptive_threshold(gray_image, cv2.THRESH_BINARY_INV)
# Debug view of the thresholded maze; waits for a keypress.
cv2.imshow("Output", utils.image_resize(thresholded_image, height=600))
cv2.waitKey()
# NOTE(review): the 3-value unpack is the OpenCV 3.x findContours signature;
# solve_maze elsewhere in this source uses the 2-value 4.x form -- confirm
# which OpenCV version this script targets.
_, cnts, _ = cv2.findContours(thresholded_image, cv2.RETR_EXTERNAL, cv2.CHAIN_APPROX_SIMPLE)
# The maze walls must come out as exactly two external contours (one per
# wall half); anything else is a hard failure.  (Python 2 print statement.)
if len(cnts) != 2:
    print len(cnts)
    raise ValueError("Unable to solve maze - Failed at Contour finding!")
# Fill the first wall contour solid white on a black canvas and show it.
solution_image = np.zeros(gray_image.shape, dtype=np.uint8)
cv2.drawContours(solution_image, cnts, 0, (255,255,255), cv2.FILLED)
cv2.imshow("Output", utils.image_resize(solution_image, height=600))
cv2.waitKey()
# 15x15 structuring element for the morphology steps that follow this chunk.
kernel = np.ones((15, 15), dtype=np.uint8)
approx = cv2.approxPolyDP(c, 0.02 * peri, True) # if our approximated contour has four points, then we # can assume that we have found our screen if len(approx) == 4: screenCnt = approx break # show the contour (outline) of the piece of paper print "STEP 2: Find contours of paper" # cv2.drawContours(image, [screenCnt], -1, (0, 255, 0), 2) # cv2.imshow("Outline", image) # cv2.waitKey(0) # cv2.destroyAllWindows() # apply the four point transform to obtain a top-down # view of the original image warped = four_point_transform(orig, screenCnt.reshape(4, 2) * ratio) # convert the warped image to grayscale, then threshold it # to give it that 'black and white' paper effect warped = cv2.cvtColor(warped, cv2.COLOR_BGR2GRAY) warped = utils.adaptive_threshold(warped) warped = warped.astype("uint8") # show the original and scanned images print "STEP 3: Apply perspective transform" # cv2.imshow("Original", utils.image_resize(orig, height = 650)) cv2.imshow("Scanned", utils.image_resize(warped, height=650)) cv2.waitKey(0)
"Image_Lib"))) if cmd_subfolder not in sys.path: sys.path.insert(0, cmd_subfolder) import image_utils as utils ap = argparse.ArgumentParser(description="Solve orthogonal mazes") ap.add_argument("-i", "--image", required=True, help="Path to image file") args = vars(ap.parse_args()) image = cv2.imread(args["image"]) gray_image = cv2.cvtColor(image, cv2.COLOR_BGR2GRAY) # cnts = utils.get_contours(gray_image, 200) # cnt = sorted(cnts, key=cv2.contourArea, reverse=True)[0] thresholded_image = utils.adaptive_threshold(gray_image, cv2.THRESH_BINARY_INV) cv2.imshow("Output", utils.image_resize(thresholded_image, height=600)) cv2.waitKey() _, cnts, _ = cv2.findContours(thresholded_image, cv2.RETR_EXTERNAL, cv2.CHAIN_APPROX_SIMPLE) if len(cnts) != 2: print len(cnts) raise ValueError("Unable to solve maze - Failed at Contour finding!") solution_image = np.zeros(gray_image.shape, dtype=np.uint8) cv2.drawContours(solution_image, cnts, 0, (255, 255, 255), cv2.FILLED) cv2.imshow("Output", utils.image_resize(solution_image, height=600)) cv2.waitKey()
# if our approximated contour has four points, then we # can assume that we have found our screen if len(approx) == 4: screenCnt = approx break # show the contour (outline) of the piece of paper print "STEP 2: Find contours of paper" # cv2.drawContours(image, [screenCnt], -1, (0, 255, 0), 2) # cv2.imshow("Outline", image) # cv2.waitKey(0) # cv2.destroyAllWindows() # apply the four point transform to obtain a top-down # view of the original image warped = four_point_transform(orig, screenCnt.reshape(4, 2) * ratio) # convert the warped image to grayscale, then threshold it # to give it that 'black and white' paper effect warped = cv2.cvtColor(warped, cv2.COLOR_BGR2GRAY) warped = utils.adaptive_threshold(warped) warped = warped.astype("uint8") # show the original and scanned images print "STEP 3: Apply perspective transform" # cv2.imshow("Original", utils.image_resize(orig, height = 650)) cv2.imshow("Scanned", utils.image_resize(warped, height=650)) cv2.waitKey(0)