for c in cnts:
    peri = cv2.arcLength(c, True)
    approx = cv2.approxPolyDP(c, 0.02 * peri, True)
    if len(approx) == 4:
        screenCnt = approx
        break

print("Step 2: Find contours of paper")
cv2.drawContours(image, [screenCnt], -1, (0, 255, 0), 2)
# cv2.imshow("Outline", image)
# cv2.waitKey(0)
# cv2.destroyAllWindows()

warped = four_point_transform(orig, screenCnt.reshape(4, 2) * ratio)
warped = cv2.cvtColor(warped, cv2.COLOR_BGR2GRAY)
T = threshold_local(warped, 11, offset=10, method="gaussian")
warped = (warped > T).astype("uint8") * 255

print("Step 3: Apply perspective transform")
imS = cv2.resize(warped, (650, 650))
# cv2.imshow("output", imS)
# cv2.imwrite('Output Image.png', imS)
# cv2.waitKey(0)

from PIL import Image
import PIL.Image
from pytesseract import image_to_string
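# The block above imports image_to_string but stops before the actual OCR
# call. A minimal sketch of that last step (an assumption, not the original
# script's confirmed code): hand the binarized page to Tesseract via PIL.
import cv2
from PIL import Image
from pytesseract import image_to_string

def ocr_scanned_page(binary_page):
    # pytesseract accepts a PIL image (or a file path); converting the
    # NumPy array explicitly keeps the call unambiguous.
    pil_page = Image.fromarray(binary_page)
    # --psm 6 assumes a single uniform block of text, a reasonable default
    # for a flattened document scan; tune as needed.
    return image_to_string(pil_page, config="--psm 6")

# Hypothetical usage on the thresholded output saved above:
# scanned = cv2.imread("Output Image.png", cv2.IMREAD_GRAYSCALE)
# print(ocr_scanned_page(scanned))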
def extract_rois(image_path, scoreboard_id):
    color_image = cv2.imread(image_path, cv2.IMREAD_COLOR)

    apply_tl = cv2.matchTemplate(color_image, scoreboard_templates[scoreboard_id]["tl"], cv2.TM_CCOEFF_NORMED)
    apply_tr = cv2.matchTemplate(color_image, scoreboard_templates[scoreboard_id]["tr"], cv2.TM_CCOEFF_NORMED)
    apply_bl = cv2.matchTemplate(color_image, scoreboard_templates[scoreboard_id]["bl"], cv2.TM_CCOEFF_NORMED)
    apply_br = cv2.matchTemplate(color_image, scoreboard_templates[scoreboard_id]["br"], cv2.TM_CCOEFF_NORMED)

    minval_tl, maxval_tl, minloc_tl, maxloc_tl = cv2.minMaxLoc(apply_tl)
    minval_tr, maxval_tr, minloc_tr, maxloc_tr = cv2.minMaxLoc(apply_tr)
    minval_bl, maxval_bl, minloc_bl, maxloc_bl = cv2.minMaxLoc(apply_bl)
    minval_br, maxval_br, minloc_br, maxloc_br = cv2.minMaxLoc(apply_br)

    if maxval_tl < .92 or maxval_tr < .92 or maxval_bl < .92 or maxval_br < .92:
        return {"error": "Couldn't identify scoreboard for %s" % image_path}

    sbpos_tl = addTuples(maxloc_tl, scoreboard_template_offsets[scoreboard_id]["tl"])
    sbpos_tr = addTuples(maxloc_tr, scoreboard_template_offsets[scoreboard_id]["tr"])
    sbpos_bl = addTuples(maxloc_bl, scoreboard_template_offsets[scoreboard_id]["bl"])
    sbpos_br = addTuples(maxloc_br, scoreboard_template_offsets[scoreboard_id]["br"])

    cv2.line(color_image, sbpos_tl, sbpos_tr, (255, 0, 0), 1)
    cv2.line(color_image, sbpos_tr, sbpos_br, (255, 0, 0), 1)
    cv2.line(color_image, sbpos_br, sbpos_bl, (255, 0, 0), 1)
    cv2.line(color_image, sbpos_bl, sbpos_tl, (255, 0, 0), 1)
    cv2.imshow("scoreboard_vis", color_image)
    cv2.waitKey(0)
    cv2.destroyWindow("scoreboard_vis")

    corrected = fpt.four_point_transform(
        color_image,
        np.array([sbpos_tl, sbpos_tr, sbpos_bl, sbpos_br], dtype="float32"))

    red_divider_y = round(corrected.shape[0] / 3)
    yellow_divider_y = red_divider_y * 2
    offset_left = 30
    cell_width = 15.85

    # Extract ROIs
    red_cells = []
    yellow_cells = []
    for i in range(12):
        left_with_extra_room = offset_left + round(cell_width * i)
        right_with_extra_room = left_with_extra_room + round(cell_width) + 4
        red_roi = corrected[0:red_divider_y, left_with_extra_room:right_with_extra_room]
        yellow_roi = corrected[yellow_divider_y:, left_with_extra_room:right_with_extra_room]
        red_cells.append(red_roi)
        yellow_cells.append(yellow_roi)

    return {"red": red_cells, "yellow": yellow_cells, "full": corrected}
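# A usage sketch for extract_rois (the frame path and scoreboard id are
# hypothetical; scoreboard_templates, scoreboard_template_offsets, addTuples
# and fpt are assumed to be defined elsewhere in the project).
if __name__ == "__main__":
    rois = extract_rois("frame.png", "default")
    if "error" in rois:
        print(rois["error"])
    else:
        # Each of the 12 red/yellow cells is a small BGR crop that can be
        # passed on to OCR or digit classification.
        for idx, cell in enumerate(rois["red"]):
            cv2.imwrite("red_cell_%02d.png" % idx, cell)
        for idx, cell in enumerate(rois["yellow"]):
            cv2.imwrite("yellow_cell_%02d.png" % idx, cell)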
gray = cv2.cvtColor(image, cv2.COLOR_BGR2GRAY)
gray = cv2.GaussianBlur(gray, (5, 5), 0)
edged = cv2.Canny(gray, 75, 200)

# Step 2: Find the contour representing the paper being scanned
img, contours, hierarchies = cv2.findContours(edged.copy(), cv2.RETR_LIST, cv2.CHAIN_APPROX_SIMPLE)
cnts = sorted(contours, key=cv2.contourArea, reverse=True)[:5]
for c in cnts:
    peri = cv2.arcLength(c, True)
    approx = cv2.approxPolyDP(c, 0.02 * peri, True)
    if len(approx) == 4:
        page = approx
        break

# Step 3: Apply a perspective transform & threshold
warped = four_point_transform(orig, page.reshape(4, 2) * ratio)
warped = cv2.cvtColor(warped, cv2.COLOR_BGR2GRAY)
th1 = cv2.adaptiveThreshold(warped, 255, cv2.ADAPTIVE_THRESH_MEAN_C, cv2.THRESH_BINARY, 11, 8)
th2 = cv2.adaptiveThreshold(warped, 255, cv2.ADAPTIVE_THRESH_GAUSSIAN_C, cv2.THRESH_BINARY, 15, 8)
warped = cv2.addWeighted(src1=th1, alpha=0.7, src2=th2, beta=0.3, gamma=0)

cv2.imshow("Original", imutils.resize(orig, height=650))
cv2.imshow("Scanned", imutils.resize(warped, height=650))
cv2.waitKey(0)
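# Note: the three-value unpacking of cv2.findContours above matches the
# OpenCV 3.x API; OpenCV 4.x returns only (contours, hierarchy). A
# version-agnostic sketch using imutils, which is already used here:
import cv2
import imutils

def find_page_candidates(edged):
    # grab_contours pulls the contour list out of either return signature,
    # so the same code runs under OpenCV 3.x and 4.x.
    found = cv2.findContours(edged.copy(), cv2.RETR_LIST, cv2.CHAIN_APPROX_SIMPLE)
    contours = imutils.grab_contours(found)
    return sorted(contours, key=cv2.contourArea, reverse=True)[:5]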
edged = cv2.Canny(blurred, th1, th2)
img, cnts, hierarchies = cv2.findContours(edged.copy(), cv2.RETR_LIST, cv2.CHAIN_APPROX_SIMPLE)
cnts = sorted(cnts, key=cv2.contourArea, reverse=True)

# Find the four-cornered contour that represents the exam page
for c in cnts:
    peri = cv2.arcLength(c, True)
    approx = cv2.approxPolyDP(c, 0.02 * peri, True)
    if len(approx) == 4:
        exam_page = approx
        break

# Get top-down views of the page (color and grayscale), then binarize
paper = four_point_transform(image, exam_page.reshape(4, 2))
warped = four_point_transform(gray, exam_page.reshape(4, 2))
ret, thresh = cv2.threshold(warped, 0, 255, cv2.THRESH_BINARY_INV | cv2.THRESH_OTSU)

# Keep only roughly square contours at least 20x20 px: the candidate answer bubbles
cnts2 = cv2.findContours(thresh.copy(), cv2.RETR_EXTERNAL, cv2.CHAIN_APPROX_SIMPLE)
cnts2 = imutils.grab_contours(cnts2)
qstn_cnts = []
for c in cnts2:
    (x, y, w, h) = cv2.boundingRect(c)
    ar = w / float(h)
    if w >= 20 and h >= 20 and 0.9 <= ar <= 1.1:
        qstn_cnts.append(c)
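# One common way to turn qstn_cnts into a score (a sketch, not this project's
# confirmed grading logic): sort the bubbles top-to-bottom, take them in rows
# of five (assuming five choices per question, sorted left-to-right), and
# treat the bubble with the most foreground pixels in `thresh` as the marked
# answer. The answer_key argument is a hypothetical {question: choice} map.
import cv2
import numpy as np
from imutils import contours as imcnts

def grade_sheet(thresh, qstn_cnts, answer_key, choices_per_question=5):
    sorted_cnts = imcnts.sort_contours(qstn_cnts, method="top-to-bottom")[0]
    correct = 0
    for q, row_start in enumerate(range(0, len(sorted_cnts), choices_per_question)):
        row = imcnts.sort_contours(
            sorted_cnts[row_start:row_start + choices_per_question])[0]
        marked = None
        for j, c in enumerate(row):
            # Count the filled-in pixels inside this bubble only.
            mask = np.zeros(thresh.shape, dtype="uint8")
            cv2.drawContours(mask, [c], -1, 255, -1)
            total = cv2.countNonZero(cv2.bitwise_and(thresh, thresh, mask=mask))
            if marked is None or total > marked[0]:
                marked = (total, j)
        if answer_key.get(q) == marked[1]:
            correct += 1
    return correct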