def pipeline(image):
    """Run the page-detection demo pipeline on `image`.

    Downsizes the image, edge-detects, closes/inverts, generates and filters
    bounding boxes, draws them on the downsized copy, and plots the stages.
    If the first edge detector finds no boxes, retries with a more
    aggressive one and perspective-transforms the merged result.

    Returns the downsized image with detected contours drawn on it.
    """
    orig = image.copy()
    downsized, ratio = functions.standard_resize(image, new_width=100.0)
    edged = functions.small_page_edging(downsized)
    processed = functions.closed_inversion(edged)
    boxes = functions.box_generation(processed)
    filtered_boxes = functions.box_filtration(boxes)
    detected = downsized.copy()
    rects = []
    if not filtered_boxes:
        # First detector found nothing; retry with a different Canny setup.
        edged = functions.smaller_page_edging(downsized)
        processed = functions.closed_inversion(edged)
        boxes = functions.box_generation(processed)
        filtered_boxes = functions.box_filtration(boxes)
        final_box = functions.merge_boxes(filtered_boxes)
        if final_box:
            # BUG FIX: the original referenced undefined names `box` and
            # `small_box` here (NameError at runtime). Scale the merged box
            # back up to original-image coordinates, then round to int pixels.
            final_box = final_box * ratio
            final_box = np.round(final_box)
            final_box = final_box.astype(int)
            warped = functions.perspective_transform(orig.copy(), final_box,
                                                     ratio=ratio)
            lined = hough.standard_hough(warped)
        else:
            print("in demo pipeline")
    else:
        for box in boxes:
            detected = cv2.drawContours(detected, [box], 0, (0, 255, 0), 1)
            rects.append(cv2.minAreaRect(box))
        if len(boxes) > 1:
            # Multiple candidates: draw the merged box in blue on top.
            merged = functions.merge_boxes(boxes)
            detected = cv2.drawContours(detected, [merged], 0, (255, 0, 0), 2)
    functions.plot_images(
        [edged, processed, detected],
        ["Edge Detection", "Morphological Operations", "Contour Finding"])
    return detected
def boxes_from_edged(image, edging_function, **edging_args):
    """Edge-detect `image` with `edging_function`, then box and merge.

    Applies the supplied edge detector, performs morphological closing plus
    color inversion, generates bounding boxes, filters them against the
    image dimensions, and merges the survivors.

    Returns a 4-tuple: (merged box, filtered boxes, edged image,
    closed-inversion image).
    """
    width = image.shape[1]
    height = image.shape[0]
    # Edge detection according to the caller-supplied strategy.
    edged = edging_function(image, **edging_args)
    # Morphological closing + color inversion on the edge map.
    closed_invert = functions.closed_inversion(edged)
    # Candidate boxes from the cleaned-up edge map, filtered by image size.
    candidate_boxes = functions.box_generation(closed_invert)
    candidate_boxes = functions.box_filtration(candidate_boxes, width, height)
    merged = functions.merge_boxes(candidate_boxes)
    return merged, candidate_boxes, edged, closed_invert
def text_blobbing(image):
    """Detect a text blob in `image` via dilation and plot the stages.

    Edge-detects the original, dilates heavily to merge text into blobs,
    generates bounding boxes, and (when a box is found) perspective-transforms
    the original to the first box. Plots original / dilated / detected /
    final. Returns None (display-only demo).
    """
    orig = image.copy()
    small_orig = functions.standard_resize(image, new_width=100.0,
                                           return_ratio=False)
    # BUG FIX: time.clock() was removed in Python 3.8; use perf_counter().
    start = time.perf_counter()
    edged = functions.text_edging(orig.copy())
    kernel = np.ones((13, 13), np.uint8)  # original 9x9
    dilated = cv2.dilate(edged, kernel, iterations=7)
    boxes = functions.box_generation(dilated)
    box = []
    detected = small_orig.copy()
    if not boxes:
        # Nothing found: show the (downsized) original as the final image.
        final = detected
    else:
        detected = cv2.drawContours(orig, boxes, 0, (0, 255, 0), 3)
        box = boxes[0]
        final = functions.perspective_transform(orig.copy(), box)
    end = time.perf_counter()  # frame processing time = end - start
    functions.plot_images([orig, dilated, detected, final],
                          ["orig", "dilated", "text_region_method", "final"])
def vanilla_box_drawing(image, drawing_image):
    """Draw every filtered bounding box found in `image` onto `drawing_image`.

    Boxes are generated from `image` and filtered against its width/height;
    each survivor is outlined in green. Returns `drawing_image` (unchanged
    when no boxes survive filtration).
    """
    candidate_boxes = functions.box_generation(image)
    candidate_boxes = functions.box_filtration(candidate_boxes,
                                               image.shape[1],
                                               image.shape[0])
    box = []
    if not candidate_boxes:
        # Nothing detected — hand the drawing surface back untouched.
        return drawing_image
    for box in candidate_boxes:
        drawing_image = cv2.drawContours(drawing_image, [box], 0,
                                         (0, 255, 0), 1)
    return drawing_image
def vanilla_boxing(image):
    """Generate boxes from a grayscale edge image and draw them in green.

    Runs box generation and comparison, converts the image to RGB purely so
    the green outlines are visible, then draws each box. Returns the RGB
    image (box-free when none were generated).
    """
    boxes = functions.box_generation(image)
    functions.boxes_comparison(image, boxes)
    # RGB conversion is only so the green bounding boxes show up.
    image = cv2.cvtColor(image, cv2.COLOR_GRAY2RGB)
    box = []
    detected = image
    if boxes:
        for box in boxes:
            detected = cv2.drawContours(detected, [box], 0, (0, 255, 0), 1)
    else:
        print("NO BOXES FOUND")
    return detected
def downsized_text_blobbing(image):
    """Detect a text region on a downsized copy and plot the stages.

    Downsizes `image`, edge-detects with the downsized-text detector,
    dilates, generates boxes, scales the first box back to original-image
    coordinates, and perspective-transforms the original to it. Plots the
    pipeline stages. Returns None (display-only demo).
    """
    orig = image.copy()
    # <---- RESIZING -----> #
    image, ratio = functions.standard_resize(image, new_width=150.0)
    # <---- RESIZING -----> #
    edged = functions.downsized_text_edging(image.copy())
    kernel = np.ones((3, 3), np.uint8)  # original 9x9
    dilated = cv2.dilate(edged, kernel, iterations=3)  # original was 5 iterations
    points = functions.box_generation(dilated)
    # ROBUSTNESS FIX: the original indexed points[0] unconditionally and
    # raised IndexError on an empty result; guard like the sibling functions.
    if not points:
        functions.plot_images([orig, edged, dilated],
                              ["Original", "Edged", "Dilated"])
        return
    # Scale the first box back up to original-image coordinates.
    points = points[0] * ratio
    final = functions.perspective_transform(orig.copy(), points)
    functions.plot_images([orig, edged, dilated, final],
                          ["Original", "Edged", "Dilated", "Final"])