for contour in contours: x, y, width, height = cv2.boundingRect(contour) area = width * height area_list.append(area) width_list.append(width) height_list.append(height) return area_list, width_list, height_list # Reading the images extracted_img = cv2.imread('extracted_img.jpg') img2 = cv2.imread('img2_underlined.jpg') img2 = cv2.resize(img2, (600, 1200)) img = cv2.imread('extracted_img.jpg') img = crop_img(img) extracted_img = crop_img(extracted_img) extracted_img = img2 _, extracted_img = cv2.threshold(extracted_img, 127, 255, cv2.THRESH_BINARY) extracted_img = cv2.Canny(np.asarray(extracted_img), 100, 200) _, contours, _ = cv2.findContours(extracted_img, cv2.RETR_TREE, cv2.CHAIN_APPROX_SIMPLE) al, wl, hl = counters_area_list(contours, extracted_img) mask = np.zeros(extracted_img.shape, dtype="uint8") for i, c in enumerate(contours):
    # Second CLI argument: path to the annotated (marked-up) image.
    "-a", "--annotated-img", type=str,
    help="please enter the path to the annotated image with extension")
args = parser.parse_args()
original_img_path = args.original_img
annotated_img_path = args.annotated_img
# Loading the images (api.load_and_resize presumably reads both files and
# resizes them to a common working size — TODO confirm against the api module)
original_img, annotated_img = api.load_and_resize(original_img_path, annotated_img_path)
# Cropping: keep only the central text part of the original page
cropped_original = crop_img(original_img)
# NOTE(review): window title 'croped' is a typo, but it is a runtime string,
# so it is left unchanged here.
cv2.imshow('croped', cropped_original)
# Feature detection + matching between the cropped original and the annotated
# page; yields the coordinates of the matched region in the annotated image.
transform_coordinates = api.orb_flann_matcher(cropped_original, annotated_img)
# Find extra (handwritten/added) text using the detected match region
extra_text_contours = api.detect_extra_text(annotated_img, transform_coordinates)
# Find the underlines inside the matched region
underline_contours = api.detect_underline(annotated_img, transform_coordinates)
# Convert to 3-channel BGR so colored annotation overlays can be drawn
# (implies annotated_img is grayscale at this point — see COLOR_GRAY2BGR)
annotated_img = cv2.cvtColor(annotated_img, cv2.COLOR_GRAY2BGR)
# Make a copy of the annotated image
"""
import cv2
import numpy as np

from crop_original import crop_img, find_components

# Load the clean original page (grayscale) and the annotated page, and bring
# them to the same working size so they can be compared pixel-for-pixel.
original_img = cv2.imread('img1.jpg', cv2.IMREAD_GRAYSCALE)
annotated_img = cv2.imread('img2_underlined.jpg')
original_img = cv2.resize(original_img, (600, 1200))
annotated_img = cv2.resize(annotated_img, (600, 1200))
# Untouched copy of the annotated page — presumably used for final output
# drawing so the working copies can be mutated freely; TODO confirm below.
final_img = cv2.imread('img2_underlined.jpg')
final_img = cv2.resize(final_img, (600, 1200))
# Crop the original down to its central text region.
original_img = crop_img(original_img)
# Grayscale view of the annotated page (annotated_img stays BGR).
gray_annotated_img = cv2.cvtColor(annotated_img, cv2.COLOR_BGR2GRAY)
# Thresholding removes background noise like dust particles and small lines
# that appear while scanning the physical document
# threshold(src.img, threshold-value, max_val, thresholding_type)
# NOTE(review): the thresholding/Sobel steps below are disabled; kept for
# reference — consider deleting if they are no longer needed.
#_,original_img = cv2.threshold(original_img,127,255,cv2.THRESH_BINARY)
#_,annotated_img = cv2.threshold(annotated_img,127,255,cv2.THRESH_BINARY)
#sobely = cv2.Sobel(original_img, cv2.CV_64F, 0, 1)


def find_match(original_img, annotated_img):
    # Features