import os
import sys
import argparse

import cv2
import numpy as np
import scipy.ndimage

# Local modules, aliased as they are referenced throughout these snippets
# (argument handling, page cleaning, connected-component utilities, tunable
# defaults, furigana estimation, segmentation and OCR helpers). The exact
# import paths are repo-specific assumptions.
import arg
import clean
import defaults
import furigana
import helper
import connected_components as cc
import segmentation
seg = segmentation  # some snippets use the short alias
from scipy.misc import imsave  # removed in newer SciPy; see note below


def estimate_furigana(img, segmentation):
    (h, w) = img.shape[:2]
    if arg.boolean_value('verbose'):
        print('Estimating furigana in ' + str(h) + 'x' + str(w) + ' image.')

    text_areas = segmentation

    # form binary image from grayscale
    binary_threshold = arg.integer_value('binary_threshold', default_value=defaults.BINARY_THRESHOLD)
    if arg.boolean_value('verbose'):
        print('binarizing images with threshold value of ' + str(binary_threshold))
    binary = clean.binarize(img, threshold=binary_threshold)

    binary_average_size = cc.average_size(binary)
    if arg.boolean_value('verbose'):
        print('average cc size for binarized grayscale image is ' + str(binary_average_size))

    # apply mask and return images
    text_mask = binary_mask(text_areas)
    cleaned = cv2.bitwise_not(text_mask * binary)
    cleaned_average_size = cc.average_size(cleaned)
    if arg.boolean_value('verbose'):
        print('average cc size for cleaned, binarized grayscale image is ' + str(cleaned_average_size))

    columns = scipy.ndimage.gaussian_filter(
        cleaned,
        (defaults.FURIGANA_VERTICAL_SIGMA_MULTIPLIER * binary_average_size,
         defaults.FURIGANA_HORIZONTAL_SIGMA_MULTIPLIER * binary_average_size))
    columns = clean.binarize(columns, threshold=defaults.FURIGANA_BINARY_THRESHOLD)
    furigana = columns * text_mask

    # go through the columns in each text area, and:
    # 1) estimate the standard column width (it should be similar to the
    #    average connected component width)
    # 2) separate out those columns which are significantly thinner (>75%)
    #    than the standard width
    boxes = cc.get_connected_components(furigana)
    furigana_lines = []
    non_furigana_lines = []
    for box in boxes:
        line_width = cc_width(box)
        line_to_left = find_cc_to_left(box, boxes, max_dist=line_width * defaults.FURIGANA_DISTANCE_MULTIPLIER)
        if line_to_left is None:
            non_furigana_lines.append(box)
            continue

        left_line_width = cc_width(line_to_left)
        if line_width < left_line_width * defaults.FURIGANA_WIDTH_THRESHOLD:
            furigana_lines.append(box)
        else:
            non_furigana_lines.append(box)

    furigana_mask = np.zeros(furigana.shape)
    for f in furigana_lines:
        furigana_mask[f[0].start:f[0].stop, f[1].start:f[1].stop] = 255

    furigana = furigana_mask
    if arg.boolean_value('debug'):
        furigana = 0.25 * (columns * text_mask) + 0.25 * img + 0.5 * furigana
    return furigana
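
# --- Sketch: three helpers used by estimate_furigana (binary_mask, cc_width,
# find_cc_to_left) are not part of this excerpt. A minimal, hypothetical
# reconstruction, assuming each box is a (row_slice, col_slice) pair as
# returned by scipy.ndimage.find_objects; not the repo's actual code.

def binary_mask(mask):
    # Byte mask: 1 where input is nonzero, 0 elsewhere.
    return np.array(mask != 0, 'B')

def cc_width(box):
    # Pixel width of a connected component's bounding box.
    return box[1].stop - box[1].start

def find_cc_to_left(box, boxes, max_dist=20):
    # Nearest component whose right edge lies within max_dist pixels to the
    # left of box and which overlaps it vertically; None if there is none.
    candidates = []
    for other in boxes:
        if other is box:
            continue
        gap = box[1].start - other[1].stop
        overlaps = other[0].start < box[0].stop and other[0].stop > box[0].start
        if 0 <= gap <= max_dist and overlaps:
            candidates.append((gap, other))
    if not candidates:
        return None
    return min(candidates, key=lambda c: c[0])[1]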
def translate_page(img, binary_threshold=defaults.BINARY_THRESHOLD, boxes=False, target_lang="en"):
    gray = clean.grayscale(img)
    inv_binary = cv2.bitwise_not(clean.binarize(gray, threshold=binary_threshold))
    binary = clean.binarize(gray, threshold=binary_threshold)
    segmented_image = seg.segment_image(gray)
    segmented_image = segmented_image[:, :, 2]
    components = get_connected_components(segmented_image)
    for component in components:
        speech = img[component]
        if speech.shape[0] <= 0 or speech.shape[1] <= 0:
            continue
        translation = helper.ocr(speech, target_lang)
        if len(translation) > 0:
            # print("added component", translation)
            white_out_text(img, component)
            add_text(img, component, translation)
    if boxes:
        cc.draw_bounding_boxes(img, components, color=(255, 0, 0), line_size=2)
    return img
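
# --- Usage sketch for translate_page; the input path is hypothetical and the
# surrounding module context (seg, helper, white_out_text, add_text) is assumed:
page = cv2.imread('scan.jpg')
translated = translate_page(page, boxes=True, target_lang='en')
cv2.imwrite('scan.translated.png', translated)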
def filter_text_like_areas(img, segmentation, average_size):
    # see if a given rectangular area (2d slice) is very text like
    # First step is to estimate furigana-like elements so they can be masked
    furigana_areas = furigana.estimate_furigana(img, segmentation)
    furigana_mask = np.array(furigana_areas == 0, 'B')

    # binarize the image, clean it via the segmentation and remove furigana too
    binary_threshold = arg.integer_value('binary_threshold', default_value=defaults.BINARY_THRESHOLD)
    if arg.boolean_value('verbose'):
        print('binarizing images with threshold value of ' + str(binary_threshold))
    binary = clean.binarize(img, threshold=binary_threshold)

    binary_average_size = cc.average_size(binary)
    if arg.boolean_value('verbose'):
        print('average cc size for binarized grayscale image is ' + str(binary_average_size))

    segmentation_mask = np.array(segmentation != 0, 'B')
    cleaned = binary * segmentation_mask * furigana_mask
    inv_cleaned = cv2.bitwise_not(cleaned)

    areas = cc.get_connected_components(segmentation)
    text_like_areas = []
    nontext_like_areas = []
    for area in areas:
        if text_like_histogram(cleaned, area, average_size):
            text_like_areas.append(area)
        else:
            nontext_like_areas.append(area)

    return (text_like_areas, nontext_like_areas)
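
# --- The masking convention used above: boolean comparisons are cast to
# unsigned bytes ('B') so masks combine by plain multiplication. A toy
# illustration with made-up values:
toy = np.array([[0, 255], [255, 0]], dtype=np.uint8)
keep = np.array(toy != 0, 'B')  # 1 where toy is nonzero, 0 elsewhere
print(toy * keep)               # pixels outside the mask are zeroed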
def main():
    parser = arg.parser
    parser = argparse.ArgumentParser(description='Basic OCR on raw manga scan.')
    parser.add_argument('infile', help='Input (color) raw Manga scan image to clean.')
    parser.add_argument('-o', '--output', dest='outfile', help='Output (color) cleaned raw manga scan image.')
    parser.add_argument('-v', '--verbose', help='Verbose operation. Print status messages during processing.', action="store_true")
    # parser.add_argument('-d','--debug', help='Overlay input image into output.', action="store_true")
    parser.add_argument('--sigma', help='Std dev of Gaussian preprocessing filter.', type=float, default=None)
    parser.add_argument('--binary_threshold', help='Binarization threshold value from 0 to 255.', type=int, default=defaults.BINARY_THRESHOLD)
    parser.add_argument('--furigana', help='Attempt to suppress furigana characters to improve OCR.', action="store_true")
    parser.add_argument('--segment_threshold', help='Threshold for nonzero pixels to separate vert/horiz text lines.', type=int, default=defaults.SEGMENTATION_THRESHOLD)
    arg.value = parser.parse_args()

    infile = arg.string_value('infile')
    outfile = arg.string_value('outfile', default_value=infile + '.html')
    if not os.path.isfile(infile):
        print('Please provide a regular existing input file. Use -h option for help.')
        sys.exit(-1)
    if arg.boolean_value('verbose'):
        print('\tProcessing file ' + infile)
        print('\tGenerating output ' + outfile)

    img = cv2.imread(infile)
    gray = clean.grayscale(img)
    binary = clean.binarize(gray)
    segmented = segmentation.segment_image_file(infile)
    components = cc.get_connected_components(segmented)
    # Perhaps do stricter filtering of connected components, since sections of
    # characters will not be dropped from run-length smoothed areas? Yes.
    # Results are quite good.
    # filtered = cc.filter_by_size(img, components, average_size*100, average_size*1)

    blurbs = ocr_on_bounding_boxes(binary, components)
    for blurb in blurbs:
        print(str(blurb.x) + ',' + str(blurb.y) + ' ' + str(blurb.w) + 'x' + str(blurb.h)
              + ' ' + str(blurb.confidence) + '% :' + blurb.text)
def segment_image(img, max_scale=defaults.CC_SCALE_MAX, min_scale=defaults.CC_SCALE_MIN):
    (h, w) = img.shape[:2]
    if arg.boolean_value('verbose'):
        print('Segmenting ' + str(h) + 'x' + str(w) + ' image.')

    # create gaussian filtered and unfiltered binary images
    binary_threshold = arg.integer_value('binary_threshold', default_value=defaults.BINARY_THRESHOLD)
    if arg.boolean_value('verbose'):
        print('binarizing images with threshold value of ' + str(binary_threshold))
    binary = clean.binarize(img, threshold=binary_threshold)

    binary_average_size = cc.average_size(binary)
    if arg.boolean_value('verbose'):
        print('average cc size for binarized grayscale image is ' + str(binary_average_size))

    '''
    The sigma needed for Gaussian filtering (to remove screentones and other
    noise) seems to be a function of the resolution the manga was scanned at
    (or the original page size; I'm not sure). Assuming a 'normal' page size
    for a phonebook-style manga of 17.5cm x 11.5cm (6.8in x 4.5in), a 300 dpi
    scan gives an image of about 1900x1350, which requires a sigma of 1.5 to
    1.8. I'm encountering many smaller images that may come from nonstandard
    scanning dpi values or just smaller magazines; I haven't found hard info
    on this yet. They require sigma values of about 0.5 to 0.7. For now I
    therefore calculate the (unspecified) sigma as a linear function of the
    vertical image resolution.
    '''
    sigma = (0.8 / 676.0) * float(h) - 0.9
    sigma = arg.float_value('sigma', default_value=sigma)
    if arg.boolean_value('verbose'):
        print('Applying Gaussian filter with sigma (std dev) of ' + str(sigma))
    gaussian_filtered = scipy.ndimage.gaussian_filter(img, sigma=sigma)
    gaussian_binary = clean.binarize(gaussian_filtered, threshold=binary_threshold)

    # Draw out statistics on average connected component size in the rescaled, binary image
    average_size = cc.average_size(gaussian_binary)
    if arg.boolean_value('verbose'):
        print('Binarized Gaussian filtered image average cc size: ' + str(average_size))
    max_size = average_size * max_scale
    min_size = average_size * min_scale

    # primary mask is connected components filtered by size
    mask = cc.form_mask(gaussian_binary, max_size, min_size)
    # secondary mask is formed from canny edges
    canny_mask = clean.form_canny_mask(gaussian_filtered, mask=mask)
    # final mask is size filtered connected components on canny mask
    final_mask = cc.form_mask(canny_mask, max_size, min_size)

    # apply mask and return images
    cleaned = cv2.bitwise_not(final_mask * binary)
    text_only = cleaned2segmented(cleaned, average_size)

    # if desired, suppress furigana characters (which interfere with OCR)
    suppress_furigana = arg.boolean_value('furigana')
    if suppress_furigana:
        if arg.boolean_value('verbose'):
            print('Attempting to suppress furigana characters which interfere with OCR.')
        furigana_mask = furigana.estimate_furigana(cleaned, text_only)
        furigana_mask = np.array(furigana_mask == 0, 'B')
        cleaned = cv2.bitwise_not(cleaned) * furigana_mask
        cleaned = cv2.bitwise_not(cleaned)
        text_only = cleaned2segmented(cleaned, average_size)

    (text_like_areas, nontext_like_areas) = filter_text_like_areas(img, segmentation=text_only, average_size=average_size)
    if arg.boolean_value('verbose'):
        print('**********there are ' + str(len(text_like_areas)) + ' text like areas total.')
    text_only = np.zeros(img.shape)
    cc.draw_bounding_boxes(text_only, text_like_areas, color=(255), line_size=-1)

    if arg.boolean_value('debug'):
        text_only = 0.5 * text_only + 0.5 * img
        # text_rows = 0.5*text_rows + 0.5*gray
        # text_columns = 0.5*text_columns + 0.5*gray

    segmented_image = np.zeros((h, w, 3), np.uint8)
    segmented_image[:, :, 0] = img
    segmented_image[:, :, 1] = text_only
    segmented_image[:, :, 2] = text_only
    return segmented_image
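
# --- Sanity check of the resolution-to-sigma heuristic in segment_image,
#     sigma = (0.8/676)*h - 0.9, at a few hypothetical page heights:
for page_height in (1200, 1350, 1900, 2100):
    print(page_height, round((0.8 / 676.0) * page_height - 0.9, 2))
# prints: 1200 0.52, 1350 0.7, 1900 1.35, 2100 1.59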
# (text-area detection driver; continues from its earlier argument setup)
parser.add_argument('--additional_filtering', help='Attempt to filter false text positives by histogram processing.', action="store_true")
arg.value = parser.parse_args()

infile = arg.string_value('infile')
outfile = arg.string_value('outfile', default_value=infile + '.text_areas.png')
if not os.path.isfile(infile):
    print('Please provide a regular existing input file. Use -h option for help.')
    sys.exit(-1)

img = cv2.imread(infile)
gray = clean.grayscale(img)
binary_threshold = arg.integer_value('binary_threshold', default_value=defaults.BINARY_THRESHOLD)
if arg.boolean_value('verbose'):
    print('Binarizing with threshold value of ' + str(binary_threshold))
inv_binary = cv2.bitwise_not(clean.binarize(gray, threshold=binary_threshold))
binary = clean.binarize(gray, threshold=binary_threshold)

segmented_image = seg.segment_image(gray)
segmented_image = segmented_image[:, :, 2]
components = cc.get_connected_components(segmented_image)
cc.draw_bounding_boxes(img, components, color=(255, 0, 0), line_size=2)
imsave(outfile, img)

if arg.boolean_value('display'):
    cv2.imshow('segmented_image', segmented_image)
    if cv2.waitKey(0) == 27:
        cv2.destroyAllWindows()
    cv2.destroyAllWindows()
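
# --- Note: imsave above is the long-deprecated scipy.misc.imsave (removed in
# SciPy 1.2+). Since img comes from cv2.imread (a BGR array), a drop-in
# replacement on modern stacks is simply:
#     cv2.imwrite(outfile, img)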
# Variant of the same driver with intermediate-image display for debugging:
img = cv2.imread(infile)
cv2.imshow('srcimg', img)
gray = clean.grayscale(img)
binary_threshold = arg.integer_value('binary_threshold', default_value=defaults.BINARY_THRESHOLD)
if arg.boolean_value('verbose'):
    print('Binarizing with threshold value of ' + str(binary_threshold))
inv_binary = cv2.bitwise_not(clean.binarize(gray, threshold=binary_threshold))
# cv2.imshow('inv_binary', inv_binary)
binary = clean.binarize(gray, threshold=binary_threshold)
# cv2.imshow('binary', binary)
segmented_image = seg.segment_image(gray)
cv2.imshow('segmented_image', segmented_image)
segmented_image = segmented_image[:, :, 2]
components = cc.get_connected_components(segmented_image)
cc.draw_bounding_boxes(img, components, color=(255, 0, 0), line_size=2)
if cv2.contourArea(cnt) > 20000:
    epsilon = 0.01 * cv2.arcLength(cnt, True)
    approx = cv2.approxPolyDP(cnt, epsilon, True)
    if len(approx) == 4:
        # note: OpenCV arrays are indexed (rows, cols), i.e. (height, width)
        mask = np.zeros([width, height, 3], dtype="uint8")
        mask_gray = cv2.cvtColor(mask, cv2.COLOR_BGR2GRAY)
        cv2.drawContours(mask_gray, [cnt], 0, (255, 255, 255), cv2.FILLED)
        mask_gray_white = cv2.bitwise_not(mask_gray)
        # keep only the region inside the contour, then flood the rest white
        masked_img = cv2.bitwise_and(borderWhite, mask_gray)
        masked_img = cv2.bitwise_or(mask_gray_white, masked_img)
        x, y, w, h = cv2.boundingRect(cnt)
        crop_img = masked_img[y:y + h, x:x + w]

        binary_threshold = arg.integer_value('binary_threshold', default_value=defaults.BINARY_THRESHOLD)
        if arg.boolean_value('verbose'):
            print('Binarizing with threshold value of ' + str(binary_threshold))
        inv_binary = cv2.bitwise_not(clean.binarize(crop_img, threshold=binary_threshold))
        binary = clean.binarize(crop_img, threshold=binary_threshold)

        segmented_image = seg.segment_image(crop_img)
        segmented_image = segmented_image[:, :, 2]
        mySceneImage = np.copy(segmented_image)
        # OpenCV 3.x API: findContours returns (image, contours, hierarchy)
        image, contours, hierarchy = cv2.findContours(mySceneImage, cv2.RETR_LIST, cv2.CHAIN_APPROX_SIMPLE)
        for cnt in contours:
            if cv2.contourArea(cnt) > 3000:
                cv2.drawContours(crop_img, [cnt], 0, (255, 255, 255), cv2.FILLED)
        # cv2.imshow('image', crop_img)
        # cv2.waitKey(0)
        # cv2.imwrite(pathSolved + "/" + str(counter) + ".jpg", crop_img)

        edges = cv2.Canny(crop_img, 500, 500)
        edgesContours = cv2.findContours(edges, cv2.RETR_LIST, cv2.CHAIN_APPROX_SIMPLE)
        CannyLines.append(len(edgesContours[1]))
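
# --- The 3-value unpacking of cv2.findContours above targets OpenCV 3.x;
# OpenCV 4 returns only (contours, hierarchy). A small compatibility shim
# (an assumption, not part of the original code):
def find_contours_compat(image, mode, method):
    result = cv2.findContours(image, mode, method)
    return result[0] if len(result) == 2 else result[1]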