def get_plot(hvf_image_gray, y_ratio, y_size, x_ratio, x_size, plot_type, icon_type):
    """Slice a plot region out of the grayscale HVF image and extract its values.

    Returns a tuple (plot_array, tight_plot): the values pulled from the plot,
    and the plot image cropped tightly to its detected axes.
    """
    # Slice the requested region from both the raw image and a preprocessed copy:
    raw_slice = Image_Utils.slice_image(
        hvf_image_gray, y_ratio, y_size, x_ratio, x_size)
    processed_full = Image_Utils.preprocess_image(hvf_image_gray.copy())
    processed_slice = Image_Utils.slice_image(
        processed_full, y_ratio, y_size, x_ratio, x_size)

    # First-pass bounding box from the processed slice:
    corner, box_w, box_h = Hvf_Plot_Array.get_bounding_box(processed_slice)

    # The raw value plot can have a discontinuity along the x axis (with the
    # triangle icon) that causes a mis-fit, so draw the axis back in and
    # detect the bounding box again:
    axis_y = corner[1] + int(box_h / 2)
    line_thickness = max(int(box_h * 0.015), 1)
    cv2.line(processed_slice,
             (corner[0], axis_y),
             (corner[0] + box_w, axis_y),
             (0),
             line_thickness)
    corner, box_w, box_h = Hvf_Plot_Array.get_bounding_box(processed_slice)

    # Debug aids — must stay disabled, because drawing on the slice would
    # interfere with the later plot extraction:
    #cv2.rectangle(raw_slice, corner, (corner[0] + box_w, corner[1] + box_h), 0, 2)
    #show_plot_func = (lambda : cv2.imshow("Bound rect for plot " + plot_type, raw_slice))
    #Logger.get_logger().log_function(Logger.DEBUG_FLAG_DEBUG, show_plot_func);
    #cv2.waitKey();

    # Crop the original (unprocessed) slice down to the detected axes:
    tight_plot = raw_slice[corner[1]:(corner[1] + box_h),
                           corner[0]:(corner[0] + box_w)]

    # Pull the individual values out of the cropped plot:
    plot_array = Hvf_Plot_Array.extract_values_from_plot(
        tight_plot, plot_type, icon_type)

    return plot_array, tight_plot
def perform_ocr(img):
    """Run tesseract OCR over an image and return the extracted text.

    Lazily creates a single shared PyTessBaseAPI handle on first use and
    reuses it across calls.
    """
    # Clean up the image before OCR:
    preprocessed = Image_Utils.preprocess_image(img)

    # pytesseract/tesserocr operates on PIL images, so convert the array:
    pil_image = Image.fromarray(preprocessed)

    # Construct the shared API handle on first use (single-column layout):
    if not Ocr_Utils.OCR_API_HANDLE:
        Ocr_Utils.OCR_API_HANDLE = PyTessBaseAPI(psm=PSM.SINGLE_COLUMN)
        #Ocr_Utils.OCR_API_HANDLE = PyTessBaseAPI(psm=PSM.SINGLE_BLOCK)

    Ocr_Utils.OCR_API_HANDLE.SetImage(pil_image)

    # Return the extracted text:
    return Ocr_Utils.OCR_API_HANDLE.GetUTF8Text()
def is_pattern_not_shown(hvf_image_gray, y_ratio, y_size, x_ratio, x_size):
    """Return True when the expected plot pattern is absent from the slice.

    A bounding box is detected within the preprocessed slice; if the box is
    small relative to the slice in either dimension, the pattern is declared
    not shown.
    """
    # Full-image dimensions, needed to express the box size as a fraction:
    full_height = np.size(hvf_image_gray, 0)
    full_width = np.size(hvf_image_gray, 1)

    # Preprocess, then cut out the region of interest:
    preprocessed = Image_Utils.preprocess_image(hvf_image_gray)
    region = Image_Utils.slice_image(
        preprocessed, y_ratio, y_size, x_ratio, x_size)

    # Bounding box of whatever content the region contains:
    _, box_w, box_h = Hvf_Plot_Array.get_bounding_box(region)

    # Box size as a fraction of the slice size:
    width_fraction = box_w / (x_size * full_width)
    height_fraction = box_h / (y_size * full_height)

    # Below this fraction in either dimension, treat the pattern as absent:
    threshold_size = 0.3
    return (width_fraction < threshold_size) or (height_fraction < threshold_size)