class OCRService(WebService):
    def init_rec(self):
        self.ocr_reader = OCRReader()

    def preprocess(self, feed=[], fetch=[]):
        # TODO: handle batches of rec images
        img_list = []
        for feed_data in feed:
            data = base64.b64decode(feed_data["image"].encode('utf8'))
            data = np.fromstring(data, np.uint8)
            im = cv2.imdecode(data, cv2.IMREAD_COLOR)
            img_list.append(im)
        feed_list = []
        max_wh_ratio = 0
        for i, boximg in enumerate(img_list):
            h, w = boximg.shape[0:2]
            wh_ratio = w * 1.0 / h
            max_wh_ratio = max(max_wh_ratio, wh_ratio)
        for img in img_list:
            norm_img = self.ocr_reader.resize_norm_img(img, max_wh_ratio)
            feed = {"image": norm_img}
            feed_list.append(feed)
        fetch = ["ctc_greedy_decoder_0.tmp_0", "softmax_0.tmp_0"]
        return feed_list, fetch

    def postprocess(self, feed={}, fetch=[], fetch_map=None):
        rec_res = self.ocr_reader.postprocess(fetch_map, with_score=True)
        res_lst = []
        for res in rec_res:
            res_lst.append(res[0])
        res = {"res": res_lst}
        return res
class OCRService(WebService):
    def init_rec(self):
        self.ocr_reader = OCRReader()

    def preprocess(self, feed=[], fetch=[]):
        img_list = []
        for feed_data in feed:
            data = base64.b64decode(feed_data["image"].encode('utf8'))
            data = np.fromstring(data, np.uint8)
            im = cv2.imdecode(data, cv2.IMREAD_COLOR)
            img_list.append(im)
        max_wh_ratio = 0
        for i, boximg in enumerate(img_list):
            h, w = boximg.shape[0:2]
            wh_ratio = w * 1.0 / h
            max_wh_ratio = max(max_wh_ratio, wh_ratio)
        _, w, h = self.ocr_reader.resize_norm_img(img_list[0],
                                                  max_wh_ratio).shape
        imgs = np.zeros((len(img_list), 3, w, h)).astype('float32')
        for i, img in enumerate(img_list):
            norm_img = self.ocr_reader.resize_norm_img(img, max_wh_ratio)
            imgs[i] = norm_img
        feed = {"image": imgs.copy()}
        fetch = ["ctc_greedy_decoder_0.tmp_0", "softmax_0.tmp_0"]
        return feed, fetch

    def postprocess(self, feed={}, fetch=[], fetch_map=None):
        rec_res = self.ocr_reader.postprocess(fetch_map, with_score=True)
        res_lst = []
        for res in rec_res:
            res_lst.append(res[0])
        res = {"res": res_lst}
        return res
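# Hedged usage sketch (not part of the snippets above): a recognition-only
# WebService subclass like the ones shown is usually instantiated and served
# roughly as follows. The service name, model directory, workdir, port, and
# device are assumptions chosen for illustration.
ocr_service = OCRService(name="ocr")
ocr_service.load_model_config("ocr_rec_model")
ocr_service.init_rec()
ocr_service.prepare_server(workdir="workdir", port=9292, device="cpu")
ocr_service.run_rpc_service()
ocr_service.run_web_service()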
class OCRService(WebService):
    def init_det_client(self, det_port, det_client_config):
        self.det_preprocess = Sequential([
            ResizeByFactor(32, 960), Div(255),
            Normalize([0.485, 0.456, 0.406], [0.229, 0.224, 0.225]),
            Transpose((2, 0, 1))
        ])
        self.det_client = Client()
        self.det_client.load_client_config(det_client_config)
        self.det_client.connect(["127.0.0.1:{}".format(det_port)])
        self.ocr_reader = OCRReader()

    def preprocess(self, feed=[], fetch=[]):
        data = base64.b64decode(feed[0]["image"].encode('utf8'))
        data = np.fromstring(data, np.uint8)
        im = cv2.imdecode(data, cv2.IMREAD_COLOR)
        ori_h, ori_w, _ = im.shape
        det_img = self.det_preprocess(im)
        det_out = self.det_client.predict(
            feed={"image": det_img}, fetch=["concat_1.tmp_0"], batch=False)
        _, new_h, new_w = det_img.shape
        filter_func = FilterBoxes(10, 10)
        post_func = DBPostProcess({
            "thresh": 0.3,
            "box_thresh": 0.5,
            "max_candidates": 1000,
            "unclip_ratio": 1.5,
            "min_size": 3
        })
        sorted_boxes = SortedBoxes()
        ratio_list = [float(new_h) / ori_h, float(new_w) / ori_w]
        dt_boxes_list = post_func(det_out["concat_1.tmp_0"], [ratio_list])
        dt_boxes = filter_func(dt_boxes_list[0], [ori_h, ori_w])
        dt_boxes = sorted_boxes(dt_boxes)
        get_rotate_crop_image = GetRotateCropImage()
        feed_list = []
        img_list = []
        max_wh_ratio = 0
        for i, dtbox in enumerate(dt_boxes):
            boximg = get_rotate_crop_image(im, dt_boxes[i])
            img_list.append(boximg)
            h, w = boximg.shape[0:2]
            wh_ratio = w * 1.0 / h
            max_wh_ratio = max(max_wh_ratio, wh_ratio)
        for img in img_list:
            norm_img = self.ocr_reader.resize_norm_img(img, max_wh_ratio)
            feed_list.append(norm_img[np.newaxis, :])
        feed_batch = {"image": np.concatenate(feed_list, axis=0)}
        fetch = ["ctc_greedy_decoder_0.tmp_0", "softmax_0.tmp_0"]
        return feed_batch, fetch, True

    def postprocess(self, feed={}, fetch=[], fetch_map=None):
        rec_res = self.ocr_reader.postprocess(fetch_map, with_score=True)
        res_lst = []
        for res in rec_res:
            res_lst.append(res[0])
        res = {"res": res_lst}
        return res
def init_det_client(self, det_port, det_client_config):
    self.det_preprocess = Sequential([
        ResizeByFactor(32, 960), Div(255),
        Normalize([0.485, 0.456, 0.406], [0.229, 0.224, 0.225]),
        Transpose((2, 0, 1))
    ])
    self.det_client = Client()
    self.det_client.load_client_config(det_client_config)
    self.det_client.connect(["127.0.0.1:{}".format(det_port)])
    self.ocr_reader = OCRReader()
def init_det_debugger(self, det_model_config):
    self.det_preprocess = Sequential([
        ResizeByFactor(32, 960), Div(255),
        Normalize([0.485, 0.456, 0.406], [0.229, 0.224, 0.225]),
        Transpose((2, 0, 1))
    ])
    self.det_client = Debugger()
    self.det_client.load_model_config(
        det_model_config, gpu=True, profile=False)
    self.ocr_reader = OCRReader()
def init_rec(self):
    self.ocr_reader = OCRReader()
    self.text_recognizer = TextRecognizerHelper(global_args)
def init_rec(self):
    self.ocr_reader = OCRReader()
def init_op(self):
    self.ocr_reader = OCRReader()
    self.get_rotate_crop_image = GetRotateCropImage()
    self.sorted_boxes = SortedBoxes()
class RecOp(Op):
    def init_op(self):
        self.ocr_reader = OCRReader()
        self.get_rotate_crop_image = GetRotateCropImage()
        self.sorted_boxes = SortedBoxes()

    def set_dynamic_shape_info(self):
        """
        When TensorRT is enabled (configured in config.yml) and the input
        shape differs from request to request, use this method to configure
        the TensorRT dynamic shapes used by this op's model.
        """
        min_input_shape = {"x": [1, 3, 32, 10], "lstm_1.tmp_0": [1, 1, 128]}
        max_input_shape = {
            "x": [50, 3, 32, 1000],
            "lstm_1.tmp_0": [500, 50, 128]
        }
        opt_input_shape = {"x": [6, 3, 32, 100], "lstm_1.tmp_0": [25, 5, 128]}
        self.dynamic_shape_info = {
            "min_input_shape": min_input_shape,
            "max_input_shape": max_input_shape,
            "opt_input_shape": opt_input_shape,
        }

    def preprocess(self, input_dicts, data_id, log_id):
        (_, input_dict), = input_dicts.items()
        raw_im = input_dict["image"]
        data = np.frombuffer(raw_im, np.uint8)
        im = cv2.imdecode(data, cv2.IMREAD_COLOR)
        dt_boxes = input_dict["dt_boxes"]
        dt_boxes = self.sorted_boxes(dt_boxes)
        feed_list = []
        img_list = []
        max_wh_ratio = 0

        ## One batch: the type of feed_data is dict.
        """
        for i, dtbox in enumerate(dt_boxes):
            boximg = self.get_rotate_crop_image(im, dt_boxes[i])
            img_list.append(boximg)
            h, w = boximg.shape[0:2]
            wh_ratio = w * 1.0 / h
            max_wh_ratio = max(max_wh_ratio, wh_ratio)
        _, w, h = self.ocr_reader.resize_norm_img(img_list[0],
                                                  max_wh_ratio).shape
        imgs = np.zeros((len(img_list), 3, w, h)).astype('float32')
        for id, img in enumerate(img_list):
            norm_img = self.ocr_reader.resize_norm_img(img, max_wh_ratio)
            imgs[id] = norm_img
        feed = {"image": imgs.copy()}
        """

        ## Many mini-batches: the type of feed_data is list.
        max_batch_size = len(dt_boxes)

        # If max_batch_size is 0, skip the predict stage.
        if max_batch_size == 0:
            return {}, True, None, ""
        boxes_size = len(dt_boxes)
        batch_size = boxes_size // max_batch_size
        rem = boxes_size % max_batch_size
        #_LOGGER.info("max_batch_len:{}, batch_size:{}, rem:{}, boxes_size:{}".format(max_batch_size, batch_size, rem, boxes_size))
        for bt_idx in range(0, batch_size + 1):
            imgs = None
            boxes_num_in_one_batch = 0
            if bt_idx == batch_size:
                if rem == 0:
                    continue
                else:
                    boxes_num_in_one_batch = rem
            elif bt_idx < batch_size:
                boxes_num_in_one_batch = max_batch_size
            else:
                _LOGGER.error(
                    "batch_size error, bt_idx={}, batch_size={}".format(
                        bt_idx, batch_size))
                break

            start = bt_idx * max_batch_size
            end = start + boxes_num_in_one_batch
            img_list = []
            for box_idx in range(start, end):
                boximg = self.get_rotate_crop_image(im, dt_boxes[box_idx])
                img_list.append(boximg)
                h, w = boximg.shape[0:2]
                wh_ratio = w * 1.0 / h
                max_wh_ratio = max(max_wh_ratio, wh_ratio)
            _, w, h = self.ocr_reader.resize_norm_img(img_list[0],
                                                      max_wh_ratio).shape
            #_LOGGER.info("---- idx:{}, w:{}, h:{}".format(bt_idx, w, h))

            imgs = np.zeros(
                (boxes_num_in_one_batch, 3, w, h)).astype('float32')
            for id, img in enumerate(img_list):
                norm_img = self.ocr_reader.resize_norm_img(img, max_wh_ratio)
                imgs[id] = norm_img

            feed = {"x": imgs.copy()}
            feed_list.append(feed)
        #_LOGGER.info("feed_list : {}".format(feed_list))

        return feed_list, False, None, ""

    def postprocess(self, input_dicts, fetch_data, data_id, log_id):
        res_list = []
        if isinstance(fetch_data, dict):
            if len(fetch_data) > 0:
                rec_batch_res = self.ocr_reader.postprocess_ocrv2(
                    fetch_data, with_score=True)
                for res in rec_batch_res:
                    res_list.append(res[0])
        elif isinstance(fetch_data, list):
            for one_batch in fetch_data:
                one_batch_res = self.ocr_reader.postprocess_ocrv2(
                    one_batch, with_score=True)
                for res in one_batch_res:
                    res_list.append(res[0])

        res = {"res": str(res_list)}
        return res, None, ""
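# Hedged usage sketch (not part of the RecOp code above): in the pipeline
# examples, a recognition op like RecOp is usually chained after a detection
# op and served through a pipeline WebService. DetOp, the service name, and
# the config.yml path are assumptions used only for illustration.
class OcrService(WebService):
    def get_pipeline_response(self, read_op):
        det_op = DetOp(name="det", input_ops=[read_op])
        rec_op = RecOp(name="rec", input_ops=[det_op])
        return rec_op


ocr_service = OcrService(name="ocr")
ocr_service.prepare_pipeline_config("config.yml")
ocr_service.run_service()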
client.load_client_config(sys.argv[1:])
client.connect(["127.0.0.1:9293"])

import paddle

test_img_dir = "imgs/"


def cv2_to_base64(image):
    return base64.b64encode(image)  #data.tostring()).decode('utf8')


for img_file in os.listdir(test_img_dir):
    with open(os.path.join(test_img_dir, img_file), 'rb') as file:
        image_data = file.read()
    image = cv2_to_base64(image_data)
    fetch_map = client.predict(
        feed={"image": image},
        fetch=["ctc_greedy_decoder_0.tmp_0", "softmax_0.tmp_0"],
        batch=True)
    result = {}
    result["score"] = fetch_map["softmax_0.tmp_0"]
    del fetch_map["softmax_0.tmp_0"]
    rec_res = OCRReader().postprocess(fetch_map, with_score=False)
    res_lst = []
    for res in rec_res:
        res_lst.append(res[0])
    result["res"] = res_lst
    print(result)
def init_rec(self):
    self.ocr_reader = OCRReader()
    self.text_classifier = TextClassifierHelper(global_args)
class OCRService(WebService):
    def init_det_debugger(self, det_model_config):
        self.det_preprocess = Sequential([
            ResizeByFactor(32, 960), Div(255),
            Normalize([0.485, 0.456, 0.406], [0.229, 0.224, 0.225]),
            Transpose((2, 0, 1))
        ])
        self.det_client = Debugger()
        if sys.argv[1] == 'gpu':
            self.det_client.load_model_config(
                det_model_config, gpu=True, profile=False)
        elif sys.argv[1] == 'cpu':
            self.det_client.load_model_config(
                det_model_config, gpu=False, profile=False)
        self.ocr_reader = OCRReader()

    def preprocess(self, feed=[], fetch=[]):
        data = base64.b64decode(feed[0]["image"].encode('utf8'))
        data = np.fromstring(data, np.uint8)
        im = cv2.imdecode(data, cv2.IMREAD_COLOR)
        ori_h, ori_w, _ = im.shape
        det_img = self.det_preprocess(im)
        _, new_h, new_w = det_img.shape
        det_img = det_img[np.newaxis, :]
        det_img = det_img.copy()
        det_out = self.det_client.predict(
            feed={"image": det_img}, fetch=["concat_1.tmp_0"])
        filter_func = FilterBoxes(10, 10)
        post_func = DBPostProcess({
            "thresh": 0.3,
            "box_thresh": 0.5,
            "max_candidates": 1000,
            "unclip_ratio": 1.5,
            "min_size": 3
        })
        sorted_boxes = SortedBoxes()
        ratio_list = [float(new_h) / ori_h, float(new_w) / ori_w]
        dt_boxes_list = post_func(det_out["concat_1.tmp_0"], [ratio_list])
        dt_boxes = filter_func(dt_boxes_list[0], [ori_h, ori_w])
        dt_boxes = sorted_boxes(dt_boxes)
        get_rotate_crop_image = GetRotateCropImage()
        img_list = []
        max_wh_ratio = 0
        for i, dtbox in enumerate(dt_boxes):
            boximg = get_rotate_crop_image(im, dt_boxes[i])
            img_list.append(boximg)
            h, w = boximg.shape[0:2]
            wh_ratio = w * 1.0 / h
            max_wh_ratio = max(max_wh_ratio, wh_ratio)
        if len(img_list) == 0:
            return [], []
        _, w, h = self.ocr_reader.resize_norm_img(img_list[0],
                                                  max_wh_ratio).shape
        imgs = np.zeros((len(img_list), 3, w, h)).astype('float32')
        for id, img in enumerate(img_list):
            norm_img = self.ocr_reader.resize_norm_img(img, max_wh_ratio)
            imgs[id] = norm_img
        feed = {"image": imgs.copy()}
        fetch = ["ctc_greedy_decoder_0.tmp_0", "softmax_0.tmp_0"]
        return feed, fetch

    def postprocess(self, feed={}, fetch=[], fetch_map=None):
        rec_res = self.ocr_reader.postprocess(fetch_map, with_score=True)
        res_lst = []
        for res in rec_res:
            res_lst.append(res[0])
        res = {"res": res_lst}
        return res
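# Hedged launch sketch (an assumption, not taken from the snippet above):
# since this combined det+rec service reads the device from sys.argv[1], it
# would typically be started as something like "python ocr_web_server.py cpu".
# The model directory names, workdir, port, and run_debugger_service() call
# below follow the usual local-predictor example layout and are illustrative.
ocr_service = OCRService(name="ocr")
ocr_service.load_model_config("ocr_rec_model")
ocr_service.prepare_server(workdir="workdir", port=9292)
ocr_service.init_det_debugger(det_model_config="ocr_det_model")
ocr_service.run_debugger_service()
ocr_service.run_web_service()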