def crnnRec(im, boxes, leftAdjust=False, rightAdjust=False, alph=0.2, f=1.0):
    """Turn detected text boxes into geometry records for OCR post-processing.

    The CRNN recognition step is currently disabled (it was commented out),
    so every result carries an empty ``text`` field; only the box geometry
    is computed.

    Args:
        im: input image as a numpy array (converted to a PIL image internally).
        boxes: iterable of text boxes in the format expected by ``solve``.
        leftAdjust: widen the crop on the left side (passed to ``rotate_cut_img``).
        rightAdjust: widen the crop on the right side.
        alph: widening ratio passed to ``rotate_cut_img``.
        f: scale factor applied to the output center/size values.

    Returns:
        list of dicts with keys ``cx``, ``cy``, ``text`` (always ``''``),
        ``w``, ``h`` and ``degree`` (angle in degrees).
    """
    results = []
    im = Image.fromarray(im)
    for box in boxes:
        # solve() yields the rotation angle (radians), box size and center.
        degree, w, h, cx, cy = solve(box)
        # The cropped patch itself is unused while recognition is disabled;
        # only the adjusted width/height are kept.
        _partImg, newW, newH = rotate_cut_img(im, degree, box, w, h,
                                              leftAdjust, rightAdjust, alph)
        results.append({
            'cx': cx * f,
            'cy': cy * f,
            'text': '',  # recognition disabled — placeholder only
            'w': newW * f,
            'h': newH * f,
            'degree': degree * 180.0 / np.pi,  # radians -> degrees
        })
    return results
def plot_boxes(img, angle, result, color=(0, 0, 0)):
    # Draw rotated text boxes (labelled with their running index) on a PIL image.
    # NOTE(review): this definition is shadowed by a later `plot_boxes` with a
    # different signature, so it appears to be dead/legacy code — confirm before
    # relying on it.
    # NOTE(review): reads line['angle'] and passes it straight to xy_rotate_box,
    # while sibling code stores the key 'degree' (in degrees) and converts to
    # radians first — verify which producer actually feeds this function.
    tmp = np.array(img)
    c = color
    w, h = img.size  # PIL image: size is (width, height)
    thick = int((h + w) / 300)  # drawing thickness scaled to the image size
    i = 0  # running box index, drawn as the label text
    if angle in [90, 270]:
        # page rotated sideways: swap width/height for box_rotate
        imgW, imgH = img.size[::-1]
    else:
        imgW, imgH = img.size
    for line in result:
        cx = line['cx']
        cy = line['cy']
        degree = line['angle']
        w = line['w']  # shadows the image width above (thick already computed)
        h = line['h']  # shadows the image height above
        x1, y1, x2, y2, x3, y3, x4, y4 = xy_rotate_box(cx, cy, w, h, degree)
        # rotate the box back into the displayed page orientation
        x1, y1, x2, y2, x3, y3, x4, y4 = box_rotate(
            [x1, y1, x2, y2, x3, y3, x4, y4],
            angle=(360 - angle) % 360,
            imgH=imgH,
            imgW=imgW)
        cx = np.mean([x1, x2, x3, x4])
        cy = np.mean([y1, y2, y3, y4])
        # draw the four edges of the (rotated) box
        cv2.line(tmp, (int(x1), int(y1)), (int(x2), int(y2)), c, 1)
        cv2.line(tmp, (int(x2), int(y2)), (int(x3), int(y3)), c, 1)
        cv2.line(tmp, (int(x3), int(y3)), (int(x4), int(y4)), c, 1)
        cv2.line(tmp, (int(x4), int(y4)), (int(x1), int(y1)), c, 1)
        mess = str(i)
        # font scale 1e-3*h ties the label size to the box height
        cv2.putText(tmp, mess, (int(cx), int(cy)), 0, 1e-3 * h, c, thick // 2)
        i += 1
    return Image.fromarray(tmp)
def plot_boxes(img, result, angle=0, color=(0, 0, 0),
               save_path='test/result.png'):
    """Draw rotated OCR boxes (labelled with their index) on an image.

    Note: this definition shadows an earlier ``plot_boxes`` with a different
    signature defined in the same module.

    Args:
        img: image as a numpy array (H x W [x C]).
        result: iterable of dicts with keys 'cx', 'cy', 'w', 'h' and
            'degree' (angle in degrees) describing each rotated box.
        angle: page rotation (0/90/180/270) already applied to ``img``.
        color: line/text color tuple.
        save_path: where the annotated image is written; pass a falsy value
            to skip saving. Defaults to the historical 'test/result.png'.

    Returns:
        PIL RGB image with the boxes drawn.
    """
    tmp = np.array(img)
    imgH, imgW = img.shape[:2]
    thick = int((imgH + imgW) / 300)  # drawing thickness scaled to image size
    if angle in [90, 270]:
        # page rotated sideways: swap dims for box_rotate
        imgW, imgH = imgH, imgW
    for i, line in enumerate(result):
        h = line['h']
        # 'degree' is stored in degrees; xy_rotate_box expects radians
        pts = xy_rotate_box(line['cx'], line['cy'], line['w'], h,
                            line['degree'] / 180 * np.pi)
        # rotate the box back into the displayed page orientation
        x1, y1, x2, y2, x3, y3, x4, y4 = box_rotate(
            list(pts), angle=(360 - angle) % 360, imgH=imgH, imgW=imgW)
        cx = np.mean([x1, x2, x3, x4])
        cy = np.mean([y1, y2, y3, y4])
        # draw the four edges of the box
        corners = [(x1, y1), (x2, y2), (x3, y3), (x4, y4)]
        for (xa, ya), (xb, yb) in zip(corners, corners[1:] + corners[:1]):
            cv2.line(tmp, (int(xa), int(ya)), (int(xb), int(yb)), color, 1)
        # font scale 1e-3*h ties the label size to the box height
        cv2.putText(tmp, str(i), (int(cx), int(cy)), 0, 1e-3 * h,
                    color, thick // 2)
    _image = Image.fromarray(tmp).convert('RGB')
    if save_path:
        _image.save(save_path)
    return _image
def api_root():
    """Flask endpoint: run OCR + train-ticket parsing on an uploaded image.

    Expects a POST request carrying an 'image' file field. On success returns
    the parsed ticket fields as a JSON string; otherwise returns a plain
    error message.
    """
    app.logger.info(PROJECT_HOME)
    if request.method == 'POST' and request.files['image']:
        starttime = time.time()
        img = request.files['image']
        # np.fromstring is deprecated (removed in modern NumPy) for binary
        # data; np.frombuffer is the supported equivalent.
        img_cv = cv2.imdecode(np.frombuffer(img.read(), np.uint8),
                              cv2.IMREAD_COLOR)
        boxes, rec_res = text_sys(img_cv)
        for box in boxes:
            # box center = mean of its four corners
            xy_sum = np.sum(box, axis=0) / 4.0
            cx = xy_sum[0]
            cy = xy_sum[1]
            # NOTE(review): arcsin of dy/dx looks like it should be arctan2,
            # and the radian result is then treated as degrees below
            # (degree / 180 * np.pi). Both effects are tiny for
            # near-horizontal text, which may be why it works — confirm
            # before changing. Also divides by zero for vertical edges.
            degree = np.arcsin(
                (box[1][1] - box[0][1]) / (box[1][0] - box[0][0]))
            w = abs(box[0][0] - box[1][0])
            h = abs(box[0][1] - box[3][1])
            x1, y1, x2, y2, x3, y3, x4, y4 = xy_rotate_box(
                cx, cy, w, h, degree / 180 * np.pi)
            # write the re-derived corners back into the box in place
            box[0][0], box[0][1] = x1, y1
            box[1][0], box[1][1] = x2, y2
            box[2][0], box[2][1] = x3, y3
            box[3][0], box[3][1] = x4, y4
        # pair each box with its recognized text
        assorted_results = [{
            'box': boxes[i],
            'txt': rec_res[i][0]
        } for i in range(len(rec_res))]
        res = trainTicket.trainTicket(assorted_results, img=img_cv)
        res = res.res
        elapse = time.time() - starttime
        app.logger.info("Predict time : %.3fs" % elapse)
        return json.dumps(res, ensure_ascii=False)
    else:
        return "Where is the image?"