def test_api_post2():
    # Empty the crop folder left over from the previous request.
    path = "C:\\Users\\CAU\\Desktop\\capstone\\text_recognition\\demo_image"
    if os.path.exists(path):
        for file in os.scandir(path):
            os.remove(file.path)

    # Save the uploaded image where the detector expects it.
    imagefile = request.files['image']
    filename = werkzeug.utils.secure_filename(imagefile.filename)
    print("\nReceived image File name : " + imagefile.filename)
    imagefile.save("./text_detection/test/" + filename)

    # Text detection -> crop word regions -> text recognition.
    detection.run_detection()
    img_files, img_bbox = load_files()
    crop_img(img_files, img_bbox)
    pred_str = recognition.run_recognition()

    # Merge the detected boxes with the recognized strings into result.csv.
    for i, file in enumerate(img_files):
        txt = pd.read_csv(img_bbox[i], header=None)
        df = pd.DataFrame(columns=["x1", "y1", "x2", "y2",
                                   "x3", "y3", "x4", "y4", "result_text"])
        for num, _col in enumerate(list(df)[:-1]):
            df[_col] = txt[num]
        df["result_text"] = pred_str
        df.to_csv("./result.csv")
    return "done"
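# ---------------------------------------------------------------------------
# Hypothetical variant (not called above): the same "empty the crop folder"
# step written with pathlib and a relative path instead of the hard-coded
# Windows path. A minimal sketch; the default folder name is an assumption.
# ---------------------------------------------------------------------------
def clear_crop_folder(folder="./text_recognition/demo_image"):
    """Delete every file in the crop folder, if the folder exists."""
    from pathlib import Path
    p = Path(folder)
    if p.is_dir():
        for f in p.iterdir():
            if f.is_file():
                f.unlink()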
def test_api_post():
    response_header = response_json_ops()
    # ------
    imagefile = request.files['image']
    filename = werkzeug.utils.secure_filename(imagefile.filename)
    print("\nReceived image File name : " + imagefile.filename)
    imagefile.save("./text_detection/test/" + filename)

    detection.run_detection()
    img_files, img_bbox = load_files()
    crop_img(img_files, img_bbox)
    pred_str = recognition.run_recognition()

    # [l, t], [r, t], [r, b], [l, b]
    for i, file in enumerate(img_files):
        txt = pd.read_csv(img_bbox[i], header=None)
        df = pd.DataFrame(columns=["x1", "y1", "x2", "y2",
                                   "x3", "y3", "x4", "y4", "result_text"])
        for num, _col in enumerate(list(df)[:-1]):
            df[_col] = txt[num]
        df["result_text"] = pred_str
        df.to_csv("./result.csv")
    return response_header
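# ---------------------------------------------------------------------------
# Hypothetical client-side sketch (not part of the service): how a caller
# might exercise the handler above. The handler reads the upload from
# request.files['image'], so the multipart field must be named "image";
# the URL/route here is an assumption, since route registration is not
# shown in this section.
# ---------------------------------------------------------------------------
def post_test_image(image_path, url="http://localhost:5000/test_api_post"):
    """Send an image to the OCR endpoint and return the response body."""
    import requests  # client-side dependency, used only by this sketch
    with open(image_path, "rb") as f:
        resp = requests.post(url, files={"image": f})
    return resp.text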
def test_api_post():
    # Empty the crop folder left over from the previous request.
    path = "C:\\Users\\CAU\\Desktop\\capstone\\text_recognition\\demo_image"
    if os.path.exists(path):
        for file in os.scandir(path):
            os.remove(file.path)

    # Save the uploaded image where the detector expects it.
    imagefile = request.files['image']
    filename = werkzeug.utils.secure_filename(imagefile.filename)
    print("\nReceived image File name : " + imagefile.filename)
    imagefile.save("./text_detection/test/" + filename)

    # Text detection -> crop word regions -> text recognition.
    detection.run_detection()
    img_files, img_bbox = load_files()
    crop_img(img_files, img_bbox)
    pred_str = recognition.run_recognition()

    # Underline detection with Mask R-CNN.
    cfg = PredictionConfig()
    # define the model
    model = MaskRCNN(mode='inference', model_dir='./', config=cfg)
    # load model weights
    model_path = 'mask_rcnn_underline_cfg_0020.h5'
    model.load_weights(model_path, by_name=True)
    temp = cv2.imread("./text_detection/test/androidFlask.jpg")
    yhat = model.detect([temp], verbose=0)[0]
    print(len(yhat['rois']))

    # Text-box corners are stored as [l, t], [r, t], [r, b], [l, b].
    for i, file in enumerate(img_files):
        txt = pd.read_csv(img_bbox[i], header=None)
        df = pd.DataFrame(columns=["x1", "y1", "x2", "y2",
                                   "x3", "y3", "x4", "y4", "result_text"])
        # Keep only the text boxes that have a detected underline inside them.
        for j, bb in enumerate(txt.values):
            x1, y1, x2, y2, x3, y3, x4, y4 = bb
            # textbb = [x1, y1, x3, y3]
            for underline in yhat['rois']:
                # Mask R-CNN ROIs come back as (y1, x1, y2, x2).
                uy1, ux1, uy2, ux2 = underline
                if x1 < (ux1 + ux2) / 2 < x3 and y1 < uy1 < y3:
                    # Append the matched box and its recognized text as a new row.
                    df = pd.concat([df, pd.DataFrame([{
                        "x1": x1, "y1": y1, "x2": x2, "y2": y2,
                        "x3": x3, "y3": y3, "x4": x4, "y4": y4,
                        "result_text": pred_str[j]}])], ignore_index=True)
                    # Draw the box: top-left and bottom-right corners of the rectangle.
                    temp = cv2.rectangle(temp, (x1, y1), (x3, y3), (0, 0, 255), 3)
        df.to_csv("./result.csv")
        cv2.imwrite("./result.jpg", temp)

    # Release the GPU so the model can be rebuilt on the next request.
    from keras import backend as K
    K.clear_session()
    cuda.select_device(0)
    cuda.close()
    del model
    return "done"
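# ---------------------------------------------------------------------------
# `PredictionConfig` is instantiated above but not defined in this section.
# A minimal sketch of what it presumably looks like, following the standard
# Mask_RCNN inference-config pattern; the NAME and NUM_CLASSES values are
# assumptions (background + one "underline" class), and the class is given a
# different name here so it does not shadow the real definition.
# ---------------------------------------------------------------------------
from Mask_RCNN.mrcnn.config import Config

class UnderlinePredictionConfigSketch(Config):
    NAME = "underline_cfg"   # assumed: must match the name used during training
    NUM_CLASSES = 1 + 1      # assumed: background + underline
    GPU_COUNT = 1            # single-GPU inference
    IMAGES_PER_GPU = 1       # one image per detect() call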
def run_main():
    detection.run_detection()
    img_files, img_bbox = load_files()
    crop_img(img_files, img_bbox)
    pred_str = recognition.run_recognition()

    # [l, t], [r, t], [r, b], [l, b]
    for i, file in enumerate(img_files):
        txt = pd.read_csv(img_bbox[i], header=None)
        df = pd.DataFrame(columns=["x1", "y1", "x2", "y2",
                                   "x3", "y3", "x4", "y4", "result_text"])
        for num, _col in enumerate(list(df)[:-1]):
            df[_col] = txt[num]
        df["result_text"] = pred_str
        df.to_csv("./result.csv")
def run_main():
    detection.run_detection()
    img_files, img_bbox = load_files()
    crop_img(img_files, img_bbox)
    pred_str = recognition.run_recognition()
    print("recog done")

    # Underline detection with Mask R-CNN.
    from Mask_RCNN.mrcnn.model import MaskRCNN
    cfg = PredictionConfig()
    # define the model
    model = MaskRCNN(mode='inference', model_dir='./', config=cfg)
    # load model weights
    model_path = 'mask_rcnn_underline_cfg_0019.h5'
    model.load_weights(model_path, by_name=True)
    temp = cv2.imread("./text_detection/test/androidFlask.jpg")
    yhat = model.detect([temp], verbose=0)[0]

    # Text-box corners are stored as [l, t], [r, t], [r, b], [l, b].
    for i, file in enumerate(img_files):
        txt = pd.read_csv(img_bbox[i], header=None)
        df = pd.DataFrame(columns=["x1", "y1", "x2", "y2",
                                   "x3", "y3", "x4", "y4", "result_text"])
        # Print the recognized text of every box that has an underline inside it.
        for j, bb in enumerate(txt.values):
            x1, y1, _, _, x3, y3, _, _ = bb
            # textbb = [x1, y1, x3, y3]
            for underline in yhat['rois']:
                # Mask R-CNN ROIs come back as (y1, x1, y2, x2).
                uy1, ux1, uy2, ux2 = underline
                if x1 < (ux1 + ux2) / 2 < x3 and y1 < uy1 < y3:
                    print(pred_str[j])
        # The CSV keeps every box; the underline match above is only printed.
        for num, _col in enumerate(list(df)[:-1]):
            df[_col] = txt[num]
        df["result_text"] = pred_str
        df.to_csv("./result.csv")
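# ---------------------------------------------------------------------------
# Hypothetical helper (not called above): the underline/text-box overlap test
# used in both loops, written once for clarity. Mask R-CNN returns ROIs as
# (y1, x1, y2, x2), while the detection CSV stores the four text-box corners
# as [l, t], [r, t], [r, b], [l, b].
# ---------------------------------------------------------------------------
def underline_hits_box(underline_roi, box_corners):
    """True if the underline's horizontal centre lies between the box's left
    and right edges and its top edge falls inside the box vertically."""
    uy1, ux1, uy2, ux2 = underline_roi
    x1, y1, _, _, x3, y3, _, _ = box_corners
    return x1 < (ux1 + ux2) / 2 < x3 and y1 < uy1 < y3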