Example #1
def dealPicture(picSrc):
    print("picSrc:%s"%picSrc)
    result_dir = './test_result'
    if os.path.exists(result_dir):
        shutil.rmtree(result_dir)
    os.mkdir(result_dir)
    resultString = ""
    image_file = Uploadpic + picSrc
    print("image_file:%s"%image_file)
    image = np.array(Image.open(image_file).convert('RGB'))
    t = time.time()
    # recognize with the original model
    result, image_framed = ocr.model(image)
    # alternative: Chinese recognition via pytesseract
    #result, image_framed = ocr.model2(image)
    output_file = os.path.join(result_dir, image_file.split('/')[-1])
    Image.fromarray(image_framed).save(output_file)
    print("Mission complete, it took {:.3f}s".format(time.time() - t))
    print("\nRecognition Result:\n")
    for key in result:
        print(result[key][1])
        resultString += result[key][1]
    resultString = re.sub("[#$%&\'()*+,-./:;<=>?@,。?★、…【】《》?“”‘’![\\]^_`{|}~!]+", "",resultString)
    print("resultString:%s"%resultString)
    return resultString
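Across these examples, the dictionary returned by ocr.model is indexed the same way. The sketch below spells out that assumed layout (bounding-box coordinates at index 0, recognized text at index 1 and, where used, a confidence score at index 2); the layout is inferred from the snippets on this page, not from a documented API.

# Sketch only: assumes `result` maps a line key to (box_coords, text[, confidence]),
# which is how the examples on this page index it.
def collect_lines(result):
    lines = []
    for key in result:
        coords = list(result[key][0])   # bounding-box coordinates of the text line
        text = result[key][1]           # recognized text
        lines.append((coords, text))
    return lines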
Example #2
 def post(self, *args, **kwargs):
     ret = {'result': 'OK'}
     result_dir = './test_result'
     if os.path.exists(result_dir):
         shutil.rmtree(result_dir)
     os.mkdir(result_dir)
     upload_path = os.path.join(os.path.dirname(__file__), 'demo')  # temporary directory for uploaded files
     file_metas = self.request.files.get('file', None)  # metadata of the form field named 'file'
     filename = file_metas[0]['filename']
     file_path = os.path.join(upload_path, filename)
     with open(file_path, 'wb') as up:
         up.write(file_metas[0]['body'])
     image = np.array(Image.open(file_path).convert('RGB'))
     t = time.time()
     result, image_framed = ocr.model(image)
     output_file = os.path.join(result_dir, file_path.split('/')[-1])
     Image.fromarray(image_framed).save(output_file)
     print("Mission complete, it took {:.3f}s".format(time.time() - t))
     print("单条预测时间", time.time() - t)
     print("\nRecognition Result:\n")
     for key in result:
         print(result[key][1])
     self.write("13213")
Example #3
def OCRconsumer(q, targetPath):
    result_str = ""
    while True:
        # if q.empty():
        #     print(1)
        #     continue

        res = q.get()
        if res is None: break
        tic = time.time()
        id, image = res
        result, image_framed = ocr.model(image)
        toc = time.time()
        print("one pic predict complete, it took {:.3f}s".format(toc - tic))
        j = 0
        result_str += str(id)
        result_str += '\n'

        for key in result:
            if j == 0 or j == 1:
                str_tmp = str(result[key][1])
                result_str += str_tmp
                result_str += '\n'
            else:
                break
            j += 1
        result_str += "###\n"
    txt_path = targetPath + "/OnlineShopLicense.txt"
    with open(txt_path, 'a') as writer:
        writer.write(result_str)
Example #4
def predict(file):
    image = cv2.imread(file)

    t = time.time()
    result, image_framed = ocr.model(image)
    print("Mission complete, it took {:.3f}s".format(time.time() - t))
    print("\n Recognition Result: \n")
    for key in result:
        print(result[key][1])
    return result[key][1]
Example #5
def ocr_img_pse():
    file = request.files['file']
    lan = request.args.get('language', 'CHE')
    angle = request.args.get('angle', 'True')
    logging.info('language type: %s' % lan)
    logging.info('angle detection requested: %s' % angle)
    if angle == 'False':
        angle = False
    else:
        angle = True
    if not file:
        logging.info('image is empty')
        return response(json.dumps({'code': -1, 'msg': 'file is empty', 'result': ''}))

    #if file.filename.split('.')[-1] not in ['jpg', 'png', 'jpeg']:
    #   logging.info('invalid image format: %s' % file.filename)
    #  return response(json.dumps({'code': -2, 'msg': 'invalid file format', 'result': ''}))

    logging.info('image format: %s' % file.filename)
    # save_path = 'up_images/' + file.filename
    # file.save('mid.jpg')
    # img = cv2.imread(save_path, 3)
    try:
        img_buffer = np.asarray(bytearray(file.read()), dtype='uint8')
        bytesio = BytesIO(img_buffer)
        img = skimage.io.imread(bytesio)
    except Exception as e:
        logging.info(str(e), exc_info=True)
        return response(json.dumps({
            'code': -2,
            'msg': 'invalid file format',
            'result': ''
        }))

    if len(img.shape) == 2:
        img = cv2.cvtColor(img, cv2.COLOR_GRAY2BGR)
    elif img.shape[-1] == 4:
        img = cv2.cvtColor(img, cv2.COLOR_BGRA2BGR)
    else:
        pass
    a = time.time()
    # cv2.imwrite('mid'+str(a)+'.jpg',img)
    results, img_shape = model(img, lan, angle)
    # logging.info('image processing finished: ' + str(results))
    # return json.dumps({'code':0, 'msg':'', 'result':results, 'shape':img_shape})
    return response(
        json.dumps({
            'code': 0,
            'msg': '',
            'result': results,
            'shape': img_shape
        }))
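The grayscale and four-channel branches above recur in several of these handlers. As a sketch, the same normalization can be pulled into a small helper; the function name is an assumption, while the cv2 conversions are the ones already used in the example.

import cv2

def to_three_channels(img):
    # Force any input array to three channels before passing it to the model.
    if len(img.shape) == 2:
        return cv2.cvtColor(img, cv2.COLOR_GRAY2BGR)   # grayscale -> BGR
    if img.shape[-1] == 4:
        return cv2.cvtColor(img, cv2.COLOR_BGRA2BGR)   # BGRA -> BGR
    return img                                         # already 3-channel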
Example #6
def imgArrOCR(image):
    '''
        Run OCR on a single image given as a numpy array.

        Returns the recognized text as a single string, with lines
        separated by '\\n'.
    '''
    result, image_framed = ocr.model(image)
    result_str = ""
    for key in result:
        result_str += result[key][1]
        result_str += '\n'
    return result_str
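A possible call site for imgArrOCR, assuming an image loaded with PIL as in the other examples; the file name here is illustrative only.

import numpy as np
from PIL import Image

image = np.array(Image.open('sample.jpg').convert('RGB'))  # 'sample.jpg' is a placeholder path
print(imgArrOCR(image))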
Example #7
def detect(image):
    t = time.time()
    result, image_framed = ocr.model(image)

    # # save output file for debug
    # output_file = os.path.join(result_dir, os.path.split(image_file)[-1])
    # Image.fromarray(image_framed).save(output_file)

    # print debug message
    print("Mission complete, it took {:.3f}s".format(time.time() - t))
    print("Recognition Result:")
    for rect, text in result:
        print(text)
    return result
Example #8
def rec_img(task_id):
    if task_id == "file":
        img_file = request.form['image_path']
        if os.path.exists(img_file):
            image = np.array(Image.open(img_file).convert('RGB'))
            t = time.time()
            result, image_framed = ocr.model(image)
            print("Mission complete, it took {:.3f}s".format(time.time() - t))
            re_dict = {}
            for key in result:
                re_dict[result[key][1]] = list(result[key][0])
            return json.dumps(re_dict)
        else:
            return 'Image does not exist!'
Example #9
def otr():
    if request.method == 'POST':
        f = request.files['file']

        if not (f and allowed_file(f.filename)):
            return jsonify({
                "error": 1001,
                "msg": "请检查上传的图片类型,仅限于png、PNG、jpg、JPG、bmp"
            })

        user_input = request.form.get("name")

        # directory containing the current file
        basepath = os.path.dirname(__file__)

        # note: the target folder must already exist, otherwise the path will not be found
        upload_path = os.path.join(basepath, 'static/images',
                                   secure_filename(f.filename))
        f.save(upload_path)

        # --------------------------------------------------------------------------
        # image = np.array(Image.open(upload_path).convert('RGB'))
        image = cv2.imread(upload_path)
        t = time.time()
        ot_image = antiMissingMain.getOTDetectROI(image)
        result, image_framed = ocr.model(ot_image)
        output_file = os.path.join("test_result", upload_path.split('/')[-1])
        Image.fromarray(image_framed).save(output_file)
        Image.fromarray(image_framed).save("static/images/test.jpg")

        stringResult = {}
        stringResult[0] = "Mission complete, it took {:.3f}s".format(
            time.time() - t)
        i = 1
        for key in result:
            stringResult[i] = result[key][1]
            i = i + 1

        # os.remove(upload_path)  # delete the image once recognized
        # --------------------------------------------------------------------------
        image_framed = cv2.cvtColor(np.asarray(image_framed),
                                    cv2.COLOR_RGB2BGR)
        cv2.imwrite(os.path.join(basepath, 'static/images', 'test.jpg'),
                    image_framed)
        return render_template('otr_ok.html',
                               userinput=user_input,
                               val1=time.time(),
                               data_dict=stringResult)

    return render_template('otr.html')
Example #10
def pic2word(pic):
    # image = np.array(Image.open(image_file).convert('RGB'))
    image = np.array(pic)
    t = time.time()
    # result: the text
    # image_framed: the image with segmentation
    result, image_framed = ocr.model(image)
    print("Mission complete, it took {:.3f}s".format(time.time() - t))
    print("\nRecognition Result:\n")
    with open('word.txt', 'w') as f1, open('to_english.txt', 'w') as f2:
        for key in result:
            # print(result[key][1])
            f2.write(result[key][1])
            f1.write(result[key][1] + '\n')
Example #11
def fileOCR(imagePath):
    '''
        Run OCR on a single image file.

        Returns the recognized text as a single string, with lines
        separated by '\\n'.
    '''
    image = np.array(PIL.Image.open(imagePath).convert('RGB'))
    # print(image.shape())
    result, image_framed = ocr.model(image)
    result_str = ""
    for key in result:
        result_str += result[key][1]
        result_str += '\n'
    return result_str
Example #12
def predict(path):
    t = time.time()
    image = np.array(Image.open(path).convert('RGB'))
    result, _ = ocr.model(image)
    result = ' '.join([result[key][1] for key in result])

    print("Mission complete, it took {:.3f}s".format(time.time() - t))
    print("Recognition Result:", result)

    response = make_response(result)
    response.headers['Access-Control-Allow-Origin'] = '*'
    response.headers['Access-Control-Allow-Methods'] = 'POST'
    response.headers[
        'Access-Control-Allow-Headers'] = 'x-requested-with,content-type'

    return response
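predict() above builds a CORS-enabled Flask response, so it is presumably registered as a view somewhere. A hedged sketch of that wiring follows; the app object, route name, and temporary-file handling are assumptions made for illustration, not part of the original example.

import os
import tempfile
from flask import Flask, request

app = Flask(__name__)  # assumed application object

@app.route('/ocr', methods=['POST'])
def ocr_endpoint():
    f = request.files['file']                                    # uploaded image
    tmp_path = os.path.join(tempfile.gettempdir(), f.filename)   # assumed temp location
    f.save(tmp_path)
    return predict(tmp_path)                                     # reuse predict() from the example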
Example #13
def ocr_result(arg):
    image = np.array(
        Image.open('/'.join(['./image_input', arg])).convert('RGB'))
    t = time.time()
    result, image_framed = ocr.model(image)
    output_file = '/'.join(['./image_result', arg + '.txt'])
    # Image.fromarray(image_framed).save(output_file)
    print("Mission complete, it took {:.3f}s".format(time.time() - t))
    print("\nRecognition Result:\n")
    txt = ''
    for key in result:
        txt = txt + result[key][1]
    print(txt)
    with open(output_file, 'w', encoding='utf-8') as f:
        f.write(txt)
    return txt
Example #14
def identify_text():
    if request.method == 'POST':
        f = request.files['file']
        # basepath = os.path.dirname(__file__)  # directory of the current file
        # upload_path = os.path.join(basepath, 'static/uploads', f.filename)  # note: the folder must already exist, otherwise the path will not be found
        upload_path = os.path.join(os.getcwd(), 'static/uploads/', f.filename)
        print(upload_path)
        f.save(upload_path)
        image = cv2.imread(upload_path, 1)
        #result=ocrText(upload_path)
        res=''
        result, image_framed = ocr.model(image)
        for key in result:
            print(result[key][1])
            res = res+(result[key][1])
        if (os.path.exists(upload_path)):
            os.remove(upload_path)
    return jsonify({'result': res})
Example #15
def demo(image, image_name='initial_name.jpg'):

    psenet_start_time = time.time()
    psenet_text_recs, psenet_draw_image = psenet_predict(image)
    psenet_end_time = time.time()
    print("psenet耗时:|{}|".format(psenet_end_time - psenet_start_time))
    psenet_image_name = image_name[:-4] + '_psenet' + ".jpg"
    print("psenet_image_name:|{}|,psenet_result_text_recs:|{}|".format(
        psenet_image_name, psenet_text_recs))
    objectid = container_type(text_recs=psenet_text_recs)
    psenet_result_path = os.path.join(psenet_result_dir, psenet_image_name)
    #@@@@@@
    Image.fromarray(psenet_draw_image).save(psenet_result_path)  # no output when processing 100k images
    # ==================
    # deeptext text recognition: the image is passed in as an ndarray, but recognition itself needs a PIL image
    # ===================
    adjust_result_dir = os.path.join(adjust_main_dir, psenet_image_name[:-4])
    makedir(adjust_result_dir)
    ocr_result = []
    ocr_result, cfd_scores = ocr.model(
        image=image,
        text_recs=psenet_text_recs,
        adjust_result_dir=adjust_result_dir,
        original_image_name=psenet_image_name,
    )
    print("psenet_image_name:|{}|,ocr_result:|{}|".format(
        psenet_image_name, ocr_result))
    hscore_text_recs = []
    hscore_ocr_result = []
    # print("====cfd_score:|{}|".format( cfd_scores ))
    for index, confidence_score in enumerate(cfd_scores):
        # print( "=======confidence:|{}|".format( confidence_score))
        if confidence_score >= 0.1:
            hscore_text_recs.append(psenet_text_recs[index])
            hscore_ocr_result.append(ocr_result[index])
        else:
            print("confidence_score:|{}| < 0.1 mv".format(confidence_score))
    ocr_result_file.writelines(psenet_image_name + ' ' + str(ocr_result) +
                               ',' + '\n')
    psenet_point_file.writelines(psenet_image_name + ':' +
                                 str(psenet_text_recs) + '\n')
    return hscore_text_recs, hscore_ocr_result, objectid
Example #16
    def get(self):
        start = time.time()
        result_dir = './test_result'
        if os.path.exists(result_dir):
            shutil.rmtree(result_dir)
        os.mkdir(result_dir)

        for image_file in sorted(image_files):
            image = np.array(Image.open(image_file).convert('RGB'))
            t = time.time()
            result, image_framed = ocr.model(image)
            output_file = os.path.join(result_dir, image_file.split('/')[-1])
            Image.fromarray(image_framed).save(output_file)
            print("Mission complete, it took {:.3f}s".format(time.time() - t))
            print("单条预测时间", time.time() - t)
            print("\nRecognition Result:\n")
            for key in result:
                print(result[key][1])
        print("总计预测时间", time.time() - start)
        self.write("1233")
Example #17
    def get(self):
        args = self.reqparse.parse_args()
        image_url = args['image_url']
        image_file = glob(image_url)
        if len(image_file) < 1:
            print("")
            return Output.error("文件不存在!请先上传文件再进行识别!")

        image = np.array(Image.open(image_file[0]).convert('RGB'))
        t = time.time()
        result, image_framed = ocr.model(image)
        print("Mission complete, it took {:.3f}s".format(time.time() - t))
        print("\nRecognition Result:\n")

        data = ''
        for key in result:
            print(result[key][1])
            # data.append(result[key][1])
            data += result[key][1]
        # val.val()
        return Output.success(data)
Example #18
def predict():
    for image_file in sorted(image_files):
        image = np.array(Image.open(image_file).convert('RGB'))

        result, image_framed = ocr.model(image)

        output_file = os.path.join(result_dir, image_file.split('/')[-1])
        Image.fromarray(image_framed).save(output_file)


        for key in result:

            write_path = os.getcwd()+'/test_result/result.txt'
            result_name = image_file.split('/')[-1]
            result_name = result_name.split('.')[0]

            print(result_name+':'+result[key][1])

            with open(write_path, 'a') as f1:
                f1.write(result_name + ':' + result[key][1] + '\n')
Example #19
def dirOCR(dirpath):
    '''
        Run OCR on every file in the given directory.

        Returns a list of (filename, ocr_result_string) tuples.
    '''

    filelist = os.listdir(dirpath)
    results = []
    for filename in filelist:
        imagePath = os.path.join(dirpath, filename)
        if os.path.isfile(imagePath):
            try:
                image = np.array(PIL.Image.open(imagePath).convert('RGB'))
                result, image_framed = ocr.model(image)
                result_str = ""
                for key in result:
                    result_str += result[key][1]
                    result_str += '\n'
                results.append((filename, result_str))
            except Exception as e:
                print(e)
                continue
    return results
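A hypothetical use of dirOCR, dumping every recognized file to one text file; the directory and output names mirror those used elsewhere on this page but are assumptions.

with open('ocr_results.txt', 'w', encoding='utf-8') as out:       # assumed output name
    for filename, text in dirOCR('./test_images'):                # assumed input directory
        out.write(filename + '\n' + text + '###\n')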
Example #20
def run(image_file, type):
    # if len(sys.argv)==3:
    #     image_file=sys.argv[1]
    #     type=sys.argv[2]
    # else:
    #     print('wrong!!!')
    image = np.array(Image.open(image_file).convert('RGB'))
    t = time.time()

    result, image_framed = ocr.model(image)
    print(t)
    print("Mission complete, it took {:.3f}s".format(time.time() - t))
    print("\nRecognition Result:\n")
    result_string = ''
    for key in result:
        result_string += result[key][1]
    dict = {
        'certificate_of_degree': deal_certificate_of_degree,
        'patent': deal_patent,
        'IDcard': deal_IDcard,
        'business_license': deal_business_license
    }
    result_data = dict[type](result_string)
    return result_data
Example #21
    def post(self):
        files = self.request.files.get('file', None)
        lan = self.get_argument('language', 'CHE')
        angle = self.get_argument('angle', 'True')
        combine = self.get_argument('combine', 'False')
        lines = self.get_argument('lines', [])
        just_detection = self.get_argument('just_detection','False')
        if lines:
            lines = eval(lines)
        if not isinstance(lines, list):
            lines = []

        if not files:
            logging.info('image is empty')
            self.write(self.resp({'code': -1, 'msg': 'file is empty', 'result': ''}))
            self.finish()
            return

        file = files[0]

        logging.info('table lines: %s' % str(lines))
        logging.info('file: %s' % file.filename)
        logging.info('language type: %s' % lan)
        logging.info('angle detection requested: %s' % angle)
        logging.info('combine requested: %s' % combine)
        logging.info('detection only: %s' % just_detection)

        if angle == 'False':
            angle = False
        else:
            angle = True

        if combine == 'False':
            combine = False
        else:
            combine = True

        just_detection = False if just_detection == 'False' else True

        try:
            img_buffer = np.asarray(bytearray(file.body), dtype='uint8')
            bytesio = BytesIO(img_buffer)
            img = skimage.io.imread(bytesio)
            logging.info('image IO finished!')
        except Exception as e:
            logging.info(str(e), exc_info=True)
            self.write(self.resp({'code': -2, 'msg': 'invalid file format', 'result': ''}))
            self.finish()
            return

        if len(img.shape) == 2:
            img = cv2.cvtColor(img, cv2.COLOR_GRAY2BGR)
        elif img.shape[-1] == 4:
            img = cv2.cvtColor(img, cv2.COLOR_BGRA2BGR)
        else:
            pass
        try:
            start = time.time()
            results, img_shape = model(img, lan, angle, combine, lines, just_detection)
            end = time.time()
            logging.info('ocr total time %s' % str(end-start))
            #logging.info(results)
            self.write(self.resp({'code':0, 'msg': '', 'result': results, 'shape': img_shape}))
        except:
            logging.info('error: {}'.format(traceback.format_exc()), exc_info=True)
            self.write(self.resp({'code': -3, 'msg': 'model error', 'result': ''}))
            self.finish()
    #         + getLine(img, (Point[0] + 1, Point[1] + 1))
    # else:
    #     return ()


if __name__ == '__main__':
    result_dir = './test_result'
    if os.path.exists(result_dir):
        shutil.rmtree(result_dir)
    os.mkdir(result_dir)

    for image_file in sorted(image_files):
        AllLine = []
        image = np.array(Image.open(image_file).convert('RGB'))
        t = time.time()
        result, image_framed, boxes, scale = ocr.model(image)
        output_file = os.path.join(result_dir, image_file.split('/')[-1])
        Image.fromarray(image_framed).save(output_file)
        img = cv2.resize(image,
                         None,
                         None,
                         fx=scale,
                         fy=scale,
                         interpolation=cv2.INTER_LINEAR)
        img = cv2.cvtColor(img, cv2.COLOR_BGR2GRAY)
        # img = cv2.cvtColor(image, cv2.COLOR_BGR2GRAY)
        print("Mission complete, it took {:.3f}s".format(time.time() - t))
        print("\nRecognition Result:\n")

        for box in boxes:
            for i in range(box[1], box[5]):
Example #23
     difference = cv2.subtract(im, im2)
     result = not np.any(difference)
 while (not result):
     x, y = im.shape[0:2]
     im = im[0:lastrose - 22, ]
     # plt.figure(figsize=(20, 20))
     # plt.imshow(im)
     # plt.show()
     print(im.shape)
     im = cv2.resize(im, (int(im.shape[1] * 2), int(im.shape[0] * 2)))
     for j in range(len(sortrose)):
         sortrose[j] = sortrose[j] * 2
     cv2.bitwise_not(im, im)
     shape = im.shape
     print(shape)
     result, image_framed, isname = ocr.model(im, sortrose, nameMap)
     print("\nRecognition Result:\n")
     total = []
     str1 = ""
     flag = False
     for key in result:
         out = result[key][1]
         # print(str(isname[key]) + ":" + out)
         if isname[key] == 1:
             if str1 != "":
                 if str1.find(":") != -1:
                     if str1.split(":")[-1] != "":
                         total.append(str1)
                         str1 = ""
                 else:
                     if str1 != "":
Example #24
def bank_flow_identity(imagePath):  # imagePath e.g. './test_images/gs.jpg'
    image = cv2.imread(imagePath, 1)
    print(image)
    # binarization
    gray = cv2.cvtColor(image, cv2.COLOR_BGR2GRAY)
    binary = cv2.adaptiveThreshold(~gray, 255, cv2.ADAPTIVE_THRESH_GAUSSIAN_C,
                                   cv2.THRESH_BINARY, 35, -5)

    rows, cols = binary.shape
    scale = 40
    # detect horizontal lines
    kernel = cv2.getStructuringElement(cv2.MORPH_RECT, (cols // scale, 1))
    eroded = cv2.erode(binary, kernel, iterations=1)
    dilatedcol = cv2.dilate(eroded, kernel, iterations=1)

    # detect vertical lines
    scale = 20
    kernel = cv2.getStructuringElement(cv2.MORPH_RECT, (1, rows // scale))
    eroded = cv2.erode(binary, kernel, iterations=1)
    dilatedrow = cv2.dilate(eroded, kernel, iterations=1)

    # mark the intersection points
    bitwiseAnd = cv2.bitwise_and(dilatedcol, dilatedrow)

    # mark the table grid
    merge = cv2.add(dilatedcol, dilatedrow)

    # locate the white points in the binary image
    ys, xs = np.where(bitwiseAnd > 0)
    mylisty = []
    mylistx = []

    # sort the coordinates and keep only the jumps in x and y: each intersection spans several pixels, so only the last point of each run is kept
    i = 0
    myxs = np.sort(xs)
    for i in range(len(myxs) - 1):
        if (myxs[i + 1] - myxs[i] > 10):
            mylistx.append(myxs[i])
        i = i + 1
    mylistx.append(myxs[i])
    # print(mylistx)
    # print(len(mylistx))

    i = 0
    myys = np.sort(ys)
    #print(np.sort(ys))
    for i in range(len(myys) - 1):
        if (myys[i + 1] - myys[i] > 20):
            mylisty.append(myys[i])
        i = i + 1
    mylisty.append(myys[i])

    i = 0
    t = time.time()
    res = ''
    for i in range(13):  # only 13 rows contain valid figures
        ROI = image[mylisty[i]:mylisty[i + 1], mylistx[9]:mylistx[10]]

        result, image_framed = ocr.model(ROI)
        for key in result:
            print(result[key][1])
            res = res + result[key][1] + '||'
        i = i + 1
    print("tsk complete, it took {:.3f}s".format(time.time() - t))
    return res
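The erode/dilate passes in bank_flow_identity are a common OpenCV recipe for pulling table rules out of a binarized scan: a wide, one-pixel-high kernel keeps only horizontal strokes, a tall, one-pixel-wide kernel keeps only vertical strokes, and their bitwise AND marks the grid intersections. The sketch below isolates that step; the kernel scales follow the example, while the function name and defaults are assumptions.

import cv2

def extract_table_lines(gray, h_scale=40, v_scale=20):
    # Binarize exactly as in the example above.
    binary = cv2.adaptiveThreshold(~gray, 255, cv2.ADAPTIVE_THRESH_GAUSSIAN_C,
                                   cv2.THRESH_BINARY, 35, -5)
    rows, cols = binary.shape
    h_kernel = cv2.getStructuringElement(cv2.MORPH_RECT, (cols // h_scale, 1))
    v_kernel = cv2.getStructuringElement(cv2.MORPH_RECT, (1, rows // v_scale))
    horizontal = cv2.dilate(cv2.erode(binary, h_kernel), h_kernel)   # horizontal rules
    vertical = cv2.dilate(cv2.erode(binary, v_kernel), v_kernel)     # vertical rules
    intersections = cv2.bitwise_and(horizontal, vertical)            # grid crossings
    return horizontal, vertical, intersections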
Example #25
def ocr_img_pse():
    print('request.files.keys()',list(request.files.keys()))
    lan = request.args.get('language')
    print('test_lan',lan)
    #lan = 'jpe'
    #lan = 'eng'
    #lan = 'chn'
    #print('test_lan',lan)

    logging.info('language type: %s' % lan)
    if 'json_response' in request.files.to_dict().keys():
        json_response = request.files['json_response']
        print('yes')
        if json_response:
            print('aaaaa')
    elif 'file' in request.files.to_dict().keys():
        try:
            lan = request.form['language']
            print('lan',request.form['language'])
        except:
            lan = request.args.get('language')
            print('test_lan',lan)
        file = request.files['file']
        #llann = request.files['language']
        #print('language',llann)
        if not file:
            logging.info('image is empty')
            return response(json.dumps({'code': -1, 'msg': 'file is empty', 'result': ''}))
        logging.info('image format: %s' % file.filename)
        try:
            img_buffer = np.asarray(bytearray(file.read()), dtype='uint8')
            bytesio = BytesIO(img_buffer)
            img = skimage.io.imread(bytesio)
        except Exception as e:
            logging.info(str(e), exc_info=True)
            return response(json.dumps({'code': -2, 'msg': 'invalid file format', 'result': ''}))

        if len(img.shape) == 2:
            img = cv2.cvtColor(img, cv2.COLOR_GRAY2BGR)
        elif img.shape[-1] == 4:
            img = cv2.cvtColor(img, cv2.COLOR_BGRA2BGR)
        else:
            pass
        a = time.time()
        try:
            check_label = request.form['checkLabel']
            print('checkLabel', check_label)
        except:
            check_label = 'False'
        #check_label = False
        file_url = 'http://39.104.88.168/image_rec/'
        if check_label == 'False':
            if OCR:
                results,img_shape, after_detect_img_name = model(img, lan,file.filename)
                return response(json.dumps({'code':0, 'msg':'','file_url':file_url,'img_info':results,'after_detect_img_info':after_detect_img_name},cls=MyEncoder))
            else:
                recs, img_shape, after_detect_img_name,rec_name = model(img, lan, file.filename)
                return response(json.dumps({'code': 0, 'msg': '', 'file_url': file_url, 'img_info': recs,
                                            'after_detect_img_info': after_detect_img_name, 'rec_name': rec_name},
                                           cls=MyEncoder))

        else:
            after_detect_img_name = file.filename
            results = []
            txt_file = request.form['txtlabel']
            print(txt_file)
            if txt_file == 'True' :
                check_label_text = open('check_label/label.txt','r').readlines()
                #random.shuffle(check_label_text)
                check_label_text = check_label_text
                for check_label_line in check_label_text:
                    label_img_info = {}
                    label_img_info['img_name'] = check_label_line.split(' ')[0]+'.jpg'
                    label_and_rec_text = {}
                    #label_and_rec_text['label']= check_label_line['label_text']
                    label_and_rec_text['label']= ' '.join(check_label_line.split(' ')[1:]).strip()
                    label_and_rec_text['rec_text'] = '1'
                    label_img_info['text'] = label_and_rec_text
                    results.append(label_img_info)
            else:
                check_json_file = request.form['filename']
                check_label_text = open(check_json_file +'.json','r').readlines()
                # random.shuffle(check_label_text)
                for check_label_line in check_label_text:
                    check_label_line = json.loads(check_label_line)
                    label_img_info = {}
                    label_img_info['img_name'] = check_label_line['img_name']
                    label_and_rec_text = {}
                    label_and_rec_text['label']= check_label_line['text']['label']
                    label_and_rec_text['rec_text']= check_label_line['text']['rec_text']
                    label_img_info['text'] = label_and_rec_text
                    results.append(label_img_info)

            return response(json.dumps({'code':0, 'msg':'','file_url':file_url,'img_info':results,'after_detect_img_info':after_detect_img_name}))
    else:
        label_data = request.data
        print('label_data',label_data)
        if label_data != '':
            label_data = json.loads(label_data)
        aaaa =label_data['text']
        json_label_path = get_json_path(lan)
        print('.'.join(label_data['imgurl'].split('/')[-1].split('.')[:-1]))
        jsonname = json_label_path + '.'.join(label_data['imgurl'].split('/')[-1].split('.')[:-1]) + '.json'
        with open(jsonname,'w') as f:
            json.dump(label_data,f)
        print('saved successfully')
        return response(json.dumps({'code':-1,'msg':'saved successfully'}))
Example #26
#coors_files = glob('./test_coors/*.*')

def crop(image_path, coors, save_dir):
    image_name = os.path.split(image_path)[1].split('.')[0]
    image = cv2.imread(image_path)
    for index, coor in enumerate(coors):
        x1, y1, x2, y2 = coor[0], coor[1], coor[6], coor[7]
        #print(x1, y1, x2, y2)
        roi_image = image[y1: y2 + 1, x1: x2 + 1]
        cv2.imwrite(os.path.join(save_dir, image_name+'_{}.jpg'.format(index)), roi_image)


if __name__ == '__main__':
    result_dir = './test_result'
    if os.path.exists(result_dir):
        shutil.rmtree(result_dir)
    os.mkdir(result_dir)

    for image_file in sorted(image_files):
        image = np.array(Image.open(image_file).convert('RGB'))
        t = time.time()
        result, image_framed , text_recs = ocr.model(image)
        output_file = os.path.join(result_dir, image_file.split('/')[-1])
        Image.fromarray(image_framed).save(output_file)
        print("Mission complete, it took {:.3f}s".format(time.time() - t))
        print("\nRecognition Result:\n")
        for key in result:
            print(result[key][1])
#        crop(image_file,text_recs,'./test_coors')
       
Example #27
def up_image():
    # curl -v http://127.0.0.1:9436/ocr -F "image_file=@./test.jpg"
    if request.method == 'POST' and request.files.get('image_file'):
        timec = str(time.time()).replace(".", "")
        file = request.files.get('image_file')
        print(file)
        img_data = file.read()
        img = BytesIO(img_data)
        img = Image.open(img, mode="r")
        # username = request.form.get("name")
        size = img.size
        print("接收图片尺寸: {}".format(size))
        if size[0] < 20 or size[1] < 14:
            content = json.dumps({
                "error_code": 1003,
                'error_message': 'file to small'
            })
            resp = response_headers(content)
            return resp
        # save the image
        file_name = "{}_{}.{}".format('temp', timec, 'jpg')
        print("Saving image: {}".format(file_name))
        file_path = os.path.join(api_image_dir + file_name)
        with open(file_path, 'wb') as f:
            f.write(img_data)
            f.close()
        s = time.time()
        value = []

        try:
            image = np.array(img.convert('RGB'))
            t = time.time()
            result, image_framed, scale = ocr.model(image)
            file_name = "{}_{}.{}".format('ocr_', timec, 'jpg')
            dest_path = os.path.join(api_done_dir + file_name)
            # os.system('mv '+file_path+' '+dest_path)
            Image.fromarray(image_framed).save(dest_path)
            print("Mission complete, it took {:.3f}s".format(time.time() - t))
            print("\nRecognition Result:\n")
            texts = []
            for key in result:
                print(result[key][1])
                texts.append(result[key][1])
                value.append({
                    'loc': [int(x) for x in result[key][0]],
                    'rate': result[key][2],
                    'text': result[key][1]
                })

            e = time.time()
            traceback.print_exc()
            print("识别结果: {}".format(json.dumps(value, ensure_ascii=False)))
            result = {
                'error_code': 0,
                'time': timec,  # timestamp
                'value': value,  # prediction results
                'text': u'\n'.join(texts),
                'scale': scale,
                'author': 'sloanyyc',
                'speed_time(ms)': int((e - s) * 1000)  # recognition time in ms
            }
            return json.dumps(result, ensure_ascii=False)
        except Exception as ex:
            e = time.time()
            print('recognition error', ex)
            result = {
                'error_code': 1004,
                'time': timec,  # timestamp
                'value': [],  # prediction results
                'scale': 1,
                'author': 'sloanyyc',
                'speed_time(ms)': int((e - s) * 1000)  # recognition time in ms
            }
            return json.dumps(result, ensure_ascii=False)

    else:
        content = json.dumps({
            "error_code": 1001,
            'error_message': 'only file via form post support'
        })
        resp = response_headers(content)
        return resp
Example #28
                'author': 'sloanyyc',
                'speed_time(ms)': int((e - s) * 1000)  # recognition time in ms
            }
            return json.dumps(result, ensure_ascii=False)
        except Exception as ex:
            e = time.time()
            print('recognition error', ex)
            result = {
                'error_code': 1004,
                'time': timec,  # timestamp
                'value': [],  # prediction results
                'scale': 1,
                'author': 'sloanyyc',
                'speed_time(ms)': int((e - s) * 1000)  # recognition time in ms
            }
            return json.dumps(result, ensure_ascii=False)

    else:
        content = json.dumps({
            "error_code": 1001,
            'error_message': 'only file via form post support'
        })
        resp = response_headers(content)
        return resp


if __name__ == '__main__':
    first_load_image = Image.open('test.png', mode="r")
    ocr.model(np.array(first_load_image.convert('RGB')))
    app.run(host='0.0.0.0', debug=False, port=9436)
Example #29
#-*- coding:utf-8 -*-
import os
import ocr
import time
import shutil
import numpy as np
from PIL import Image
from glob import glob

image_files = glob('./test_images/*.*')

if __name__ == '__main__':
    result_dir = './test_result'
    if os.path.exists(result_dir):
        shutil.rmtree(result_dir)
    os.mkdir(result_dir)

    for image_file in sorted(image_files):
        image = np.array(Image.open(image_file).convert('RGB'))
        t = time.time()
        result, image_framed = ocr.model(image)
        output_file = os.path.join(result_dir, image_file.split('/')[-1])
        Image.fromarray(image_framed).save(output_file)
        print("Mission complete, it took {:.3f}s".format(time.time() - t))
        print("\nRecognition Result:\n")
        import pdb
        pdb.set_trace()
        for key in result:
            print(result[key][1])
Example #30
import os
import codecs
import sys
import time
import ocr
import numpy as np
from PIL import Image
from glob import glob

if __name__ == '__main__':

    # TODO: you should change the path
    if sys.argv[1] == 'test':
        name = 'test'
    else:
        name = 'train'
    image_files = glob('../../data/' + name + '_images/*.*')
    with codecs.open('../../data/result_' + name + '_ocr.txt', 'w', encoding='utf-8') as f:
        t = time.time()
        for image_file in sorted(image_files):
            filename = os.path.split(image_file)[1]
            image = np.array(Image.open(image_file).convert('RGB'))
            result, image_framed = ocr.model(image)
            text = filename
            for key in result:
                text += '\t'
                text += result[key][1]
            text += '\n'
            print(text)
            f.write(text)
    print('------> total cost', time.time() - t, '------->images', len(image_files))
    print('done')



Example #31
#-*- coding:utf-8 -*-
import os
import ocr
import time
import shutil
import numpy as np
from PIL import Image
from glob import glob
image_files = glob('./test_images/*.*')


if __name__ == '__main__':
    result_dir = './test_result'
    # if os.path.exists(result_dir):
    #     shutil.rmtree(result_dir)
    # os.mkdir(result_dir)

    for image_file in sorted(image_files):
        image = np.array(Image.open(image_file).convert('RGB'))
        t = time.time()
        result, image_framed = ocr.model(image, detectAngle=True)
        # output_file = os.path.join(result_dir, image_file.split('/')[-1])
        # Image.fromarray(image_framed).save(output_file)
        print("Mission complete, it took {:.3f}s".format(time.time() - t))
        print("\nRecognition Result:\n")
        for key in result:
            print(result[key][1])