Example #1
    def ocr(self, job_id):
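        """Crop each block recorded in the step-2 pickle from the source image,
        run OCR on every crop, and return a list of {'name', 'value'} results."""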

        picData = tools.readPickle(job_id, 'step2')
        images = picData['images']['Points']
        file_path = picData['filePath']
        logging.info('%s LOAD DATA FROM PICKLE' % tools.getTempTime())

        img = cv2.imread(file_path, cv2.IMREAD_UNCHANGED)
        img_shape = img.shape
        logging.info('%s LOAD IMG    width: %d, height: %d' %
                     (tools.getTempTime(), img_shape[1], img_shape[0]))

        datas = []
        for name, points in images.items():
            data = dict()
            data['name'] = name
            ocrTemp, _ = tools.createRoi(img, points, job_id + '/step3',
                                         '3_' + name + '_Roi')
            if data['name'] in ('customer', 'date'):
                data['value'] = tools.callOcr(ocrTemp, job_id + '/step3',
                                              img_shape[1], 1992)
            else:
                data['value'] = tools.callOcr(ocrTemp, job_id + '/step3',
                                              img_shape[1])
            datas.append(data)
            logging.info('%s CALL OCR FOR BLOCK %s    result: %s' %
                         (tools.getTempTime(), data['name'], data['value']))

        return datas
Example #2
    def ocr(self, job_id):
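        """OCR every visible ROI defined in the job's type-specific recognition
        config and return a list of {'name', 'value'} results."""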
        all_config = recognize.getConfig()

        job_data = tools.loadJobData(job_id)
        logging.debug('Load job data: %s' % str(job_data))

        cur_config = all_config[job_data['type']]
        logging.debug('Load recognize config: %s' % str(cur_config))

        img = cv2.imread(job_data['file'], cv2.IMREAD_UNCHANGED)

        res_ocr = []

        for roi_name in cur_config['roi']:
            roi_config = cur_config['roi'][roi_name]

            if roi_config.get('hide', False):
                logging.info('Ignore roi [%s] because it is hidden' % roi_name)
                continue

            roi_ocr_data = {
                'name': roi_name,
                'value': None,
            }

            if roi_config.get('ocr', True):
                roi_value_type = roi_config.get('type', 'text')

                if roi_value_type not in tools.OCR_TYPE_MAPPING:
                    logging.error('ROI type %s does not exist, skipping' %
                                  roi_value_type)
                    continue

                roi_img, roi_path = tools.createRoi2(img, roi_name, roi_config,
                                                     job_id + '/step3')
                roi_ocr_data['value'] = tools.callOcr(roi_img,
                                                      job_id + '/step3',
                                                      roi_config)

                logging.info('OCR for roi [%s, type=%s] = %s' %
                             (roi_name, roi_value_type, roi_ocr_data['value']))

            res_ocr.append(roi_ocr_data)

        return res_ocr
Example #3
    def detectType(self, image, job_id):
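        """Match the image against every configured document type using SIFT
        features, warp the best match onto the template geometry, OCR the
        validation ROI, write compressed/lossless copies, and save the job
        data for the following steps."""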
        config = recognize.getConfig()

        ############################################
        # 1. find matched type and config
        ############################################
        grey = cv2.cvtColor(image, cv2.COLOR_BGR2GRAY)

        logging.info("Start match...")

        # if more than one type matches, use the one with the highest match rate.
        # TODO: the highest match rate is not a precise criterion and should be improved

        cur_match_type_name = None
        cur_match_rate = -1
        cur_match_M = None
        cur_polygons = None

        logging.debug("Start match feature")

        sift = cv2.xfeatures2d.SIFT_create()
        ori_kp, ori_des = sift.detectAndCompute(grey, None)

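        # compute keypoints/descriptors once on the full greyscale image and
        # match them against every configured document type below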
        for type_name in config:
            match_rate, detect_img, perspective_M, polygons = self.matchAndImageCut(sift, grey,
                                                                                    ori_kp, ori_des,
                                                                                    type_name,
                                                                                    config[type_name]['feature'],
                                                                                    config[type_name]['image'],
                                                                                    job_id)

            if match_rate > 0:
                logging.info("[%s] is matched, rate = %s" % (type_name, match_rate))
                tools.writeImageJob(detect_img, job_id + '/step1', '1 match [%s] detect' % type_name)
                # tools.writeImageJob(cut_img, job_id + '/step1', '1 match [%s] cut' % type_name)

                if match_rate > cur_match_rate:
                    cur_match_rate = match_rate
                    cur_match_type_name = type_name
                    cur_match_M = perspective_M
                    cur_polygons = polygons

            else:
                logging.info("[%s] is not matched" % type_name)

        logging.debug("End match feature")

        if not cur_match_type_name:
            logging.info("No feature matched")
            return self.make_error_response(image)

        logging.info("Match [%s] at %.2f%%, M=%s" % (cur_match_type_name, cur_match_rate, cur_match_M))

        ############################################
        # 2. rotate the image
        # TODO: should support different kinds of rotation/perspective methods.
        ############################################
        cur_config = config[cur_match_type_name]
        perspective_img = None

        if cur_config['rotate'] == 'perspective':
            perspective_img = cv2.warpPerspective(image, cur_match_M,
                                                  (cur_config['image']['w'], cur_config['image']['h']),
                                                  flags=cv2.INTER_LANCZOS4)

            tools.writeImageJob(perspective_img, job_id + '/step1', '2 perspective-%s' % cur_match_type_name)
        else:
            logging.error('rotate %s is not supported' % cur_config['rotate'])
            return self.make_error_response(image)

        # draw all roi in img
        perspective_draw_img = perspective_img.copy()
        for roiName in cur_config['roi']:
            tools.drawRoi(perspective_draw_img, cur_config['roi'][roiName])

        tools.writeImageJob(perspective_draw_img, job_id + '/step1', '3 mark roi')

        ############################################
        # 3. extract the validation area
        ############################################
        validate_roi_name = cur_config['vaildate']['roi']  # 'vaildate' spelling kept to match the key used in the config

        validate_roi_config = cur_config['roi'].get(validate_roi_name, None)

        if not validate_roi_config:
            logging.error('Validate ROI [%s] does not exist in the roi section' % validate_roi_name)
            return self.make_error_response(image)

        validate_roi, validate_roi_path = tools.createRoi2(perspective_img, validate_roi_name, validate_roi_config,
                                                           job_id + '/step1')
        ocr_result = tools.callOcr(validate_roi, job_id + '/step1', validate_roi_config)

        logging.info('Validate ROI OCR result = %s' % ocr_result)

        ############################################
        # 4. create compress jpg image
        ############################################
        compress_path = tools.writeImageJob(perspective_img, job_id + '/step1', 'compressed', quality='compress')
        normalize_path = tools.writeImageJob(perspective_img, job_id + '/step1', 'normalized', quality='lossless')

        ############################################
        # 5. write to yaml
        ############################################
        data = {
            'file': normalize_path,
            'type': cur_match_type_name
        }

        tools.saveJobData(data, job_id)

        logging.info('Save to data.yaml: %s' % str(data))

        return {
            'ori_image': {
                # w, h of the original image, unchanged (as uploaded via WeChat)
                'w': image.shape[1],
                'h': image.shape[0],
            },
            'normalize_image': {
                # w, h, file: normalized image (rotate, resize, perspective)
                'w': int(perspective_img.shape[1]) if perspective_img is not None else None,
                'h': int(perspective_img.shape[0]) if perspective_img is not None else None,
                'file': compress_path,
                'extract_polygon': cur_polygons,
            },
            # the detected image type and its value; roi coordinates are relative to the normalized image
            # if no type matched, this is set to None
            'type': {
                'name': cur_match_type_name,
                'desc': cur_config.get('name', cur_match_type_name),
                'value': ocr_result,
                'roi': {
                    'x': validate_roi_config['x'],
                    'y': validate_roi_config['y'],
                    'w': validate_roi_config['w'],
                    'h': validate_roi_config['h'],
                    'file': validate_roi_path
                }
            }
        }
Example #4
    def detectInvoice(self, job_id, file_path):
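        """Deskew and crop the uploaded invoice image, locate the template
        feature points, extract black/blue text layers, OCR the document-type
        ROI, and pickle the intermediate results for the next step."""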
        img = cv2.imread(file_path, cv2.IMREAD_UNCHANGED)
        origin_shape = img.shape
        logging.info('%s LOAD IMG    width:%d, height:%d' % (tools.getTempTime(), origin_shape[1], origin_shape[0]))

        img, rotate_sum = tools.findLinesandRotate(img)
        rotated_shape = img.shape
        img, img_detect, new_left_top = tools.matchAndImageCut(img)
        img_shape = img.shape
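        # map the crop's new top-left corner back into the original image's
        # coordinate frame by undoing the rotation about the image centre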
        rotate = np.deg2rad(-1*rotate_sum)
        x = new_left_top[0]-int(rotated_shape[1]/2)
        y = new_left_top[1]-int(rotated_shape[0]/2)
        newX, newY = int(np.cos(rotate)*x-np.sin(rotate)*y), int(np.cos(rotate)*y+np.sin(rotate)*x)
        origX, origY = -newY+int(origin_shape[1]/2), newX+int(origin_shape[0]/2)
        a_location, b_location = tools.detectFeaturePoints(img)
        logging.info(
            '%s IMG AFTER ROTATE AND CUT  width:%d, height:%d' % (tools.getTempTime(), img_shape[1], img_shape[0]))

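        # extract the black and blue text layers and convert them to greyscale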
        img_copy1 = img.copy()
        img_copy2 = img.copy()
        img_compress = img.copy()
        img_black = tools.exBlackWords(img_copy1)
        img_blue = tools.exBlueWords(img_copy2)
        img_black = cv2.cvtColor(img_black, cv2.COLOR_BGR2GRAY)
        img_blue = cv2.cvtColor(img_blue, cv2.COLOR_BGR2GRAY)

        cv2.rectangle(img_black, a_location[0], a_location[1], 0, 2)
        cv2.rectangle(img_black, b_location[0], b_location[1], 0, 2)
        logging.info('%s DRAW DOCTYPE TEMPLATE (%d, %d), (%d, %d)' % (
            tools.getTempTime(), a_location[0][0], a_location[0][1], a_location[1][0], a_location[1][1]))
        logging.info('%s DRAW DOCTYPE TEMPLATE (%d, %d), (%d, %d)' % (
            tools.getTempTime(), b_location[0][0], b_location[0][1], b_location[1][0], b_location[1][1]))

        # create a compressed preview image (width scaled to ~1000 px)
        k = img_shape[1] / 1000.0
        compress_img = cv2.resize(img_compress, (int(img_shape[1] / k), int(img_shape[0] / k)))
        logging.info('%s COMPRESS IMAGE SAVED' % tools.getTempTime())

        # write img to file
        file_path = tools.writeImageJob(img, job_id + '/step1', '1_rotated')
        compress_path = tools.writeImageJob(compress_img, job_id + '/step1', '1_compressed')
        tools.writeImageJob(img_black, job_id + '/step1', '1_black')
        tools.writeImageJob(img_blue, job_id + '/step1', '1_blue')
        tools.writeImageJob(img_detect, job_id + '/step1', '1_detect')
        logging.info('%s ROTATED IMAGE SAVED IN %s' % (tools.getTempTime(), file_path))

        a_roi, a_path = tools.createRoi(img, a_location, job_id + '/step1', '1_type_Roi')
        b_roi, b_path = tools.createRoi(img, b_location, job_id + '/step1', '1_number_Roi')
        docType = tools.callOcr(a_roi, job_id + '/step1', img_shape[1])
        logging.info('%s CREATE TEMPLATE ROIS' % tools.getTempTime())
        logging.info('%s CALL OCR    docType: %s' % (tools.getTempTime(), docType))

        # write pickle to file for the next step to use
        data = {
            'feaLoca': [a_location, b_location],
            'originLeftTop': [origX, origY],
            'filePath': file_path,
            'compressPath': compress_path,
            'imgShape': img_shape,
            'originShape': origin_shape,
            'rotateSum': rotate_sum,
            'docType': docType,
            'feaPath': [a_path, b_path],
        }
        tools.writePickle(data, job_id, 'step1')
        logging.info('%s WRITE PICKLE' % tools.getTempTime())

        return data