Example #1
def sift_test(save=False):
    global sift_time_use
    # Initialize the SIFT detector
    sift = cv2.xfeatures2d.SIFT_create()
    img1 = get_mask("idcard_mask.jpg")  # queryImage
    for i in range(get_idcard_count()):
        img2, img_name = get_idcard(i)  # trainImage
        save_name = "result" + img_name
        start = time.time()
        img1, _ = iu.img_resize(img1, MATCH_PIC_WIDTH)
        img2, _ = iu.img_resize(img2, MATCH_PIC_WIDTH)

        # find the keypoints and descriptors with SIFT
        kp1, des1 = sift.detectAndCompute(img1, None)
        kp2, des2 = sift.detectAndCompute(img2, None)

        # BFMatcher with default params
        bf = cv2.BFMatcher()
        matches = bf.knnMatch(des1, des2, k=2)

        # Apply ratio test
        good = []
        for m, n in matches:
            if m.distance < 0.75 * n.distance:
                good.append(m)
        end = time.time()
        sift_time_use += end - start
        if save:
            folder = "test_sift"
            save_match(folder, save_name, img1, kp1, img2, kp2, good)
            print("finish pic %s" % save_name)
    print("sift pic num=%d time used = %d"%(get_idcard_count(),int(sift_time_use*1000)))
Example #2
def orb_test():
    img1 = get_mask("idcard_mask.jpg")  # queryImage
    for i in range(get_idcard_count()):
        img2, img_name = get_idcard(i)  # trainImage
        save_name = "result" + img_name
        img1, _ = iu.img_resize(img1, MATCH_PIC_WIDTH)
        img2, _ = iu.img_resize(img2, MATCH_PIC_WIDTH)
        # Initialize the ORB detector
        orb = cv2.ORB_create()
        # find the keypoints and descriptors with ORB
        kp1, des1 = orb.detectAndCompute(img1, None)
        kp2, des2 = orb.detectAndCompute(img2, None)
        # create BFMatcher object
        bf = cv2.BFMatcher(cv2.NORM_HAMMING, crossCheck=True)
        # Match descriptors.
        matches = bf.match(des1, des2)

        # Sort them in the order of their distance.
        matches = sorted(matches, key=lambda x: x.distance)
        img3 = np.zeros((1000, 1000, 3), dtype=np.uint8)  # canvas for the commented-out drawMatches call below
        # Draw first 10 matches.
        # img3 = cv2.drawMatches(img1,kp1,img2,kp2,matches[:10], outImg=img3,flags=cv2.DRAW_MATCHES_FLAGS_DEFAULT)
        #
        # plt.imshow(img3),plt.show()
        folder = "test_orb"
        save_match(folder, save_name, img1, kp1, img2, kp2, matches[:10])
        print("finish pic %s" % save_name)
Example #3
def idcard_ocr(request):
    start = time.time()
    ori_time = start
    # Initialize the response object
    response_data = dict(code=20001, message="", result=None)
    if request.method == 'POST':
        # 0 - front of the ID card, 1 - back
        card_side = request.POST.get('side')
        if not card_side:
            # Detect the front side by default
            card_side = 0
        # Whether there is a forgery risk; if so, enable risk detection
        detect_risk = request.POST.get('risk')
        if detect_risk is None:
            # Risk detection is disabled by default
            detect_risk = False
        # Whether to detect the ID card's rotation angle
        detect_direction = request.POST.get('direction')
        if detect_direction is None:
            detect_direction = False
        time_used = time.time() - start
        start += time_used
        logger.info("prehandle timeUsed = %d ms" % (int(time_used * 1000)))

        try:
            card_file = request.FILES['image']
            image = Image.open(card_file, mode="r").convert("RGB")
            imgArray = np.asarray(image)
            logger.info("get a image shape = %s" % str(imgArray.shape))
            img_mat = cv2.cvtColor(imgArray, cv2.COLOR_RGB2BGR)
            img_mat, scale = iu.img_resize(img_mat, 1920)
            time_used = time.time() - start
            start += time_used
            logger.info("file load timeUsed = %d ms" % (int(time_used * 1000)))

            img_full, _ = locate_card.locate(img_mat)
            time_used = time.time() - start
            start += time_used
            logger.info("card location timeUsed = %d ms" %
                        (int(time_used * 1000)))

            result_dict = idcardocr.idcardocr(img_full)
            time_used = time.time() - ori_time
            logger.info("total procession timeUsed = %d ms" %
                        (int(time_used * 1000)))
            response_data = dict(code=0, message="ok", result=result_dict)
        except (MultiValueDictKeyError, OSError) as error:
            logger.error("图片参数错误" + traceback.format_exc())
            response_data["message"] = "图片参数错误"
        except ServiceException as error:
            logger.error(str(error))
            response_data["message"] = str(error)
    else:
        response_data["message"] = "请求服务方式错误"
    ret_str = json.dumps(response_data)
    logger.info("respond = %s" % response_data)
    return HttpResponse(ret_str, content_type='application/json')
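A sketch of how a client might call this view, assuming it is routed at /idcard_ocr/ (the URL and file name are hypothetical; the form fields side, risk, direction and image come from the handler above):

import requests

url = "http://localhost:8000/idcard_ocr/"  # hypothetical route
with open("idcard_front.jpg", "rb") as f:
    resp = requests.post(url,
                         data={"side": 0, "risk": 1, "direction": 1},
                         files={"image": f})
# On success the body looks like {"code": 0, "message": "ok", "result": {...}}
print(resp.json())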
Example #4
    def locate(self, img_target):
        """
        寻找图像中的身份证并进行图像矫正(透视变换)
        :param target:
        :return:
        """
        global second_homo_time
        start = time.time()
        img_target_gray = cv2.cvtColor(
            img_target, cv2.COLOR_BGR2GRAY)  # trainImage in Gray
        img_target_gray_small, _ = iu.img_resize(img_target_gray,
                                                 XFEATURE_IMG_WIDTH)

        pts, dst = self.findMatchWithXFeature(img_target_gray_small,
                                              img_template_small)
        hl, wl = img_target_gray.shape
        h_temp, w_temp = img_template_small.shape
        ratio = wl / XFEATURE_IMG_WIDTH
        pts = pts * ratio
        dst = dst * ratio
        # Draw the matched card outline on the gray image
        img_target_gray = cv2.polylines(img_target_gray, [np.int32(dst)], True,
                                        255, 3, cv2.LINE_AA)
        # FIXME
        # iu.showimg(img_target_gray,"large")

        # Rectify the image (perspective transform)
        h_time = time.time()
        M_r, mask_r = cv2.findHomography(dst, pts, 0, 5.0)
        second_homo_time += time.time() - h_time
        im_r = cv2.warpPerspective(img_target, M_r,
                                   (int(w_temp * ratio), int(h_temp * ratio)))

        # FIXME
        # iu.showimg(im_r,"after",True)
        time_used = time.time() - start
        start += time_used
        print("match timeUsed = %d ms" % (int(time_used * 1000)))
        # draw_params = dict(matchColor = (0,255,0), # draw matches in green color
        #           singlePointColor = None,
        #           matchesMask = matchesMask, # draw only inliers
        #           flags = 2)
        # img3 = cv2.drawMatches(img1,kp1,img2,kp2,good,None,**draw_params)
        # plt.imshow(img3, 'gray'),plt.show()
        # im_r is the rectified card image
        return im_r, img_target_gray
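findMatchWithXFeature is not shown in this snippet; judging from Example #6, it presumably runs SIFT + FLANN matching followed by a RANSAC homography. A sketch under that assumption (MIN_MATCH_COUNT and ServiceException come from the module header in Example #5; the real method may differ):

    def findMatchWithXFeature(self, img_target, img_template):
        # Sketch only: mirrors the SIFT + FLANN + RANSAC flow of
        # sift_test_with_flann() in Example #6.
        sift = cv2.xfeatures2d.SIFT_create()
        kp1, des1 = sift.detectAndCompute(img_template, None)
        kp2, des2 = sift.detectAndCompute(img_target, None)
        flann = cv2.FlannBasedMatcher(dict(algorithm=1, trees=5),
                                      dict(checks=10))
        matches = flann.knnMatch(des1, des2, k=2)
        good = [m for m, n in matches if m.distance < 0.7 * n.distance]
        if len(good) < MIN_MATCH_COUNT:
            raise ServiceException("not enough matches: %d/%d"
                                   % (len(good), MIN_MATCH_COUNT))
        src_pts = np.float32([kp1[m.queryIdx].pt for m in good]).reshape(-1, 1, 2)
        dst_pts = np.float32([kp2[m.trainIdx].pt for m in good]).reshape(-1, 1, 2)
        M, _ = cv2.findHomography(src_pts, dst_pts, cv2.RANSAC, 5.0)
        h, w = img_template.shape
        pts = np.float32([[0, 0], [0, h - 1], [w - 1, h - 1],
                          [w - 1, 0]]).reshape(-1, 1, 2)
        # pts are the template corners; the second value is their projection
        # into the target image, matching how locate() uses the return.
        return pts, cv2.perspectiveTransform(pts, M)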
Example #5
import numpy as np
import time
import cv2
import os
from django_web.model import ServiceException
from idcard_ocr.settings import BASE_DIR
from django_web.util import img_util as iu

RESOURCE_PATH = os.path.join(BASE_DIR, "django_web/resource")
MASK = os.path.join(RESOURCE_PATH, "mask")

template = os.path.join(MASK, 'idcard_mask.jpg')
img_template = cv2.imread(template, 0)  # queryImage in Gray

XFEATURE_IMG_WIDTH = 400
img_template_small, _ = iu.img_resize(img_template, XFEATURE_IMG_WIDTH)
MIN_MATCH_COUNT = 10


class LocateCard:
    def __init__(self):
        pass

    def locate_with_file(self, file_name):
        # img2 = idocr.hist_equal(img2)
        img_target = cv2.imread(file_name)
        return self.locate(img_target)

    # img_target is the image to be recognized
    def locate(self, img_target):
        """
Example #6
def sift_test_with_flann(save=False):
    global sift_time_use
    # Initialize the SIFT detector
    sift = cv2.xfeatures2d.SIFT_create()
    img1 = get_mask("idcard_mask.jpg")  # queryImage
    for i in range(get_idcard_count()):
        img2, img_name = get_idcard(i)  # trainImage
        save_name = "result" + img_name
        start = time.time()
        # template image
        img1, _ = iu.img_resize(img1, 600)
        # target image
        img2, _ = iu.img_resize(img2, 600)

        # find the keypoints and descriptors with SIFT
        kp1, des1 = sift.detectAndCompute(img1, None)
        kp2, des2 = sift.detectAndCompute(img2, None)

        FLANN_INDEX_KDTREE = 1  # kd-tree index (0 selects FLANN's linear index)
        index_params = dict(algorithm=FLANN_INDEX_KDTREE, trees=5)
        search_params = dict(checks=10)

        flann = cv2.FlannBasedMatcher(index_params, search_params)
        matches = flann.knnMatch(des1, des2, k=2)

        # Apply ratio test
        good = []
        for m, n in matches:
            if m.distance < 0.70 * n.distance:
                good.append(m)


        if len(good) > MIN_MATCH_COUNT:
            src_pts = np.float32([kp1[m.queryIdx].pt for m in good]).reshape(-1, 1, 2)
            dst_pts = np.float32([kp2[m.trainIdx].pt for m in good]).reshape(-1, 1, 2)
            # Compute the homography mapping between the two images; M is the transform matrix
            M, mask = cv2.findHomography(src_pts, dst_pts, cv2.RANSAC, 5.0)
            matchesMask = mask.ravel().tolist()
            # Use the transform matrix M to project img1's outline into img2
            h, w = img1.shape
            pts = np.float32([[0, 0], [0, h - 1], [w - 1, h - 1], [w - 1, 0]]).reshape(-1, 1, 2)
            dst = cv2.perspectiveTransform(pts, M)

            # Draw the projected outline on the target image
            img_target_gray = cv2.polylines(img2, [np.int32(dst)], True, 255, 3, cv2.LINE_AA)
            # iu.showimg(img_target_gray)

            # Rectify the image (perspective transform)
            M_r, mask_r = cv2.findHomography(dst, pts, 0, 5.0)
            ratio = 1
            im_r = cv2.warpPerspective(img2, M_r, (int(w*ratio), int(h*ratio)))
            # FIXME
            # iu.showimg(im_r)
            # save_img("perspective",save_name,im_r)
        else:
            print("身份证匹配度不足 - %d/%d" % (len(good), MIN_MATCH_COUNT))
            matchesMask = None

        end = time.time()
        sift_time_use += end - start
        if save:
            folder = "test_sift_with_flann"
            save_match(folder, save_name, img1, kp1, img2, kp2, good)
            print("finish pic %s" % save_name)
    print("sift pic num=%d time used = %d"%(get_idcard_count(),int(sift_time_use*1000)))