Example #1
File: app.py Project: umer322/cnh-ocr
def cropRois(image, rects, multHeight=0.73, multWidth=0.97, topHeightCrop=30):
    crops = []
    data = {}
    # TODO cut off angle outliers here too
    angles = []

    for r in rects:
        box = rect2Box(r)
        W = r[1][0]
        H = r[1][1]

        Xs = [i[0] for i in box]
        Ys = [i[1] for i in box]
        x1 = min(Xs)
        x2 = max(Xs)
        y1 = min(Ys)
        y2 = max(Ys)

        rotated = False
        angle = r[2]

        if angle < -45:
            angle += 90
            rotated = True

        # calc the centroid
        center = (int((x1 + x2) / 2), int((y1 + y2) / 2))
        size = (int((x2 - x1)), int((y2 - y1)))
        #cv2.circle(image, center, 2, 255, -1)

        M = cv2.getRotationMatrix2D((size[0] / 2, size[1] / 2), angle, 1.0)

        # prepare the crop
        cropped = cv2.getRectSubPix(image, size, center)
        cropped = cv2.warpAffine(cropped, M, size)
        croppedW = W if not rotated else H
        croppedH = H if not rotated else W

        ratio = float(croppedW) / croppedH
        area = float(croppedW) * croppedH

        # keep only boxes whose aspect ratio is in the expected range
        if 2 < ratio < 16:
            #text = "{0:.2f}-{1:.2f} ".format(ratio, area)
            #cv2.putText(image, text, center, cv2.FONT_HERSHEY_SIMPLEX, 0.5, (0, 255, 0), 2)
            height_mult = multHeight if croppedH < topHeightCrop else 0.9
            croppedRotated = cv2.getRectSubPix(
                cropped,
                (int(croppedW * multWidth), int(croppedH * height_mult)),
                (size[0] / 2, size[1] / 2))
            # save the angles to calc the avg/std
            angles.append(angle)
            # save the crops
            crops.append(croppedRotated)
            # will process from top to bottom, so save it to sort later
            data[y1] = [croppedRotated, area, ratio, angle]

    return data, np.mean(np.array(angles)), np.std(np.array(angles))
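The rect2Box helper used above is not defined in this snippet; a minimal sketch, assuming it simply wraps cv2.boxPoints to turn a cv2.minAreaRect result into integer corner points (the helper body and the usage lines are assumptions):

import cv2

def rect2Box(rect):
    # assumed helper: ((cx, cy), (w, h), angle) -> four integer corners
    return cv2.boxPoints(rect).astype(int)

# usage sketch: rects typically come from cv2.minAreaRect over contours
# rects = [cv2.minAreaRect(c) for c in contours]
# data, mean_angle, std_angle = cropRois(image, rects)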
Example #2
def rotate_and_resize(image, rotation_matrix, old_size, new_size):
    rotated_image = cv2.warpAffine(src=image,
                                   M=rotation_matrix,
                                   dsize=old_size)
    resized_image = cv2.getRectSubPix(
        rotated_image, new_size, (int(old_size[0] / 2), int(old_size[1] / 2)))
    return resized_image
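A quick usage sketch on a synthetic image: build the rotation matrix with cv2.getRotationMatrix2D around the image center, then deskew and crop. The 7.5-degree angle and patch size are illustrative:

import cv2
import numpy as np

# synthetic test image: a white bar on a black background
img = np.zeros((240, 320, 3), dtype=np.uint8)
cv2.rectangle(img, (80, 100), (240, 140), (255, 255, 255), -1)

h, w = img.shape[:2]
M = cv2.getRotationMatrix2D(center=(w / 2, h / 2), angle=7.5, scale=1.0)
patch = rotate_and_resize(img, M, old_size=(w, h), new_size=(160, 40))
print(patch.shape)  # (40, 160, 3)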
Example #3
def take_center_square(image):
    """take the center square of the original image and returns it"""

    height, width = image.shape[:2]
    min_dimension = min(height, width)
    center = (width / 2, height / 2)

    square_center_image = cv2.getRectSubPix(image,
                                            (min_dimension, min_dimension),
                                            center)

    return square_center_image
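A minimal check of the helper, assuming a synthetic 300x400 input (so the center square should come back 300x300):

import numpy as np

img = np.zeros((300, 400, 3), dtype=np.uint8)
square = take_center_square(img)
print(square.shape)  # (300, 300, 3)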
Example #4
def extractPlate(imgOriginal, listOfMatchingChars):
    possiblePlate = PossiblePlate.PossiblePlate()

    listOfMatchingChars.sort(key=lambda matchingChar: matchingChar.intCenterX)
    firstChar = listOfMatchingChars[0]
    lastChar = listOfMatchingChars[-1]

    # the plate center is the midpoint between the first and last characters
    fltPlateCenterX = (firstChar.intCenterX + lastChar.intCenterX) / 2.0
    fltPlateCenterY = (firstChar.intCenterY + lastChar.intCenterY) / 2.0
    ptPlateCenter = fltPlateCenterX, fltPlateCenterY

    intPlateWidth = int(
        (lastChar.intBoundingRectX + lastChar.intBoundingRectWidth -
         firstChar.intBoundingRectX) * PLATE_WIDTH_PADDING_FACTOR)

    intTotalOfCharHeights = 0
    for matchingChar in listOfMatchingChars:
        intTotalOfCharHeights = intTotalOfCharHeights + matchingChar.intBoundingRectHeight

    fltAverageCharHeight = intTotalOfCharHeights / len(listOfMatchingChars)
    intPlateHeight = int(fltAverageCharHeight * PLATE_HEIGHT_PADDING_FACTOR)

    # correction angle from the right triangle between the first and last char centers
    fltOpposite = lastChar.intCenterY - firstChar.intCenterY
    fltHypotenuse = DetectChars.distanceBetweenChars(firstChar, lastChar)
    fltCorrectionAngleInRad = math.asin(fltOpposite / fltHypotenuse)
    fltCorrectionAngleInDeg = fltCorrectionAngleInRad * (180.0 / math.pi)
    possiblePlate.rrLocationOfPlateInScene = (tuple(ptPlateCenter),
                                              (intPlateWidth, intPlateHeight),
                                              fltCorrectionAngleInDeg)
    rotationMatrix = cv2.getRotationMatrix2D(tuple(ptPlateCenter),
                                             fltCorrectionAngleInDeg, 1.0)
    height, width, numChannels = imgOriginal.shape
    imgRotated = cv2.warpAffine(imgOriginal, rotationMatrix, (width, height))
    imgCropped = cv2.getRectSubPix(imgRotated, (intPlateWidth, intPlateHeight),
                                   tuple(ptPlateCenter))
    possiblePlate.imgPlate = imgCropped
    return possiblePlate
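The deskew angle above comes from the right triangle between the first and last character centers; a quick numeric check with hypothetical values:

import math

# hypothetical: the last char center sits 12 px lower across a 150 px hypotenuse
fltOpposite = 12.0
fltHypotenuse = 150.0
angle_deg = math.asin(fltOpposite / fltHypotenuse) * (180.0 / math.pi)
print(round(angle_deg, 2))  # ~4.59 degrees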
Example #5
    def process_image(self, image, debug_mode):
        """Detects license plates in the given 'image' using haar features. 'debug_mode' can be set to true to save the
        found image patches for debugging"""
        gray_image = cv2.cvtColor(image, cv2.COLOR_BGR2GRAY)
        lps = self.classifier.detectMultiScale(gray_image)

        if debug_mode:
            for box in lps:
                (left, top, w, h) = box
                lp_image_patch = cv2.getRectSubPix(image, (w, h),
                                                   (left + w / 2, top + h / 2))
                save_debug_image(lp_image_patch,
                                 str(abs(hash(image.tobytes()))),
                                 "plate_candidates")

        plates: list[Plate] = []
        for box in lps:
            plate = Plate()
            left, top, width, height = box
            plate.box = [top, left, top + height, left + width]
            plates.append(plate)
        return plates
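The enclosing class is not shown; a minimal sketch, assuming self.classifier is a cv2.CascadeClassifier loaded from a Haar cascade XML file (the class name and cascade path are hypothetical):

import cv2

class PlateDetector:
    def __init__(self, cascade_path):
        # hypothetical: cascade_path points at a licence-plate cascade XML
        self.classifier = cv2.CascadeClassifier(cascade_path)
    # process_image from above would be attached here

# detector = PlateDetector('haarcascade_russian_plate_number.xml')
# plates = detector.process_image(image, debug_mode=False)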
Example #6
def retrieve_dipstick_image(image, contour):
    """ Calculate the minimum area bounding the dipstick image, then apply a
    rotation matrix to straighten the rectangle. If the dipstick has been rotated
    into a vertical position it is rotated anticlockwise. The horizontal dipstick
    image is then resized for extracting squares on the dipstick.
    """
    try:
        rectangle = cv.minAreaRect(contour)
    except cv.error:
        return None

    center, size, theta = rectangle
    height, width = image.shape[:2]

    center, size = tuple(map(int, center)), tuple(map(int, size))
    matrix = cv.getRotationMatrix2D(center, theta, 1)
    dst = cv.warpAffine(image, matrix, (width, height))
    dipstick = cv.getRectSubPix(dst, size, center)

    if dipstick.shape[0] > dipstick.shape[1]:
        dipstick = cv.rotate(dipstick, cv.ROTATE_90_COUNTERCLOCKWISE)

    dipstick = imutils.resize(dipstick, width=800)
    return dipstick
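A hedged usage sketch on a synthetic image, with a thick tilted line standing in for the dipstick; a real input would come from thresholding a photo:

import cv2 as cv
import numpy as np

img = np.zeros((400, 400, 3), dtype=np.uint8)
cv.line(img, (50, 300), (350, 250), (255, 255, 255), 30)

gray = cv.cvtColor(img, cv.COLOR_BGR2GRAY)
contours, _ = cv.findContours(gray, cv.RETR_EXTERNAL, cv.CHAIN_APPROX_SIMPLE)
dipstick = retrieve_dipstick_image(img, max(contours, key=cv.contourArea))
print(dipstick.shape[1])  # 800, after the imutils resize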
Example #7
def videoCap():
    # pytesseract.pytesseract.tesseract_cmd = "C:/Program Files/Tesseract-OCR/tesseract"
    cap = cv2.VideoCapture(
        'C:/Users/jeawa/Desktop/project/LPR_Project/project/video/test_car.mp4'
    )

    count = 0
    before_chars = ""

    while True:
        e1 = cv2.getTickCount()
        fps = cap.get(cv2.CAP_PROP_FPS)

        ret, img_ori = cap.read()
        if not ret:  # stop when the video ends
            break

        height, width, channel = img_ori.shape  # image height, width, channels

        gray = cv2.cvtColor(img_ori, cv2.COLOR_BGR2GRAY)

        # img_ori = cv2.imread('LPR_Project/project/image/3.jpg')  # load a single image instead
        # gray = cv2.cvtColor(img_ori, cv2.COLOR_BGR2GRAY)

        img_blurred = cv2.GaussianBlur(gray, ksize=(3, 3), sigmaX=0)  # remove noise

        img_thresh = cv2.adaptiveThreshold(
            img_blurred,
            maxValue=255.0,
            adaptiveMethod=cv2.ADAPTIVE_THRESH_GAUSSIAN_C,
            thresholdType=cv2.THRESH_BINARY_INV,
            blockSize=19,  # tuned value; 12, 15, 20 were also tried
            C=9)

        contours, _ = cv2.findContours(  # find the contours
            img_thresh,
            mode=cv2.RETR_LIST,
            method=cv2.CHAIN_APPROX_TC89_KCOS
        )

        temp_result = np.zeros((height, width, channel), dtype=np.uint8)

        cv2.drawContours(
            temp_result,  # blank canvas to draw onto
            contours=contours,  # contour data
            contourIdx=-1,  # -1: draw all contours
            color=(255, 255, 255),
            thickness=1)

        contours_dict = []
        for contour in contours:
            x, y, w, h = cv2.boundingRect(contour)  # rectangle enclosing the contour
            # cv2.rectangle(
            #     temp_result,
            #     pt1=(x, y),
            #     pt2=(x+w, y+h),
            #     color=(255, 255, 255),
            #     thickness=1
            # )
            contours_dict.append({
                'contour': contour,
                'x': x,
                'y': y,
                'w': w,
                'h': h,
                'cx': x + (w / 2),  # center coordinates
                'cy': y + (h / 2)
            })
        MIN_AREA, MAX_AREA = 100, 1000  # min/max area of the bounding rect
        MIN_WIDTH, MIN_HEIGHT = 2, 8  # min width and height of the bounding rect
        MIN_RATIO, MAX_RATIO = 0.3, 0.9  # allowed width-to-height ratio range

        possible_contours = []  # rectangles satisfying the conditions above
        cnt = 0
        for d in contours_dict:
            area = d['w'] * d['h']  # width * height = area
            ratio = d['w'] / d['h']  # width / height = aspect ratio

            if MIN_AREA < area < MAX_AREA and d['w'] > MIN_WIDTH and d[
                    'h'] > MIN_HEIGHT and MIN_RATIO < ratio < MAX_RATIO:
                d['idx'] = cnt  # store the index of each qualifying contour
                cnt += 1
                possible_contours.append(d)  # keep it as a candidate

        # temp_result = np.zeros((height, width, channel), dtype=np.uint8)

        for d in possible_contours:
            # cv2.drawContours(temp_result, d['contour'], -1, (255, 255, 255))
            cv2.rectangle(temp_result,
                          pt1=(d['x'], d['y']),
                          pt2=(d['x'] + d['w'], d['y'] + d['h']),
                          color=(0, 0, 255),
                          thickness=1)

        MAX_DIAG_MULTIPLYER = 4  # centers must lie within 4x the diagonal length
        MAX_ANGLE_DIFF = 12.0  # maximum angle difference (degrees)
        MAX_AREA_DIFF = 0.3  # maximum area difference ratio
        MAX_WIDTH_DIFF = 0.5  # maximum width difference ratio
        MAX_HEIGHT_DIFF = 0.6  # maximum height difference ratio
        MIN_N_MATCHED = 4  # discard groups with fewer matched chars than this

        def find_chars(contour_list):
            matched_result_idx = []  # lists of matched contour indices
            for d1 in contour_list:
                matched_contours_idx = []
                for d2 in contour_list:
                    if d1['idx'] == d2['idx']:  # skip comparing a contour with itself
                        continue

                    dx = abs(d1['cx'] - d2['cx'])
                    dy = abs(d1['cy'] - d2['cy'])

                    diagonal_length1 = np.sqrt(d1['w']**2 + d1['h']**2)

                    distance = np.linalg.norm(
                        np.array([d1['cx'], d1['cy']]) -
                        np.array([d2['cx'], d2['cy']]))  # distance between centers
                    if dx == 0:
                        angle_diff = 90  # avoid division by zero; treat as 90 degrees
                    else:
                        angle_diff = np.degrees(np.arctan(dy / dx))  # angle between centers
                    area_diff = abs(d1['w'] * d1['h'] - d2['w'] * d2['h']) / (
                        d1['w'] * d1['h'])  # area difference ratio
                    width_diff = abs(d1['w'] - d2['w']) / d1['w']  # width difference ratio
                    height_diff = abs(d1['h'] - d2['h']) / d1['h']  # height difference ratio
                    # apply all thresholds
                    if distance < diagonal_length1 * MAX_DIAG_MULTIPLYER \
                            and angle_diff < MAX_ANGLE_DIFF and area_diff < MAX_AREA_DIFF \
                            and width_diff < MAX_WIDTH_DIFF and height_diff < MAX_HEIGHT_DIFF:
                        # d1 itself is appended after this loop
                        matched_contours_idx.append(d2['idx'])

                # append this contour
                matched_contours_idx.append(d1['idx'])

                if len(matched_contours_idx) < MIN_N_MATCHED:  # too few chars: not a plate
                    continue

                matched_result_idx.append(matched_contours_idx)  # final candidate group

                unmatched_contour_idx = []  # contours not in the candidate group
                for d4 in contour_list:
                    if d4['idx'] not in matched_contours_idx:
                        unmatched_contour_idx.append(d4['idx'])

                unmatched_contour = np.take(possible_contours,
                                            unmatched_contour_idx)

                # recursive
                recursive_contour_list = find_chars(unmatched_contour)

                for idx in recursive_contour_list:
                    matched_result_idx.append(idx)  # add groups found among the remaining contours

                break
            return matched_result_idx

        result_idx = find_chars(possible_contours)
        matched_result = []
        for idx_list in result_idx:
            matched_result.append(np.take(possible_contours, idx_list))
        # visualize possible contours
        temp_result = np.zeros((height, width, channel), dtype=np.uint8)
        for r in matched_result:
            for d in r:
                cv2.rectangle(temp_result,
                              pt1=(d['x'], d['y']),
                              pt2=(d['x'] + d['w'], d['y'] + d['h']),
                              color=(0, 0, 255),
                              thickness=1)

        PLATE_WIDTH_PADDING = 1.2  # 1.3
        PLATE_HEIGHT_PADDING = 1.5  # 1.5
        MIN_PLATE_RATIO = 3
        MAX_PLATE_RATIO = 7  #10

        plate_imgs = []
        plate_infos = []

        for i, matched_chars in enumerate(matched_result):
            sorted_chars = sorted(matched_chars,
                                  key=lambda x: x['cx'])  # sort left to right by x

            plate_cx = (sorted_chars[0]['cx'] +
                        sorted_chars[-1]['cx']) / 2  # plate center coordinates
            plate_cy = (sorted_chars[0]['cy'] + sorted_chars[-1]['cy']) / 2

            plate_width = (sorted_chars[-1]['x'] + sorted_chars[-1]['w'] -
                           sorted_chars[0]['x']) * PLATE_WIDTH_PADDING  # plate width

            # sum_height = 0
            # for d in sorted_chars:
            #     sum_height += d['h']

            # plate_height = int(sum_height / len(sorted_chars)
            #                    * PLATE_HEIGHT_PADDING)  # plate height
            plate_height = (sorted_chars[-1]['y'] - sorted_chars[0]['y'] +
                            sorted_chars[-1]['h']) * PLATE_HEIGHT_PADDING

            triangle_height = sorted_chars[-1]['cy'] - sorted_chars[0]['cy']
            triangle_hypotenus = np.linalg.norm(
                np.array([sorted_chars[0]['cx'], sorted_chars[0]['cy']]) -
                np.array([sorted_chars[-1]['cx'], sorted_chars[-1]['cy']]))

            angle = np.degrees(np.arcsin(triangle_height / triangle_hypotenus))

            rotation_matrix = cv2.getRotationMatrix2D(center=(plate_cx,
                                                              plate_cy),
                                                      angle=angle,
                                                      scale=1.0)

            # rotate to deskew the plate
            img_rotated = cv2.warpAffine(img_thresh,
                                         M=rotation_matrix,
                                         dsize=(width, height))

            img_cropped = cv2.getRectSubPix(img_rotated,
                                            patchSize=(int(plate_width),
                                                       int(plate_height)),
                                            center=(int(plate_cx),
                                                    int(plate_cy)))

            # reject crops whose aspect ratio is outside the plate range
            plate_ratio = img_cropped.shape[1] / img_cropped.shape[0]
            if plate_ratio < MIN_PLATE_RATIO or plate_ratio > MAX_PLATE_RATIO:
                continue

            plate_imgs.append(img_cropped)
            plate_infos.append({
                'x': int(plate_cx - plate_width / 2),
                'y': int(plate_cy - plate_height / 2),
                'w': int(plate_width),
                'h': int(plate_height)
            })
            for i, plate_img in enumerate(plate_imgs):

                plate_img = cv2.resize(plate_img, dsize=(0, 0), fx=1.6, fy=1.6)
                _, plate_img = cv2.threshold(plate_img,
                                             thresh=0.0,
                                             maxval=255.0,
                                             type=cv2.THRESH_BINARY
                                             | cv2.THRESH_OTSU)
                # plt.imshow(plate_img, cmap='gray')
                # plt.show()
                # find contours again (same as above)
                contours, _ = cv2.findContours(plate_img,
                                               mode=cv2.RETR_LIST,
                                               method=cv2.CHAIN_APPROX_SIMPLE)

                plate_min_x, plate_min_y = plate_img.shape[1], plate_img.shape[0]
                plate_max_x, plate_max_y = 0, 0

                for contour in contours:
                    x, y, w, h = cv2.boundingRect(contour)

                    area = w * h
                    ratio = w / h

                    if area > MIN_AREA \
                            and w > MIN_WIDTH and h > MIN_HEIGHT \
                            and MIN_RATIO < ratio < MAX_RATIO:
                        if x < plate_min_x:
                            plate_min_x = x
                        if y < plate_min_y:
                            plate_min_y = y
                        if x + w > plate_max_x:
                            plate_max_x = x + w
                        if y + h > plate_max_y:
                            plate_max_y = y + h

            img_result = plate_img[plate_min_y:plate_max_y,
                                   plate_min_x:plate_max_x]
            img_result = cv2.GaussianBlur(img_result, ksize=(3, 3), sigmaX=0)
            _, img_result = cv2.threshold(img_result,
                                          thresh=0.0,
                                          maxval=255.0,
                                          type=cv2.THRESH_BINARY
                                          | cv2.THRESH_OTSU)
            img_result = cv2.copyMakeBorder(img_result,
                                            top=10,
                                            bottom=10,
                                            left=20,
                                            right=10,
                                            borderType=cv2.BORDER_CONSTANT,
                                            value=(0, 0, 0))
            chars = pytesseract.image_to_string(img_result,
                                                lang='kor',
                                                config='--psm 7 --oem 0')

            result_chars = ''

            has_digit = False
            # Hangul characters that appear on Korean license plates
            PLATE_HANGUL = '가나다라마거너더러머버서어저고노도로모보소오조구누두루무부수우주아바사자배하허호'
            for c in chars:
                if c in PLATE_HANGUL or c.isdigit():
                    if c.isdigit():
                        has_digit = True
                    result_chars += c
            # print(result_chars)
            # print(len(result_chars))

            if result_chars == "":
                pass
            else:
                if before_chars == result_chars and 6 < len(result_chars) < 9:
                    count += 1
                else:
                    before_chars = result_chars
                    count = 0

            if count == 1:
                print(result_chars)
                count = 0
                before_chars = ""

        # print(fps + " : 프레임 영상입니다.")
        print(fps)
        a = np.hstack((img_ori, temp_result))
        cv2.imshow("Go", a)
        if cv2.waitKey(42) == ord('q'):
            break
        e2 = cv2.getTickCount()
        time = (e2 - e1) / cv2.getTickFrequency()
        print(time)
        # print("Processing time for this frame: " + str(time) + " seconds")
    cap.release()
    cv2.destroyAllWindows()
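A minimal entry point, assuming OpenCV, pytesseract with the Korean traineddata, and the hard-coded video path above are available on the machine:

if __name__ == '__main__':
    videoCap()  # press 'q' in the preview window to stop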
Example #8
def get_image_patch_from_contour(image, contour):
    """Returns the specified area from the image"""
    x, y, w, h = cv2.boundingRect(contour)
    size = (int(w * 1.2), int(h * 1.5))
    center = (x + w / 2, y + h / 2)
    return cv2.getRectSubPix(image, size, center)
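A small check on a synthetic blob; the 1.2/1.5 factors above pad the bounding box, so the patch is larger than the box itself:

import cv2
import numpy as np

img = np.zeros((100, 100, 3), dtype=np.uint8)
cv2.rectangle(img, (40, 40), (59, 59), (255, 255, 255), -1)  # 20x20 blob

contours, _ = cv2.findContours(cv2.cvtColor(img, cv2.COLOR_BGR2GRAY),
                               cv2.RETR_EXTERNAL, cv2.CHAIN_APPROX_SIMPLE)
patch = get_image_patch_from_contour(img, contours[0])
print(patch.shape)  # (30, 24, 3): the 20x20 box padded to 24x30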
Example #9
def get_image_patch_from_rect(image, rect):
    """Returns the specified area from the image"""
    top, left, bottom, right = rect
    size = (right - left, bottom - top)
    center = (left + size[0] / 2, top + size[1] / 2)
    return cv2.getRectSubPix(image, size, center)
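Unlike Example #8, this variant takes a (top, left, bottom, right) tuple directly and applies no padding; a quick sketch:

import cv2
import numpy as np

img = np.arange(100 * 100, dtype=np.float32).reshape(100, 100)
patch = get_image_patch_from_rect(img, (10, 20, 50, 60))  # top, left, bottom, right
print(patch.shape)  # (40, 40)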