Code Example #1
File: image.py Project: A-tichat/sudoku_detector
def sudoku_detect(image_path):
    image = cv2.imread(image_path)
    gray = cv2.cvtColor(image, cv2.COLOR_BGR2GRAY)
    # gray = cv2.resize(gray, (480, 480))
    # dst = cv2.fastNlMeansDenoising(gray)
    # cv2.imshow('11', gray)
    # cv2.imshow('lo', dst)
    # cv2.waitKey(0)

    blurred = cv2.GaussianBlur(gray, (7, 7), 3)
    thresh = cv2.adaptiveThreshold(
        blurred, 255, cv2.ADAPTIVE_THRESH_GAUSSIAN_C, cv2.THRESH_BINARY, 57, 5)
    thresh = cv2.bitwise_not(thresh)
    cnts = cv2.findContours(
        thresh.copy(), cv2.RETR_EXTERNAL, cv2.CHAIN_APPROX_SIMPLE)
    cnts = imutils.grab_contours(cnts)
    cnts = sorted(cnts, key=cv2.contourArea, reverse=True)
    puzzleCnt = None
    for c in cnts:
        peri = cv2.arcLength(c, True)
        approx = cv2.approxPolyDP(c, 0.02 * peri, True)
        if len(approx) == 4:
            puzzleCnt = approx
            break

    # guard against the case where no four-point contour was found
    if puzzleCnt is None:
        raise Exception("Could not find sudoku puzzle outline.")

    puzzle = four_point_transform(image, puzzleCnt.reshape(4, 2))
    warped = four_point_transform(gray, puzzleCnt.reshape(4, 2))
    return (puzzle, warped)
Code Example #2
def find_puzzle(image, debug=False):
    # convert image into grayscale and blur it slightly
    gray = cv2.cvtColor(image, cv2.COLOR_BGR2GRAY)
    blurred = cv2.GaussianBlur(gray, (7, 7), 3)

    # apply adaptive thresholding and invert the threshold map
    thresh = cv2.adaptiveThreshold(blurred, 255, cv2.ADAPTIVE_THRESH_GAUSSIAN_C, cv2.THRESH_BINARY, 11, 2)
    thresh = cv2.bitwise_not(thresh)

    # check to see if we are visualizing each step of the image
    # processing pipeline (in this case, thresholding)
    if debug:
        cv2.imshow("Puzzle Thresh", thresh)
        cv2.waitKey(0)

    # find contours in the thresholded image and sort them by size in descending order
    cnts = cv2.findContours(thresh.copy(), cv2.RETR_EXTERNAL, cv2.CHAIN_APPROX_SIMPLE)
    cnts = imutils.grab_contours(cnts)
    cnts = sorted(cnts, key=cv2.contourArea, reverse=True)

    # initialize a contour that corresponds to the puzzle outline
    puzzleCnt = None

    # iterate over all contours
    for c in cnts:
        # approximate the contour
        peri = cv2.arcLength(c, True)
        approx = cv2.approxPolyDP(c, 0.02 * peri, True)

        # if the approximated contour has four points
        # then we can assume that we found the outline for the puzzle
        if len(approx) == 4:
            puzzleCnt = approx
            break

    # if the puzzle contour is empty then our script could not find
    # the outline for the sudoku puzzle 
    if puzzleCnt is None:
        raise Exception("Could not find sudoku puzzle outline. Try debugging threshold and contour steps.")
    
    # check for visualization again
    if debug:
        # draw the contour of the puzzle on the image and then display
        output = image.copy()
        cv2.drawContours(output, [puzzleCnt], -1, (0, 255, 0), 2)
        cv2.imshow("Puzzle Outline", output)
        cv2.waitKey(0)

    # apply a four point perspective transform to both the original and
    # the grayscale image to obtain a top-down birds eye view of the puzzle
    puzzle = four_point_transform(image, puzzleCnt.reshape(4, 2))
    warped = four_point_transform(gray, puzzleCnt.reshape(4, 2))

    # check for visualization
    if debug:
        # show the warped image 
        cv2.imshow("Puzzle Transform", puzzle)
        cv2.waitKey(0)

    return (puzzle, warped)
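
A minimal usage sketch for the find_puzzle() above, assuming the function is already in scope; the import and the file name "sudoku.jpg" are illustrative assumptions, not part of the original example.

import cv2

image = cv2.imread("sudoku.jpg")  # hypothetical input photo
puzzle, warped = find_puzzle(image, debug=False)
cv2.imshow("Puzzle (color, top-down)", puzzle)
cv2.imshow("Puzzle (grayscale, top-down)", warped)
cv2.waitKey(0)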
Code Example #3
def scan(path):
    image = cv2.imread(path)
    #image = imutils.resize(image, 500)

    gray = cv2.cvtColor(image, cv2.COLOR_BGR2GRAY)  # the original passed the magic number 6
    blurred = cv2.GaussianBlur(gray, (5, 5), 0)
    edged = cv2.Canny(blurred, 100, 200)

    #cv2.imshow('img', edged)

    cnts = cv2.findContours(edged.copy(), cv2.RETR_LIST, cv2.CHAIN_APPROX_SIMPLE)
    cnts = imutils.grab_contours(cnts)
    cnts = sorted(cnts, key=cv2.contourArea, reverse=True)[:5]
    screenCnt = None
    for c in cnts:
        peri = cv2.arcLength(c, True)
        approx = cv2.approxPolyDP(c, 0.02 * peri, True)
        print(len(approx))
        if len(approx) == 4:
            screenCnt = approx
            break

    if screenCnt is not None:
        paper = four_point_transform(image, screenCnt.reshape((4, 2)))
        warped = four_point_transform(gray, screenCnt.reshape((4, 2)))
    else:
        warped = gray

    # threshold_local returns a threshold map, so binarize by comparing pixel-wise
    T = threshold_local(warped, 11, offset=10)
    warped = (warped > T).astype("uint8") * 255

    #cv2.imshow('img', imutils.resize(warped, 600))
    return warped
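
A short usage sketch for scan(): it returns only the binarized top-down view, so persisting the result is a one-liner. The file names are hypothetical.

import cv2

scanned = scan("receipt.jpg")  # hypothetical input photo
cv2.imwrite("receipt_scanned.png", scanned)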
Code Example #4
def get_outer_box(original_image, desired_portrait=True):
    portrait = not desired_portrait
    i = 0
    while not portrait == desired_portrait and i < 2:
        outer_box_contour = get_outer_box_contour(original_image)
        tl, bl, br, tr = outer_box_contour[0], outer_box_contour[
            1], outer_box_contour[2], outer_box_contour[3]
        heights = sorted([euclidean(bl, tl), euclidean(br, tr)])
        widths = sorted([euclidean(tr, tl), euclidean(br, bl)])
        # opposite sides must be within 5% of each other in length
        try:
            assert heights[1] / heights[0] < 1.05
            assert widths[1] / widths[0] < 1.05
        except AssertionError:
            raise OmrValidationException('good outer box not found')
        shrink = 5
        original_cropped = four_point_transform(
            original_image, outer_box_contour.reshape(4, 2))
        original_cropped = original_cropped[shrink:-shrink, shrink:-shrink]
        gray = cv2.cvtColor(original_image, cv2.COLOR_BGR2GRAY)
        grey_cropped = four_point_transform(gray,
                                            outer_box_contour.reshape(4, 2))
        grey_cropped = grey_cropped[shrink:-shrink, shrink:-shrink]
        height, width = grey_cropped.shape
        portrait = True if height >= width else False
        if portrait != desired_portrait:
            print(
                'DEBUG: image was not correct orientation, rotating counter-cw 90 degrees'
            )
            original_image = np.array(
                Image.fromarray(original_image).rotate(90, expand=True))
        i += 1
    if not portrait == desired_portrait:
        raise OmrValidationException(
            'outer box not found with correct orientation')
    return grey_cropped, original_cropped
Code Example #5
def getPaper(edgeMap, gray, image):
    cnts = cv2.findContours(edgeMap.copy(), cv2.RETR_EXTERNAL,
                            cv2.CHAIN_APPROX_SIMPLE)
    cnts = imutils.grab_contours(cnts)  # handles the tuple shape across OpenCV versions

    targetCnt = None

    if len(cnts) > 0:
        # sort the contours according to their size in
        # descending order
        cnts = sorted(cnts, key=cv2.contourArea, reverse=True)

        for c in cnts:
            # approximate the contour
            peri = cv2.arcLength(c, True)
            approx = cv2.approxPolyDP(c, 0.1 * peri, True)

            # if our approximated contour has four points,
            # then we can assume we have found the target
            if len(approx) == 4:
                targetCnt = approx
                break

    # apply a four point perspective transform to the
    # paper images to obtain a rectilinear view of the paper
    if targetCnt is None:
        raise ValueError("no four-point contour found")
    warped = four_point_transform(gray, targetCnt.reshape(4, 2))
    # use the color image for the display copy (the original transformed
    # the grayscale image twice)
    paper = four_point_transform(image, targetCnt.reshape(4, 2))
    cv2.imshow("ppr", paper)
    # apply a scikit-image adaptive threshold; better than a global
    # threshold under uneven lighting (threshold_local supersedes the
    # deprecated threshold_adaptive and returns a threshold map)
    T = threshold_local(warped, 257, offset=10)
    thresh = img_as_ubyte(warped > T)
    return (thresh, paper)
Code Example #6
def find_suduko(image, debug=False):
    imgGrey = cv2.cvtColor(image, cv2.COLOR_RGB2GRAY)
    imgBlur = cv2.GaussianBlur(imgGrey, (3, 3), 2)
    thresh = cv2.adaptiveThreshold(imgBlur, 255, cv2.ADAPTIVE_THRESH_GAUSSIAN_C,
                                   cv2.THRESH_BINARY, 11, 2)
    thresh = cv2.bitwise_not(thresh)

    # find contours and sort them by size
    cnts = cv2.findContours(thresh.copy(), cv2.RETR_EXTERNAL, cv2.CHAIN_APPROX_SIMPLE)
    cnts = imutils.grab_contours(cnts)
    cnts = sorted(cnts, key=cv2.contourArea, reverse=True)
    puzzleCnt = None
    for c in cnts:
        peri = cv2.arcLength(c, True)
        approx = cv2.approxPolyDP(c, 0.02 * peri, True)
        if len(approx) == 4:
            puzzleCnt = approx
            break

    # the None check must run after the loop; in the original it sat inside
    # the loop body and fired on the first non-quadrilateral contour
    if puzzleCnt is None:
        # raise Exception("Could not find sudoku outline; debug?")
        return None

    if debug:
        output = image.copy()
        cv2.drawContours(output, [puzzleCnt], -1, (0, 255, 0), 2)
        cv2.imshow("Puzzle Outline", output)
        cv2.waitKey(0)
    imgWarped = four_point_transform(image, puzzleCnt.reshape(4, 2))
    imgWgrey = four_point_transform(imgGrey, puzzleCnt.reshape(4, 2))
    if debug:
        cv2.imshow("Puzzle Transform", imgWarped)
        cv2.waitKey(0)
    return imgWarped, imgWgrey
Code Example #7
def process_image(path_to_img):
    # creates an edge map and convert to gray scale
    image = cv2.imread(path_to_img)
    image = imutils.resize(image, height=1500)
    gray = cv2.cvtColor(image, cv2.COLOR_BGR2GRAY)
    blurred = cv2.GaussianBlur(gray, (3, 3), 0)
    edged = cv2.Canny(blurred, 50, 200, 255)

    # find contours in the edge map, then sort them by their
    # size in descending order
    cnts = cv2.findContours(edged.copy(), cv2.RETR_EXTERNAL,
                            cv2.CHAIN_APPROX_SIMPLE)
    cnts = imutils.grab_contours(cnts)  # version-safe contour unpacking
    cnts = sorted(cnts, key=cv2.contourArea, reverse=True)
    displayCnt = None

    # loop over the contours
    for c in cnts:
        # approximate the contour
        peri = cv2.arcLength(c, True)
        approx = cv2.approxPolyDP(c, 0.02 * peri, True)

        # if the contour has four vertices, then we have found
        # the thermostat display
        if len(approx) == 4:
            displayCnt = approx
            break
    # the display is the first four-sided contour we see; bail out if none
    if displayCnt is None:
        raise Exception("Could not find a four-point display contour.")
    warped = four_point_transform(gray, displayCnt.reshape(4, 2))
    output = four_point_transform(image, displayCnt.reshape(4, 2))

    # output is the display contoured in gray scale
    return warped, output
Code Example #8
def extract_sudoku(src: np.ndarray, img: np.ndarray, image: np.ndarray, grey: np.ndarray, debug: bool = False) -> tuple:
    contours = cv2.findContours(img.copy(), cv2.RETR_EXTERNAL, cv2.CHAIN_APPROX_SIMPLE)
    contours = imutils.grab_contours(contours)
    contours = sorted(contours, key=cv2.contourArea, reverse=True)
    sudoku_contour = None

    for contour in contours:
        perimeter = cv2.arcLength(contour, True)
        approx = cv2.approxPolyDP(contour, 0.02 * perimeter, True)
        if len(approx) == 4:
            sudoku_contour = approx
            break
    if sudoku_contour is None:
        print("Cannot find a sudoku grid in this image.")
        return None, None, None  # match the 3-tuple returned on success
    sudoku_original = four_point_transform(src, sudoku_contour.reshape(4, 2))
    sudoku_standard = four_point_transform(grey, sudoku_contour.reshape(4, 2))
    sudoku_clear = four_point_transform(image, sudoku_contour.reshape(4, 2))

    if debug:
        display_image = src.copy()
        cv2.drawContours(display_image, [sudoku_contour], -1, (0, 255, 0), 2)
        img_show(display_image, "Contours Image:")
        img_show(sudoku_original, "Transformed Sudoku Image:")
        img_show(sudoku_standard, "Transformed Sudoku Binary Blurred Image:")
        img_show(sudoku_clear, "Transformed Sudoku Binary Clear Image:")
    return sudoku_original, sudoku_standard, sudoku_clear
Code Example #9
File: opencv.py Project: ZoziLaMalice/Sudoku_Solver
def find_puzzle(image):
    # convert the image to grayscale and blur it slightly
    gray = cv2.cvtColor(image, cv2.COLOR_BGR2GRAY)
    blurred = cv2.GaussianBlur(gray, (7, 7), 3)

    # apply adaptive thresholding and then invert the threshold map
    thresh = cv2.adaptiveThreshold(blurred, 255,
                                   cv2.ADAPTIVE_THRESH_GAUSSIAN_C,
                                   cv2.THRESH_BINARY, 11, 2)
    thresh = cv2.bitwise_not(thresh)

    cnts = cv2.findContours(thresh.copy(), cv2.RETR_EXTERNAL,
                            cv2.CHAIN_APPROX_SIMPLE)
    cnts = imutils.grab_contours(cnts)
    cnts = sorted(cnts, key=cv2.contourArea, reverse=True)

    # loop over the contours
    puzzleContours = None

    for c in cnts:
        peri = cv2.arcLength(c, True)
        approx = cv2.approxPolyDP(c, 0.02 * peri, True)

        if len(approx) == 4:
            puzzleContours = approx
            break

    if puzzleContours is None:
        raise Exception("Could not find the sudoku puzzle outline.")

    puzzle = four_point_transform(image, puzzleContours.reshape(4, 2))
    warped = four_point_transform(gray, puzzleContours.reshape(4, 2))

    return (puzzle, warped)
Code Example #10
def extract(image):

    show(image, "Normal")
    # convert the color image to grayscale
    gray_image = cv2.cvtColor(image, cv2.COLOR_RGBA2GRAY)
    show(gray_image, "Normal en gris")

    blurred = cv2.GaussianBlur(gray_image, (5, 5), 0)
    edge = cv2.Canny(blurred, 50, 200, 255)

    #show(edge, "Edge")

    contours = cv2.findContours(edge.copy(), cv2.RETR_EXTERNAL,
                                cv2.CHAIN_APPROX_SIMPLE)
    contours = imutils.grab_contours(contours)  # version-safe contour unpacking
    contours = sorted(contours, key=cv2.contourArea, reverse=True)
    displayContours = None

    # loop over the contour
    for i in contours:
        # approximate the contour
        perimeter = cv2.arcLength(i, True)
        approximation = cv2.approxPolyDP(i, 0.1 * perimeter, True)

        if len(approximation) == 4:
            displayContours = approximation
            break

    if displayContours is None:
        raise Exception("Could not find a four-point contour.")

    warped = four_point_transform(gray_image, displayContours.reshape(4, 2))
    output = four_point_transform(image, displayContours.reshape(4, 2))

    print("Extracted image")

    return warped, output
Code Example #11
File: MoileOMR.py Project: onucsecu2/mobileOMR
def warp(image):
    gray = cv2.cvtColor(image, cv2.COLOR_BGR2GRAY)
    blurred = cv2.GaussianBlur(gray, (9, 9), 0)
    edged = cv2.Canny(blurred, 70, 200)
    cnts = cv2.findContours(edged.copy(), cv2.RETR_EXTERNAL,
                            cv2.CHAIN_APPROX_SIMPLE)
    cnts = imutils.grab_contours(cnts)
    docCnt = None

    # ensure that at least one contour was found
    if len(cnts) > 0:
        # sort the contours according to their size in
        # descending order
        cnts = sorted(cnts, key=cv2.contourArea, reverse=True)

        # loop over the sorted contours
        for c in cnts:
            # approximate the contour
            peri = cv2.arcLength(c, True)
            approx = cv2.approxPolyDP(c, 0.02 * peri, True)

            # if our approximated contour has four points,
            # then we can assume we have found the paper
            if len(approx) == 4:
                docCnt = approx
                break

    if docCnt is None:
        raise Exception("Could not find the paper outline.")

    paper = four_point_transform(image, docCnt.reshape(4, 2))
    warped = four_point_transform(gray, docCnt.reshape(4, 2))
    return warped
Code Example #12
def stream_to_number(my_stream):
    try:
        my_stream.seek(0)
        file_bytes = np.asarray(bytearray(my_stream.read()), dtype=np.uint8)
        frame = cv2.imdecode(file_bytes, cv2.IMREAD_COLOR)
        res = hsv_mask(frame, 70, 100, 255)
        image = res.copy()
        gray = cv2.cvtColor(image, cv2.COLOR_BGR2GRAY)
        displayCnt = found_display_contour(gray)
        warped = four_point_transform(gray, displayCnt.reshape(4, 2))
        output = four_point_transform(image, displayCnt.reshape(4, 2))
        # threshold the warped image, then apply a series of morphological
        # operations to cleanup the thresholded image
        warped = cv2.medianBlur(warped, 1)  # note: ksize=1 is a no-op; use 3 for actual smoothing
        thresh = cv2.adaptiveThreshold(warped, 255, cv2.ADAPTIVE_THRESH_GAUSSIAN_C,
                                       cv2.THRESH_BINARY, 11, 2)
        kernel = np.ones((5, 3), np.uint8)
        thresh = cv2.morphologyEx(thresh, cv2.MORPH_OPEN, kernel)
        thresh = cv2.morphologyEx(thresh, cv2.MORPH_CLOSE, kernel)
        kernel = cv2.getStructuringElement(cv2.MORPH_ELLIPSE, (1, 5))
        thresh = cv2.morphologyEx(thresh, cv2.MORPH_OPEN, kernel)
        number = display_segments_to_number(thresh)
        return number
    except Exception:  # a bare except would also swallow KeyboardInterrupt/SystemExit
        print("Something went wrong with image recognition")
    finally:
        my_stream.close()
Code Example #13
    def find_contours(self):

        contours = cv2.findContours(self.edged, cv2.RETR_EXTERNAL,
                                    cv2.CHAIN_APPROX_SIMPLE)
        contours = imutils.grab_contours(contours)

        screen_contour = None

        if len(contours) > 0:

            contours = sorted(contours, key=cv2.contourArea, reverse=True)

            for contour in contours:

                peri = cv2.arcLength(contour, True)
                approx = cv2.approxPolyDP(contour, 0.02 * peri, True)

                if len(approx) == 4:

                    cv2.drawContours(self.image, [approx], -1, (255, 0, 170),
                                     2)
                    # keep the largest four-point contour and stop; the
                    # original fell through and used whatever `approx`
                    # happened to hold after the loop
                    screen_contour = approx
                    break

        if screen_contour is None:
            raise ValueError("no four-point contour found")

        self.current_contour_gray = four_point_transform(
            self.gray, screen_contour.reshape(4, 2))
        self.current_contour_color = four_point_transform(
            self.image, screen_contour.reshape(4, 2))

        cv2.imshow('contours', self.image)
        cv2.imshow('current_contour', self.current_contour_color)
        cv2.waitKey(0)
Code Example #14
    def transform_again(self, gray_trans, img_trans):
        gaussian_blur = cv2.GaussianBlur(gray_trans, (5, 5), 0)
        cv2.imshow("gaussian", gaussian_blur)
        # Canny edge detection: gradients below the 2nd argument are
        # discarded, those above the 3rd count as edges, and values in
        # between are kept only if connected to an edge
        edged = cv2.Canny(gaussian_blur, 75, 200)
        cv2.imshow("edged", edged)
        # 1. find the contours (grab_contours copes with the 2- or 3-tuple
        #    returned by different OpenCV versions)
        cts = imutils.grab_contours(
            cv2.findContours(edged.copy(), cv2.RETR_EXTERNAL, cv2.CHAIN_APPROX_SIMPLE))
        # 2. store each contour in path_list as a dict {c: contour, peri: perimeter}
        path_list = []
        for c in cts:
            peri = 0.01 * cv2.arcLength(c, True)
            path_list.append({"c": c, "peri": peri})
        # 3. sort the contours by perimeter, largest first
        path_sort = sorted(path_list, key=lambda x: x['peri'], reverse=True)
        # print("path_sort", path_sort)
        # show the largest contour
        # cv2.drawContours(rect, [path_sort[0]['c']], -1, (0, 0, 255), 3)
        # cv2.imshow("draw_contours", gray_trans)
        # 4. take the bounding rectangle of the largest contour
        x, y, w, h = cv2.boundingRect(path_sort[0]['c'])
        cv2.rectangle(gray_trans, (x, y), (x + w, y + h), (0, 255, 0), 2)
        cv2.imshow("rectangle", gray_trans)
        # print("rect.shape()", rect.shape)
        my, mx = gray_trans.shape
        # 5. build the four perspective points from the rectangle coordinates
        four_points = [[0, y + h], [mx, y + h], [0, my], [mx, my]]
        # 6. apply the perspective transform again
        gray_trans2 = four_point_transform(gray_trans, np.array(four_points))
        img_trans2 = four_point_transform(img_trans, np.array(four_points))
        cv2.imshow("img_trans2", img_trans2)
        return gray_trans2, img_trans2
Code Example #15
def scan(image):

    #Removing the background of the image
    data = np.fromfile(image, dtype=np.uint8)  # read raw bytes (the default dtype is float64)
    buf = np.frombuffer(rembg(data), np.uint8)

    #Loading the image
    img = cv2.imdecode(buf, cv2.IMREAD_UNCHANGED)
    orig = img.copy()
    orig1 = img.copy()
    ratio = img.shape[0] / IMG_RESIZE_H

    #Image Thresholding and Median Filtering for simplifying process of detection and removing small details
    img = imutils.resize(img, height=int(IMG_RESIZE_H))
    orig = imutils.resize(orig, height=int(IMG_RESIZE_H))
    _, img = cv2.threshold(img[:, :, 3], 0, 255, cv2.THRESH_BINARY)
    img = cv2.medianBlur(img, 15)

    #Finding contours for bounding box and cropping
    cnts = cv2.findContours(img, cv2.RETR_EXTERNAL, cv2.CHAIN_APPROX_SIMPLE)
    cnts = imutils.grab_contours(cnts)
    cnts = sorted(cnts, key=cv2.contourArea, reverse=True)

    #Finding largest contour and drawing bounding box on the image
    target = None
    for c in cnts:
        perimeter = cv2.arcLength(c, True)
        polygon = cv2.approxPolyDP(c,
                                   APPROX_POLY_DP_ACCURACY_RATIO * perimeter,
                                   True)

        if len(polygon) == 4:
            target = polygon

    if target is None:
        # fall back to outlining the last polygon examined, if any
        if cnts:
            cv2.drawContours(orig, [polygon], -1, (0, 255, 0), 2)
    else:
        cv2.drawContours(orig, [target], -1, (0, 255, 0), 2)

    #Displaying intermediate result
    plt.figure(5, figsize=(7, 7))
    plt.imshow(orig, cmap='gray')
    plt.show()

    #Perspective transformation of image
    if target is None:
        boundingBox = orig
        crop = orig1
    else:
        orig = imutils.resize(orig, height=int(IMG_RESIZE_H * ratio))
        boundingBox = perspective.four_point_transform(
            orig,
            target.reshape(4, 2) * ratio)
        crop = perspective.four_point_transform(orig1,
                                                target.reshape(4, 2) * ratio)

    #Saving the cropped image and image with bounding box
    image = os.path.splitext(os.path.basename(image))[0]
    cv2.imwrite("./Outputs/" + image + "_BoundingBox.jpeg", boundingBox)
    cv2.imwrite("./Outputs/" + image + "_Crop.jpeg", crop)
Code Example #16
File: computer_vision.py Project: mjtadema/buffy
    def zoom_to_rectangle(self, canny_in, color_in):
        # find contours in the edge map, then sort them by their
        # size in descending order
        cnts = cv2.findContours(canny_in.copy(), cv2.RETR_EXTERNAL,
                                cv2.CHAIN_APPROX_SIMPLE)
        cnts = imutils.grab_contours(cnts)  # version-safe contour unpacking
        cnts = sorted(cnts, key=cv2.contourArea, reverse=True)
        displayCnt = None

        if self.see_pics:
            cv2.drawContours(color_in, cnts, -1, (255, 0, 0), 3)
            cv2.drawContours(color_in, [cnts[0]], 0, (0, 0, 255), 3)
            cv2.drawContours(color_in, [cnts[1]], 0, (0, 255, 0), 3)

            cv2.imshow('Important points', color_in)
            self.trigger()
            cv2.destroyAllWindows()

        # loop over the contours
        for c in cnts:
            # approximate the contour
            peri = cv2.arcLength(c, True)
            approx = cv2.approxPolyDP(c, 0.02 * peri, True)

            # if the contour has four vertices, then we have found
            # the thermostat display
            if len(approx) == 4:
                displayCnt = approx
                break
        # extract the thermostat display and apply a perspective
        # transform to it
        if displayCnt is None:
            raise ValueError("no four-point contour found")
        gray = cv2.cvtColor(color_in, cv2.COLOR_BGR2GRAY)
        return [four_point_transform(gray, displayCnt.reshape(4, 2)),
                four_point_transform(color_in, displayCnt.reshape(4, 2))]
Code Example #17
def markOnImg(img, width, height):
    '''On an image marked with four corner points, mark the darkened (filled-in) options and return (image, coordinates)'''
    docCnt = getFourPtTrans(img)
    gray = cv2.cvtColor(img, cv2.COLOR_BGR2GRAY)
    paper = four_point_transform(img, docCnt)
    warped = four_point_transform(gray, docCnt)

    # binarize the grayscale image
    thresh = cv2.adaptiveThreshold(warped, 255, cv2.ADAPTIVE_THRESH_MEAN_C,
                                   cv2.THRESH_BINARY, 15, 2)
    # resize (interpolation must be passed as a keyword; the third
    # positional argument of cv2.resize is dst, not the interpolation flag)
    thresh = cv2.resize(thresh, (width, height), interpolation=cv2.INTER_LANCZOS4)
    paper = cv2.resize(paper, (width, height), interpolation=cv2.INTER_LANCZOS4)
    warped = cv2.resize(warped, (width, height), interpolation=cv2.INTER_LANCZOS4)

    ChQImg = cv2.blur(thresh, (13, 13))
    # binarize with threshold 120 (the original wrote 225, presumably a
    # typo for the usual 255 max value)
    ChQImg = cv2.threshold(ChQImg, 120, 255, cv2.THRESH_BINARY)[1]
    # cv2.imwrite("paper.jpg",paper)
    Answer = []

    # find the answer contours in the binary image
    cnts = cv2.findContours(ChQImg, cv2.RETR_TREE, cv2.CHAIN_APPROX_SIMPLE)
    cnts = cnts[1] if imutils.is_cv3() else cnts[0]
    for c in cnts:
        x, y, w, h = cv2.boundingRect(c)
        if w > 50 and h > 20 and w < 100 and h < 100:
            M = cv2.moments(c)
            cX = int(M["m10"] / M["m00"])
            cY = int(M["m01"] / M["m00"])

            cv2.drawContours(paper, c, -1, (0, 0, 255), 5)
            cv2.circle(paper, (cX, cY), 7, (255, 255, 255), 2)
            Answer.append((cX, cY))
    return paper, Answer
Code Example #18
def find_map(image, debug=False):
    # convert the image to grayscale and blur it slightly
    gray = cv2.cvtColor(image, cv2.COLOR_BGR2GRAY)
    blurred = cv2.GaussianBlur(gray, (7, 7), 3)
    # apply adaptive thresholding and then invert the threshold map
    thresh = cv2.adaptiveThreshold(blurred, 255,
                                   cv2.ADAPTIVE_THRESH_GAUSSIAN_C,
                                   cv2.THRESH_BINARY, 11, 2)
    thresh = cv2.bitwise_not(thresh)
    # check to see if we are visualizing each step of the image
    # processing pipeline (in this case, thresholding)
    if debug:
        cv2.imshow("Map Thresh", thresh)
        cv2.waitKey(0)
    # find contours in the thresholded image and sort them by size in
    # descending order
    cnts = cv2.findContours(thresh.copy(), cv2.RETR_EXTERNAL,
                            cv2.CHAIN_APPROX_SIMPLE)
    cnts = imutils.grab_contours(cnts)
    cnts = sorted(cnts, key=cv2.contourArea, reverse=True)
    # initialize a contour that corresponds to the map outline
    mapCnt = None
    # loop over the contours
    for c in cnts:
        # approximate the contour
        peri = cv2.arcLength(c, True)
        approx = cv2.approxPolyDP(c, 0.02 * peri, True)
        # if our approximated contour has four points, then we can
        # assume we have found the outline of the map
        if len(approx) == 4:
            mapCnt = approx
            break
    # if the map contour is empty then our script could not find
    # the outline of the map so raise an error
    if mapCnt is None:
        raise Exception(("Could not find map outline. "
                         "Try debugging your thresholding and contour steps."))
    # check to see if we are visualizing the outline of the detected
    # map
    if debug:
        # draw the contour of the map on the image and then display
        # it to our screen for visualization/debugging purposes
        output = image.copy()
        cv2.drawContours(output, [mapCnt], -1, (0, 255, 0), 2)
        cv2.imshow("map Outline", output)
        cv2.waitKey(0)

    # apply a four point perspective transform to both the original
    # image and grayscale image to obtain a top-down bird's eye view
    # of the map
    map = four_point_transform(image, mapCnt.reshape(4, 2))
    warped = four_point_transform(gray, mapCnt.reshape(4, 2))
    # check to see if we are visualizing the perspective transform
    if debug:
        # show the output warped image (again, for debugging purposes)
        cv2.imshow("map Transform", map)
        cv2.waitKey(0)
    # return a 2-tuple of map in both RGB and grayscale
    return (map, warped)
Code Example #19
def imganalysis(img, img1):
    ANSWER_KEY = {0: 1, 1: 4, 2: 0, 3: 3, 4: 1}
    #image = cv2.imread(args["image"])
    image = cv2.imread(img)
    gray = cv2.cvtColor(image, cv2.COLOR_BGR2GRAY)
    blurred = cv2.GaussianBlur(gray, (5, 5), 0)
    edged = cv2.Canny(blurred, 75, 200)
    cnts = cv2.findContours(edged.copy(), cv2.RETR_EXTERNAL,
                            cv2.CHAIN_APPROX_SIMPLE)
    cnts = imutils.grab_contours(cnts)  # version-safe contour unpacking
    docCnt = None
    if len(cnts) > 0:
        cnts = sorted(cnts, key=cv2.contourArea, reverse=True)
        for c in cnts:
            peri = cv2.arcLength(c, True)
            approx = cv2.approxPolyDP(c, 0.02 * peri, True)
            if len(approx) == 4:
                docCnt = approx
                break
    if docCnt is None:
        raise Exception("Could not find the document outline.")
    paper = four_point_transform(image, docCnt.reshape(4, 2))
    warped = four_point_transform(gray, docCnt.reshape(4, 2))
    thresh = cv2.threshold(warped, 0, 255,
                           cv2.THRESH_BINARY_INV | cv2.THRESH_OTSU)[1]
    cnts = cv2.findContours(thresh.copy(), cv2.RETR_EXTERNAL,
                            cv2.CHAIN_APPROX_SIMPLE)
    cnts = imutils.grab_contours(cnts)
    questionCnts = []
    for c in cnts:
        (x, y, w, h) = cv2.boundingRect(c)
        ar = w / float(h)
        if w >= 20 and h >= 20 and ar >= 0.9 and ar <= 1.1:
            questionCnts.append(c)
    questionCnts = contours.sort_contours(questionCnts,
                                          method="top-to-bottom")[0]
    correct = 0
    for (q, i) in enumerate(np.arange(0, len(questionCnts), 5)):
        cnts = contours.sort_contours(questionCnts[i:i + 5])[0]
        bubbled = None
        for (j, c) in enumerate(cnts):
            mask = np.zeros(thresh.shape, dtype="uint8")
            cv2.drawContours(mask, [c], -1, 255, -1)
            mask = cv2.bitwise_and(thresh, thresh, mask=mask)
            total = cv2.countNonZero(mask)
            if bubbled is None or total > bubbled[0]:
                bubbled = (total, j)
        color = (0, 0, 255)
        k = ANSWER_KEY[q]
        if k == bubbled[1]:
            color = (0, 255, 0)
            correct += 1
        cv2.drawContours(paper, [cnts[k]], -1, color, 3)
    score = (correct / 5.0) * 100
    print("[INFO] score: {:.2f}%".format(score))
    cv2.putText(paper, "{:.2f}%".format(score), (10, 30),
                cv2.FONT_HERSHEY_SIMPLEX, 0.9, (0, 0, 255), 2)
    XMLFILES_FOLDER = os.path.join(settings.MEDIA_ROOT, 'answers/')
    cv2.imwrite(XMLFILES_FOLDER + img1, paper)
    cv2.waitKey(0)
    return score
Code Example #20
File: jeltablfelism.py Project: knoel96/G-pi-l-t-s
def defineTrafficSign(image):

        image = imutils.resize(image, height=500)
        gray = cv2.cvtColor(image, cv2.COLOR_BGR2GRAY)
        blurred = cv2.GaussianBlur(gray, (5, 5), 0)
        edged = cv2.Canny(blurred, 50, 200, 255)
        cnts = cv2.findContours(edged.copy(), cv2.RETR_EXTERNAL,
                cv2.CHAIN_APPROX_SIMPLE)
        cnts = imutils.grab_contours(cnts)  # version-safe contour unpacking
        cnts = sorted(cnts, key=cv2.contourArea, reverse=True)
        displayCnt = None
        
        for c in cnts:    
                peri = cv2.arcLength(c, True)
                approx = cv2.approxPolyDP(c, 0.02 * peri, True)
                if len(approx) == 4:
                        displayCnt = approx
                        break

        # bail out if no four-sided contour was found
        if displayCnt is None:
                return None

        warped = four_point_transform(gray, displayCnt.reshape(4, 2))
        output = four_point_transform(image, displayCnt.reshape(4, 2))
        cv2.drawContours(image, [displayCnt], -1, (0, 0, 255), 5)

        thresh = cv2.threshold(warped, 0, 255, 
                cv2.THRESH_BINARY | cv2.THRESH_OTSU)[1]
        kernel = cv2.getStructuringElement(cv2.MORPH_ELLIPSE, (1, 5))
        thresh = cv2.morphologyEx(thresh, cv2.MORPH_OPEN, kernel)

        # (roiH, roiW) = roi.shape
        #subHeight = thresh.shape[0]/10
        #subWidth = thresh.shape[1]/10
        (subHeight, subWidth) = np.divide(thresh.shape, 10)
        subHeight = int(subHeight)
        subWidth = int(subWidth)

        cv2.rectangle(output, (subWidth, 4*subHeight), (3*subWidth, 9*subHeight), (0,255,0),2) # left block
        cv2.rectangle(output, (4*subWidth, 4*subHeight), (6*subWidth, 9*subHeight), (0,255,0),2) # center block
        cv2.rectangle(output, (7*subWidth, 4*subHeight), (9*subWidth, 9*subHeight), (0,255,0),2) # right block
        cv2.rectangle(output, (3*subWidth, 2*subHeight), (7*subWidth, 4*subHeight), (0,255,0),2) # top block

        leftBlock = thresh[4*subHeight:9*subHeight, subWidth:3*subWidth]
        centerBlock = thresh[4*subHeight:9*subHeight, 4*subWidth:6*subWidth]
        rightBlock = thresh[4*subHeight:9*subHeight, 7*subWidth:9*subWidth]
        topBlock = thresh[2*subHeight:4*subHeight, 3*subWidth:7*subWidth]  
        
        leftFraction = np.sum(leftBlock)/(leftBlock.shape[0]*leftBlock.shape[1])
        centerFraction = np.sum(centerBlock)/(centerBlock.shape[0]*centerBlock.shape[1])
        rightFraction = np.sum(rightBlock)/(rightBlock.shape[0]*rightBlock.shape[1])
        topFraction = np.sum(topBlock)/(topBlock.shape[0]*topBlock.shape[1])
        
        segments = (leftFraction, centerFraction, rightFraction, topFraction)
        segments = tuple(1 if segment > 230 else 0 for segment in segments)
        

        if segments in SIGNS_LOOKUP:
            cv2.imshow("output", output)
            return SIGNS_LOOKUP[segments]
        else:
            return None
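
defineTrafficSign() consults a SIGNS_LOOKUP table that is not part of this excerpt. A purely hypothetical shape it might take, keyed by the (left, center, right, top) block-activation tuples computed above:

SIGNS_LOOKUP = {  # hypothetical entries; the real table lives elsewhere in the project
    (1, 0, 0, 1): "Turn Left",
    (0, 0, 1, 1): "Turn Right",
    (0, 1, 0, 1): "Straight",
    (1, 0, 1, 1): "Do Not Enter",
}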
Code Example #21
def find_puzzle(image, debug=False):
    gray = cv2.cvtColor(image, cv2.COLOR_BGR2GRAY)
    blurred = cv2.GaussianBlur(gray, (7, 7), 3)

    threshold = cv2.adaptiveThreshold(
        blurred,
        255,
        cv2.ADAPTIVE_THRESH_GAUSSIAN_C,
        cv2.THRESH_BINARY,
        11,
        2
    )
    threshold = cv2.bitwise_not(threshold)

    # Visualizing each step of the image processing pipeline
    if debug:
        cv2.imshow('Puzzle Threshold', threshold)
        cv2.waitKey(0)
    
    # Find contours in the thresh image and sort them by size in descending order
    contours = cv2.findContours(
        threshold.copy(),
        cv2.RETR_EXTERNAL,
        cv2.CHAIN_APPROX_SIMPLE
    )
    contours = imutils.grab_contours(contours)
    contours = sorted(contours, key=cv2.contourArea, reverse=True)

    puzzleContour = None

    for c in contours:
        peri = cv2.arcLength(c, True)
        approx = cv2.approxPolyDP(
            c, 
            0.02 * peri,
            True
        )

        if len(approx) == 4:
            puzzleContour = approx
            break
    
    if puzzleContour is None:
        raise Exception('Could not find Sudoku puzzle...')

    if debug:
        output = image.copy()
        cv2.drawContours(output, [puzzleContour], -1, (0, 255, 0), 2)
        cv2.imshow('Puzzle', output)
        cv2.waitKey(0)

    puzzle = four_point_transform(image, puzzleContour.reshape(4, 2))
    warped = four_point_transform(gray, puzzleContour.reshape(4, 2))

    if debug:
        # show the transformed puzzle (the original re-displayed the
        # contour overlay here)
        cv2.imshow('Puzzle Transform', puzzle)
        cv2.waitKey(0)

    return (puzzle, warped)
Code Example #22
def find_puzzle(image, debug=False):
    # Convert the image into grayscale and blur it
    gray = cv2.cvtColor(image, cv2.COLOR_BGR2GRAY)
    blurred = cv2.GaussianBlur(gray, (7, 7), 3)

    # Apply adaptive thresh and invert the thres map
    thresh = cv2.adaptiveThreshold(blurred, 255,
                                   cv2.ADAPTIVE_THRESH_GAUSSIAN_C,
                                   cv2.THRESH_BINARY, 11, 2)
    thresh = cv2.bitwise_not(thresh)

    if debug:
        cv2.imshow("Puzzle Thresh", thresh)
        cv2.waitKey(0)

    # Find contours in  the thresh image and sort them by size in descending order
    cnts = cv2.findContours(thresh.copy(), cv2.RETR_EXTERNAL,
                            cv2.CHAIN_APPROX_SIMPLE)
    cnts = imutils.grab_contours(cnts)
    cnts = sorted(cnts, key=cv2.contourArea, reverse=True)

    # Initialize a contour that corresponds to the puzzle outline
    puzzlecnt = None

    # Looping over contours
    for i in cnts:
        # Determine the perimeter of the contour
        peri = cv2.arcLength(i, True)
        # Approximating the contour
        approx = cv2.approxPolyDP(i, 0.02 * peri, True)
        # if our approximated contour has four points which means four vertices, then we can assume we have found the outline of the puzzle
        if len(approx) == 4:
            puzzlecnt = approx
            break

    # if the puzzle contour is empty then our script could not find the outline of the sudoku puzzle so raise an error
    if puzzlecnt is None:
        raise Exception(("Could not find sudoku puzzle outline. "
                         "Try debugging your thresholding and contour steps."))

    # Check to see if we are visualizing the outline of the detected
    # sudoku puzzle
    if debug:
        # draw the contour of the puzzle on the image and then display it to our screen for visualization/debugging purposes
        output = image.copy()
        cv2.drawContours(output, [puzzlecnt], -1, (0, 255, 0), 2)
        cv2.imshow("Puzzle Outline", output)
        cv2.waitKey(0)

    # apply a four point perspective transform to both the original image and grayscale image to obtain a top-down bird's eye view of the puzzle
    puzzle = four_point_transform(image, puzzlecnt.reshape(4, 2))
    warped = four_point_transform(gray, puzzlecnt.reshape(4, 2))

    # Check the transform visually
    if debug:
        cv2.imshow("Puzzle Transform", puzzle)
        cv2.waitKey(0)
    return (puzzle, warped)
Code Example #23
def find_sudoku(img: np.ndarray, debug=False):
    # Convert image to grayscale and add blur
    img_gray = cv2.cvtColor(img, cv2.COLOR_BGR2GRAY)
    img_blurred = cv2.GaussianBlur(img_gray, (7, 7), 3)

    # Apply inverted binary adaptive thresholding
    img_thresh = cv2.adaptiveThreshold(img_blurred, 255,
                                       cv2.ADAPTIVE_THRESH_GAUSSIAN_C,
                                       cv2.THRESH_BINARY_INV, 11, 2)

    if debug:
        cv2.imshow("Sudoku with Threshold Filter", img_thresh)
        cv2.waitKey(0)

    # Find contours in the thresholded image
    contours, _ = cv2.findContours(img_thresh.copy(), cv2.RETR_EXTERNAL,
                                   cv2.CHAIN_APPROX_SIMPLE)
    contours = sorted(contours, key=cv2.contourArea,
                      reverse=True)  # largest contour is first element

    # Find outer contour
    sudoku_contour = None
    for c in contours:
        # Approximation of contour
        perimeter = cv2.arcLength(c, True)
        approximation = cv2.approxPolyDP(
            c, 0.02 * perimeter,
            True)  # use perimeter of contour for approximation accuracy

        # Assume the first contour with 4 points to be the outline of the grid
        if len(approximation) == 4:
            sudoku_contour = approximation
            break

    # No outline found
    if sudoku_contour is None:
        raise Exception("Could not find Sudoku grid outline.")

    # Show debug output
    if debug:
        output = img.copy()
        cv2.drawContours(output, [sudoku_contour], -1, (0, 255, 0), 2)
        cv2.imshow("Sudoku Outline", output)
        cv2.waitKey(0)

    # Apply four point perspective transform to obtain a top-down perspective
    img_sudoku = perspective.four_point_transform(img,
                                                  sudoku_contour.reshape(4, 2))
    img_gray = perspective.four_point_transform(img_gray,
                                                sudoku_contour.reshape(4, 2))

    if debug:
        cv2.imshow("Sudoku Transform", img_sudoku)
        cv2.waitKey(0)

    # Return a tuple of Sudoku in both RGB and grayscale
    return (img_sudoku, img_gray, sudoku_contour)
Code Example #24
File: functions.py Project: Rowansdabomb/gradeable
def transformPage(image):
    pageheight, pagewidth = image.shape[:2]
    gray = cv2.cvtColor(image, cv2.COLOR_BGR2GRAY)
    blurred = cv2.GaussianBlur(gray, (5, 5), 0)
    edged = cv2.Canny(blurred, 75, 200)

    # find contours in the edge map, then initialize
    # the contour that corresponds to the document
    cnts = cv2.findContours(edged.copy(), cv2.RETR_EXTERNAL,
                            cv2.CHAIN_APPROX_SIMPLE)
    cnts = imutils.grab_contours(cnts)  # version-safe contour unpacking
    docCnt = None
    paperEdge = False
    contourCounts = 0
    contourSizeThreshold = 3
    # ensure that at least one contour was found
    if len(cnts) > 0:
        # sort the contours according to their size in
        # descending order
        cnts = sorted(cnts, key=cv2.contourArea, reverse=True)

        # loop over the sorted contours
        if (cv2.contourArea(cnts[0]) > image.size * (.7)):
            for c in cnts:
                # approximate the contour
                peri = cv2.arcLength(c, True)
                approx = cv2.approxPolyDP(c, 0.02 * peri, True)
                contourCounts = contourCounts + 1
                # if our approximated contour has four points
                # and is one of largest by area,
                # then we can assume we have found the paper
                if len(approx) == 4 and contourCounts < contourSizeThreshold:
                    # print('page edge found')
                    paperEdge = True
                    docCnt = approx
                    break
                elif (contourCounts >= contourSizeThreshold):
                    # print('page edge NOT found')
                    break

    # apply a four point perspective transform to both the
    # original image and grayscale image to obtain a top-down
    # birds eye view of the paper
    if (paperEdge):
        paper = four_point_transform(image, docCnt.reshape(4, 2))
        warped = four_point_transform(gray, docCnt.reshape(4, 2))
        # apply Otsu's thresholding method to binarize the warped
        # piece of paper
        thresh = cv2.threshold(warped, 0, 255,
                               cv2.THRESH_BINARY_INV | cv2.THRESH_OTSU)[1]
    else:
        paper = image
        thresh = cv2.threshold(gray, 0, 255,
                               cv2.THRESH_BINARY_INV | cv2.THRESH_OTSU)[1]
    result = (paper, thresh)
    return result
Code Example #25
def find_puzzle(image: np.ndarray) -> Tuple[np.ndarray, np.ndarray]:
    '''
        Finds sudoku puzzle on the image and returns its perspective-transformed version (natural and grayscale)

        Parameters
        ----------
        - image : np.ndarray
                  image containing sudoku puzzle

        Returns
        -------
        Tuple[np.ndarray, np.ndarray] (puzzle, grayscale puzzle)
    '''
    # Convert to grayscale
    gray = cv2.cvtColor(image, cv2.COLOR_BGR2GRAY)
    # Blur image
    blurred = cv2.GaussianBlur(gray, (7, 7), 3)

    # Apply adaptive thresholding
    thresh = cv2.adaptiveThreshold(blurred, 255,
                                   cv2.ADAPTIVE_THRESH_GAUSSIAN_C,
                                   cv2.THRESH_BINARY, 11, 2)
    # Invert colors
    thresh = cv2.bitwise_not(thresh)

    # Find contours
    cnts = cv2.findContours(thresh.copy(), cv2.RETR_EXTERNAL,
                            cv2.CHAIN_APPROX_SIMPLE)
    cnts = imutils.grab_contours(cnts)
    # Sort by size in descending order
    cnts = sorted(cnts, key=cv2.contourArea, reverse=True)

    # Sudoku contour
    puzzleCnt = None

    # Loop over the contours
    for c in cnts:
        # Approximate the contour
        peri = cv2.arcLength(c, True)
        approx = cv2.approxPolyDP(c, 0.02 * peri, True)

        # Assume that 4-points contour is the sudoku one
        if len(approx) == 4:
            puzzleCnt = approx
            break

    if puzzleCnt is None:
        raise Exception(("Sudoku is not found."))

    # Four point perspective transform
    puzzle = four_point_transform(image, puzzleCnt.reshape(4, 2))
    warped = four_point_transform(gray, puzzleCnt.reshape(4, 2))

    # Return results
    return (puzzle, warped)
Code Example #26
def cevap_kolon(cevap):
    pts1 = np.array([(2, 50), (300, 50), (2, 1545), (300, 1545)])
    pts2 = np.array([(300, 50), (600, 50), (302, 1545), (602, 1545)])
    pts3 = np.array([(600, 50), (900, 50), (602, 1545), (902, 1545)])
    pts4 = np.array([(900, 50), (1200, 50), (902, 1545), (1202, 1545)])

    col1 = four_point_transform(cevap, pts1)
    col2 = four_point_transform(cevap, pts2)
    col3 = four_point_transform(cevap, pts3)
    col4 = four_point_transform(cevap, pts4)
    return col1, col2, col3, col4
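
cevap_kolon ("answer column" in Turkish) slices an already-deskewed answer sheet into four fixed columns via hard-coded corner points, so it assumes the sheet was first warped to roughly 1200x1545 pixels. A minimal usage sketch with a hypothetical file name:

import cv2

sheet = cv2.imread("answer_sheet_warped.jpg")  # hypothetical, already top-down
col1, col2, col3, col4 = cevap_kolon(sheet)
for i, col in enumerate((col1, col2, col3, col4), start=1):
    cv2.imshow("column %d" % i, col)
cv2.waitKey(0)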
Code Example #27
File: board.py Project: oakhtar147/sudoku-solver
def find_puzzle(image, debug=False):
    # convert the image to grayscale and blur it slightly
    gray = cv2.cvtColor(image, cv2.COLOR_BGR2GRAY)
    blurred = cv2.GaussianBlur(gray, (7, 7), 3)

    thresh = cv2.adaptiveThreshold(blurred, 255,
                                   cv2.ADAPTIVE_THRESH_GAUSSIAN_C,
                                   cv2.THRESH_BINARY, 11, 2)

    # invert the pixels
    thresh = cv2.bitwise_not(thresh)

    if debug:
        cv2.imshow("Puzzle Thresh", thresh)
        cv2.waitKey(2000)
        cv2.destroyWindow("Puzzle Thresh")

    # find contours in the thresholded image and sort them by size in
    # descending order
    contours = cv2.findContours(thresh.copy(), cv2.RETR_EXTERNAL,
                                cv2.CHAIN_APPROX_SIMPLE)
    contours = imutils.grab_contours(contours)
    contours = sorted(contours, key=cv2.contourArea, reverse=True)
    # initialize a contour that corresponds to the puzzle outline
    puzzleContour = None
    # loop over the contours

    for contour in contours:
        perimeter = cv2.arcLength(contour, True)
        approx = cv2.approxPolyDP(contour, 0.02 * perimeter, True)

        if len(approx) == 4:
            puzzleContour = approx
            break

    if puzzleContour is None:
        raise Exception(("No puzzle found"))

    if debug:
        output = image.copy()
        cv2.drawContours(output, [puzzleContour], -1, (0, 0, 255), 2)
        cv2.imshow("Puzzle Contours", output)
        cv2.waitKey(2000)
        cv2.destroyWindow("Puzzle Contours")

    puzzle = four_point_transform(image, puzzleContour.reshape(4, 2))
    warped = four_point_transform(gray, puzzleContour.reshape(4, 2))

    if debug:
        cv2.imshow("Puzzle aligned", puzzle)
        cv2.waitKey(2000)
        cv2.destroyWindow("Puzzle aligned")

    return (puzzle, warped)
Code Example #28
File: ProcessImg.py Project: Xanxus41/OMG
def reviseImg():
    image = cv2.imread(Tools.SCREEN_PATH, cv2.IMREAD_COLOR)

    normal_four_points = [[958, 451], [888, 1117], [1600, 451], [1670, 1117]]
    ultimate_four_points = [[894, 186], [913, 453], [1666, 186], [1647, 453]]

    ultimate_img = four_point_transform(image, np.array(ultimate_four_points))
    normal_img = four_point_transform(image, np.array(normal_four_points))

    cv2.imwrite(Tools.ULTIMATE_PATH, ultimate_img)
    cv2.imwrite(Tools.NORMAL_PATH, normal_img)
Code Example #29
def findPuzzle(image):
    image = cv2.resize(image, (512, 512))
    gray = cv2.cvtColor(image, cv2.COLOR_BGR2GRAY)
    blurred = cv2.GaussianBlur(gray, (7, 7), 3)
    # apply adaptive thresholding and then invert the threshold map
    thresh = cv2.adaptiveThreshold(blurred, 255,
                                   cv2.ADAPTIVE_THRESH_GAUSSIAN_C,
                                   cv2.THRESH_BINARY, 11, 2)
    thresh = cv2.bitwise_not(thresh)

    # convert the image to grayscale and blur it slightly
    # thresh=preprocess(image)
    #     cv2.imshow("Puzzle Thresh", thresh)
    #     cv2.waitKey(0)
    # find contours in the thresholded image and sort them by size in
    # descending order
    cnts = cv2.findContours(thresh.copy(), cv2.RETR_EXTERNAL,
                            cv2.CHAIN_APPROX_SIMPLE)

    cnts = imutils.grab_contours(cnts)
    cnts = sorted(cnts, key=cv2.contourArea, reverse=True)
    # initialize a contour that corresponds to the puzzle outline
    puzzleCnt = None
    # loop over the contours
    for c in cnts:
        # approximate the contour
        peri = cv2.arcLength(c, True)
        approx = cv2.approxPolyDP(c, 0.02 * peri, True)
        # if our approximated contour has four points, then we can
        # assume we have found the outline of the puzzle

        if len(approx) == 4:
            puzzleCnt = approx
            break
    if puzzleCnt is None:
        raise Exception(("Could not find Sudoku puzzle outline. "
                         "Try debugging your thresholding and contour steps."))

    # draw the outline of the detected Sudoku puzzle
    output = image.copy()
    cv2.drawContours(output, [puzzleCnt], -1, (0, 255, 0), 2)

    #  plt.imshow(output)
    #  cv2.imshow("Puzzle Outline", output)
    #  cv2.waitKey(0)
    puzzle = four_point_transform(image, puzzleCnt.reshape(4, 2))
    warped = four_point_transform(gray, puzzleCnt.reshape(4, 2))
    #  cv2.imshow("Puzzle Transform", puzzle)
    #  cv2.waitKey(0)
    # return a 2-tuple of puzzle in both RGB and grayscale
    return puzzle, warped
Code Example #30
File: convert.py Project: yueyue10/PythonPro
def test():
    # load and resize the image
    img = cv2.imread('t3.jpg')
    img = cv2.resize(img, (500, 700), 0, 0)
    # 1. denoise (blurring smooths away small blemishes)
    result = cv2.blur(img, (5, 5))
    # 2. convert to grayscale (like an old photograph)
    gray = cv2.cvtColor(result, cv2.COLOR_BGR2GRAY)
    # Hough transform circle detection
    circles = cv2.HoughCircles(gray.copy(),
                               cv2.HOUGH_GRADIENT,
                               1,
                               200,
                               param1=250,
                               param2=15,
                               minRadius=2,
                               maxRadius=20)
    circles = np.round(circles[0, :]).astype('int')

    # sort the circle coordinates
    # print("circles1===", circles)
    circles2 = sorted(circles, key=lambda x: x[1])  # sort by y
    # print("circles2", circles2)
    top_list = sorted(circles2[0:2], key=lambda x: x[0])  # sort by x
    bottom_list = sorted(circles2[2:4], key=lambda x: x[0])  # sort by x
    circles3 = np.vstack((top_list, bottom_list))
    # print("circles3", circles3)

    four_points = []
    for idx, (x, y, r) in enumerate(circles3):
        # draw each detected circle onto the image
        cv2.circle(img, (x, y), r, (0, 255, 0), 4)
        if idx == 0:
            four_points.append([x + r, y + r])
        elif idx == 1:
            four_points.append([x - r, y + r])
        elif idx == 2:
            four_points.append([x + r, y - r])
        elif idx == 3:
            four_points.append([x - r, y - r])
    # perspective transform
    gray_trans = four_point_transform(gray, np.array(four_points))
    img_trans = four_point_transform(img, np.array(four_points))
    # show the new images
    cv2.imshow('circle', img)
    cv2.imshow('rect_img', img_trans)
    transform(gray_trans, img_trans)
    # press any key to exit
    cv2.waitKey(0)
    cv2.destroyAllWindows()
Code Example #31
File: FormReader.py Project: michaelwang1994/Main
    def __init__(self, imgpath, is_camera=False):

        if is_camera:
            self.img = cv2.imread(imgpath)
            self.img = cv2.resize(self.img.copy(), (int(.95*self.img.shape[1]), int(.95*self.img.shape[0])), interpolation = cv2.INTER_CUBIC)
            self.imggray_original = cv2.cvtColor(self.img, cv2.COLOR_BGR2GRAY)

            gray = cv2.GaussianBlur(self.imggray_original, (5, 5), 0)
            edged = cv2.Canny(gray, 75, 200)
            cv2.imshow("test", cv2.resize(edged.copy(), (int(.5*self.img.shape[1]), int(.5*self.img.shape[0])), interpolation = cv2.INTER_CUBIC))
            cv2.waitKey(0)

            # grab_contours copes with the 2- or 3-tuple returned by different OpenCV versions
            contours = imutils.grab_contours(cv2.findContours(edged.copy(), cv2.RETR_EXTERNAL, cv2.CHAIN_APPROX_SIMPLE))
            contours = sorted(contours, key=cv2.contourArea, reverse=True)

            # loop over the contours
            for i, contour in enumerate(contours):
                # approximate the contour
                peri = cv2.arcLength(contour, True)
                approx = cv2.approxPolyDP(contour, 0.05 * peri, True)

                # if our approximated contour has four points, then we
                # can assume that we have found our screen
                if len(approx) == 4:
                # if i == 0:
                    screenCnt = approx
                    self.img = four_point_transform(self.img.copy(), screenCnt.reshape(4, 2))
                    self.imggray_original = four_point_transform(gray, screenCnt.reshape(4, 2))
                    # threshold_local supersedes skimage's removed
                    # threshold_adaptive; it returns a threshold map, so
                    # binarize by comparing against it
                    T = threshold_local(self.imggray_original, 251, offset=10)
                    self.imggray_original = (self.imggray_original > T).astype("uint8") * 255

                    self.height, self.width, self.channels = self.img.shape
                    cv2.imshow("test", cv2.resize(self.imggray_original, (int(.5 * self.img.shape[1]), int(.5 * self.img.shape[0])), interpolation=cv2.INTER_CUBIC))
                    cv2.waitKey(0)
                    self.img_pil_gray = Image.fromarray(self.imggray_original)
                    self.imgpath = imgpath
                    self.window_height = int(.01 * self.height)
                    self.window_width = int(.01 * self.width)

                    break

        else:
            self.img = cv2.imread(imgpath)
            self.imggray_original = cv2.cvtColor(self.img.copy(), cv2.COLOR_BGR2GRAY)

            self.img_pil_gray = Image.fromarray(self.imggray_original)
            self.imgpath = imgpath
            self.height, self.width, self.channels = self.img.shape
            self.window_height = int(.01 * self.height)
            self.window_width = int(.01 * self.width)
Code Example #32
File: misc.py Project: ivandardi/srpv
def unwarp_region(cluster, image):
    # cluster = sorted(cluster, key=attrgetter('x'))
    left, right = cluster[0], cluster[-1]

    actual_coords = np.array([
        left.top_left,
        left.bottom_left,
        right.top_right,
        right.bottom_right,
    ])

    from imutils.perspective import four_point_transform
    transformed = four_point_transform(image, actual_coords)

    # fix to be binary again
    transformed = cv2.cvtColor(transformed, cv2.COLOR_BGR2GRAY)
    transformed[transformed != 0] = 255

    return transformed
Code Example #33
File: testRoboto.py Project: dwildmark/DA210A
            # compute the solidity of the original contour
            area = cv2.contourArea(c)
            hullArea = cv2.contourArea(cv2.convexHull(c))
            solidity = area / float(hullArea)

            # compute whether or not the width and height, solidity, and
            # aspect ratio of the contour falls within appropriate bounds
            keepDims = w > minWidth and h > minHeight
            keepSolidity = solidity > 0.9
            keepAspectRatio = 0.7 <= aspectRatio <= 1.3
            larger = area > largestArea           

            # ensure that the contour passes all our tests
            if keepDims and keepSolidity and keepAspectRatio and larger:
                warped = perspective.four_point_transform(edged, approx.reshape(4, 2))
                triangle_present = find_triangle(warped)
                if triangle_present:
                    shape_found = True
                    largestArea = area
                    distance = targetWidth / (radiansPerPixel * max(h, w))
                    largestApprox = approx
                    largest_side_px = max(w,h)
                
    if shape_found:
        state = "running"
        # draw an outline around the target and update the status text
        cv2.drawContours(gray, [largestApprox], -1, (0, 0, 125), 2)
        # Concatenate information string
        status = "D: " + str(int(distance)) + "cm px: " + str(int(largest_side_px))
        # compute the center of the contour region and draw the crosshairs
Code Example #34
	# loop over the sorted contours
	for c in cnts:
		# approximate the contour
		peri = cv2.arcLength(c, True)
		approx = cv2.approxPolyDP(c, 0.02 * peri, True)

		# if our approximated contour has four points,
		# then we can assume we have found the paper
		if len(approx) == 4:
			docCnt = approx
			break

# apply a four point perspective transform to both the
# original image and grayscale image to obtain a top-down
# birds eye view of the paper
paper = four_point_transform(image, docCnt.reshape(4, 2))
warped = four_point_transform(gray, docCnt.reshape(4, 2))

# apply Otsu's thresholding method to binarize the warped
# piece of paper
thresh = cv2.threshold(warped, 0, 255,
	cv2.THRESH_BINARY_INV | cv2.THRESH_OTSU)[1]

# find contours in the thresholded image, then initialize
# the list of contours that correspond to questions
cnts = cv2.findContours(thresh.copy(), cv2.RETR_EXTERNAL,
	cv2.CHAIN_APPROX_SIMPLE)
cnts = imutils.grab_contours(cnts)  # version-safe contour unpacking
questionCnts = []

# loop over the contours
Code Example #35
# author:	Adrian Rosebrock
# website:	http://www.pyimagesearch.com

# USAGE
# BE SURE TO INSTALL 'imutils' PRIOR TO EXECUTING THIS COMMAND
# python perspective_transform.py

# import the necessary packages
from imutils import perspective
import numpy as np
import cv2

# load the notecard code image, clone it, and initialize the 4 points
# that correspond to the 4 corners of the notecard
notecard = cv2.imread("../demo_images/notecard.png")
clone = notecard.copy()
pts = np.array([(73, 239), (356, 117), (475, 265), (187, 443)])

# loop over the points and draw them on the cloned image
for (x, y) in pts:
    cv2.circle(clone, (x, y), 5, (0, 255, 0), -1)

# apply the four point tranform to obtain a "birds eye view" of
# the notecard
warped = perspective.four_point_transform(notecard, pts)

# show the original and warped images
cv2.imshow("Original", clone)
cv2.imshow("Warped", warped)
cv2.waitKey(0)
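
Every example above delegates the actual warp to imutils.perspective.four_point_transform. If the imutils dependency is unwanted, the transform can be reproduced with plain OpenCV roughly as follows. This is a sketch of the well-known recipe (order the corners, size the output from the longer opposite sides, then cv2.getPerspectiveTransform + cv2.warpPerspective), not the imutils source verbatim.

import numpy as np
import cv2

def order_points(pts):
    # order the corners as top-left, top-right, bottom-right, bottom-left
    rect = np.zeros((4, 2), dtype="float32")
    s = pts.sum(axis=1)
    rect[0] = pts[np.argmin(s)]   # top-left has the smallest x + y
    rect[2] = pts[np.argmax(s)]   # bottom-right has the largest x + y
    d = np.diff(pts, axis=1)      # y - x for each corner
    rect[1] = pts[np.argmin(d)]   # top-right has the smallest y - x
    rect[3] = pts[np.argmax(d)]   # bottom-left has the largest y - x
    return rect

def four_point_transform(image, pts):
    (tl, tr, br, bl) = order_points(np.asarray(pts, dtype="float32"))
    # output size: the longer of each pair of opposite sides
    width = int(max(np.linalg.norm(br - bl), np.linalg.norm(tr - tl)))
    height = int(max(np.linalg.norm(tr - br), np.linalg.norm(tl - bl)))
    dst = np.array([[0, 0], [width - 1, 0],
                    [width - 1, height - 1], [0, height - 1]], dtype="float32")
    M = cv2.getPerspectiveTransform(np.array([tl, tr, br, bl]), dst)
    return cv2.warpPerspective(image, M, (width, height))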