Code Example #1
def get_rect_square(rect):
    assert len(rect) == 4, "Invalid vertices count"

    pts = order_points(rect)

    # Area of the quadrilateral as the sum of the two triangles obtained
    # by splitting it along the (pts[0], pts[2]) diagonal.
    s1 = get_tri_square([pts[0], pts[1], pts[2]])
    s2 = get_tri_square([pts[2], pts[3], pts[0]])

    return s1 + s2
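
Neither order_points nor get_tri_square appears in this listing. Below is a minimal sketch of what they could look like, assuming the common sum/difference corner-ordering trick and a cross-product triangle area; the project's actual helpers may differ.

import numpy as np

def order_points(pts):
    # Order four (x, y) points as [top-left, top-right, bottom-right, bottom-left].
    pts = np.asarray(pts, dtype="float32")
    rect = np.zeros((4, 2), dtype="float32")
    s = pts.sum(axis=1)
    rect[0] = pts[np.argmin(s)]   # top-left: smallest x + y
    rect[2] = pts[np.argmax(s)]   # bottom-right: largest x + y
    d = np.diff(pts, axis=1)
    rect[1] = pts[np.argmin(d)]   # top-right: smallest y - x
    rect[3] = pts[np.argmax(d)]   # bottom-left: largest y - x
    return rect

def get_tri_square(tri):
    # Triangle area from the cross product of two edge vectors.
    a, b, c = (np.asarray(p, dtype=float) for p in tri)
    return 0.5 * abs(np.cross(b - a, c - a))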
Code Example #2
def extract_rect(image, pts):
    u"""Извлекает прямоугольник pts из изображения image, применяя к нему преобразование исправления перспективы"""
    rect = order_points(pts)

    (tl, tr, br, bl) = rect
    maxWidth = max(int(np.linalg.norm(br - bl)), int(np.linalg.norm(tr - tl)))
    maxHeight = max(int(np.linalg.norm(br - tr)), int(np.linalg.norm(bl - tl)))

    dst = np.array([[0, 0], [maxWidth - 1, 0], [maxWidth - 1, maxHeight - 1],
                    [0, maxHeight - 1]],
                   dtype="float32")

    transform = cv2.getPerspectiveTransform(rect, dst)
    warped = cv2.warpPerspective(image, transform, (maxWidth, maxHeight))

    LOG.write("[RECTANGLE EXTRACTOR] EXTRACTED", warped)

    return warped
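
A minimal usage sketch for extract_rect. The file name and corner coordinates are placeholders; cv2.getPerspectiveTransform expects float32 points, so order_points is assumed to preserve that dtype, and the module-level LOG object used inside extract_rect is assumed to be configured.

import cv2
import numpy as np

image = cv2.imread("document.jpg")                        # hypothetical input image
corners = np.array([[55, 40], [420, 60],                  # rough (x, y) quadrilateral
                    [430, 300], [60, 280]], dtype="float32")
warped = extract_rect(image, corners)                     # perspective-corrected crop
cv2.imwrite("warped.png", warped)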
Code Example #3
def select_roi(img, win_name, undistort=False):
    '''Select ROI from image.

    Prompts the user to interactively select a ROI in an image.

    Parameters
    ----------
    img : ndarray
        frame to select the ROI in
    win_name : str
        name of the plotting window
    undistort : bool
        whether to apply undistortion (currently unused, see the
        commented-out code below)

    Returns
    -------
    pts : ndarray
        4x2 array of box points
    roi : ndarray
        image cropped to pts
    '''
    print("SelectROI (Enter=confirm, Esc=exit)")
    # Prompt for ROI to be analyzed
    try:
        # if undistort: # currently it doesn't help at all
        # img = fit_undistort(img, intrinsic_matrix, distortion_coeffs)

        (c, r, w, h) = cv2.selectROI(win_name,
                                     img,
                                     fromCenter=False,
                                     showCrosshair=False)
        # Esc kills the window
        if cv2.waitKey(0) & 0xff == 27:
            cv2.destroyAllWindows()
    finally:
        cv2.destroyAllWindows()
    # Store bounding box corners
    pts = [[r, c], [r, c + w], [r + h, c], [r + h, c + w]]
    pts = utils.order_points(np.asarray(pts))
    # Pull out roi
    roi = img[r:r + h, c:c + w]
    return pts, roi
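
A hypothetical usage sketch for select_roi; it assumes a GUI-enabled OpenCV build and a placeholder frame.png. Note that cv2.selectROI returns (x, y, w, h), which the function unpacks as (c, r, w, h) and then stores pts as (row, col) pairs.

import cv2

frame = cv2.imread("frame.png")              # hypothetical input frame
pts, roi = select_roi(frame, "select ROI")   # drag a box, press Enter to confirm
print(pts)                                   # 4x2 array ordered by utils.order_points
cv2.imwrite("roi.png", roi)                  # the cropped region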
Code Example #4
def mask_box_ellip(image, ellip):
    """Creates boolean mask of the ellipse and finds minimal-area rectangle.
    
    Parameters
    ------------
    image : 2D array
        region of interest as grayscale image
    ellip : tuple
        Ellipse defined by its centroid, axes lengths and angle.
    
    Returns
    ------------
    pts : array_like
        corners of the minimal-area rectangle in 
        [top_left, top_right, bottom_right, bottom_left] order
    mask : 2D array
        boolean mask of the full ellipse contour
    imageVis : 2D array
        image with the minimal-area rectangle drawn on it
    """
    imageVis = image.copy()
    mask = np.zeros_like(image, dtype=np.uint8)
    cv2.ellipse(mask, ellip, color=(255, 255, 255), thickness=7)
    # findContours returns (image, contours, hierarchy) in OpenCV 3.x,
    # so [1] is the contour list and [0] takes its first external contour
    contour = cv2.findContours(mask.copy(), cv2.RETR_EXTERNAL,
                               cv2.CHAIN_APPROX_SIMPLE)[1][0]

    rect = cv2.minAreaRect(contour)
    pts = cv2.boxPoints(rect)
    pts = np.int64(pts)
    # obtain a consistent order of the points
    pts = utils.order_points(pts)

    cv2.polylines(img=imageVis,
                  pts=[pts],
                  isClosed=True,
                  color=(255, 255, 255),
                  thickness=7)
    cv2.ellipse(imageVis, ellip, color=(255, 255, 255), thickness=7)

    return pts, mask, imageVis
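
The [1][0] indexing above assumes the OpenCV 3.x findContours signature (image, contours, hierarchy); OpenCV 2.4 and 4.x return only (contours, hierarchy). A version-agnostic sketch of the same step, which also applies to the 3-value unpack in Code Example #5 below:

res = cv2.findContours(mask.copy(), cv2.RETR_EXTERNAL, cv2.CHAIN_APPROX_SIMPLE)
contours = res[0] if len(res) == 2 else res[1]   # OpenCV 2/4 vs. OpenCV 3
contour = contours[0]                            # first external contour, as above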
Code Example #5
    def compute_center_slit(image, params):
        image_, original = np.copy(image), np.copy(image)

        binary_threshold = params['binary_threshold']
        cannyl = params['canny_thresh_low']
        cannyh = params['canny_thresh_high']

        binary = np.zeros_like(image_)
        binary[image_ > binary_threshold] = 255      
        binary = np.uint8(binary)

        if params['debug']:
            cv2.imshow("Window-1", binary)            
            cv2.waitKey(0)

        # note: the 3-value unpacking assumes the OpenCV 3.x findContours API
        im2, contours, hierarchy = cv2.findContours(binary, cv2.RETR_EXTERNAL, cv2.CHAIN_APPROX_SIMPLE)

        try:
            # pick the contour with the most points and fit a rotated rectangle to it
            sizes = [c.shape[0] for c in contours]
            cnt = contours[np.argmax(sizes)]
            rect = cv2.minAreaRect(cnt)
            box = cv2.boxPoints(rect)
            box = np.array(box, dtype='int')
            box = order_points(box)
        except Exception as e:
            if params['debug']:
                print(e)
            # failure sentinel; note the type differs from the string returned on success
            return (-1, -1)

        if not params['quiet']:
            clone = np.dstack((original.copy(), original.copy(), original.copy()))
            for pt in box:
                cv2.circle(clone, (pt[0], pt[1]), 5, (0,0,255), -1)

            plt.figure(figsize=(20, 20))
            plt.imshow(clone)
            plt.show()
    
        return ','.join([f"({b[0]}, {b[1]})" for b in box])
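
compute_center_slit is shown indented as if it were a method or nested function; the sketch below calls it as a plain function with a minimal, hypothetical params dict. Only the key names are taken from the code above; the file name and threshold values are placeholders, and canny_thresh_low / canny_thresh_high are read but not used in the snippet shown.

import cv2

gray_image = cv2.imread("slit.png", cv2.IMREAD_GRAYSCALE)   # hypothetical input
params = {
    'binary_threshold': 128,   # placeholder value
    'canny_thresh_low': 50,    # read but unused above
    'canny_thresh_high': 150,  # read but unused above
    'debug': False,
    'quiet': True,
}
print(compute_center_slit(gray_image, params))   # e.g. "(x1, y1),(x2, y2),..." or (-1, -1)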
Code Example #6
    def test_order_points(self, pts):
        pts_new = utils.order_points(np.random.permutation(pts))
        assert (pts_new == pts).all()
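
The pts argument of this test is supplied by the test framework. One hypothetical way to wire it up with pytest.mark.parametrize (the real project may use a fixture or property-based testing instead):

import numpy as np
import pytest
import utils   # the project's helper module, assumed importable

@pytest.mark.parametrize("pts", [
    np.array([[0, 0], [10, 0], [10, 5], [0, 5]], dtype="float32"),  # tl, tr, br, bl
])
def test_order_points(pts):
    # shuffling the rows must not change the canonical order
    assert (utils.order_points(np.random.permutation(pts)) == pts).all()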
Code Example #7
File: scan.py  Project: Bunny1421/OCR_finalProject
    def get_contour(self, rescaled_image):
        """
        Returns a numpy array of shape (4, 2) containing the vertices of the four corners
        of the document in the image. It considers the corners returned from get_corners()
        and uses heuristics to choose the four corners that most likely represent
        the corners of the document. If no corners were found, or the four corners represent
        a quadrilateral that is too small or convex, it returns the original four corners.
        """
        # these constants are carefully chosen
        MORPH = 9
        CANNY = 84
        HOUGH = 25

        IM_HEIGHT, IM_WIDTH, _ = rescaled_image.shape

        # convert the image to grayscale and blur it slightly
        gray = cv2.cvtColor(rescaled_image, cv2.COLOR_BGR2GRAY)
        gray = cv2.GaussianBlur(gray, (7, 7), 0)

        # dilate helps to remove potential holes between edge segments
        kernel = cv2.getStructuringElement(cv2.MORPH_RECT, (MORPH, MORPH))
        dilated = cv2.morphologyEx(gray, cv2.MORPH_CLOSE, kernel)

        # find edges and mark them in the output map using the Canny algorithm
        edged = cv2.Canny(dilated, 0, CANNY)
        test_corners = self.get_corners(edged)

        approx_contours = []

        if len(test_corners) >= 4:
            quads = []

            for quad in itertools.combinations(test_corners, 4):
                points = np.array(quad)
                points = utils.order_points(points)
                points = np.array([[p] for p in points], dtype="int32")
                quads.append(points)

            # get top five quadrilaterals by area
            quads = sorted(quads, key=cv2.contourArea, reverse=True)[:5]
            # sort candidate quadrilaterals by their angle range, which helps remove outliers
            quads = sorted(quads, key=self.angle_range)

            approx = quads[0]
            if self.is_valid_contour(approx, IM_WIDTH, IM_HEIGHT):
                approx_contours.append(approx)

            # for debugging: uncomment the code below to draw the corners and contour found
            # by get_corners() and overlay it on the image

            # cv2.drawContours(rescaled_image, [approx], -1, (20, 20, 255), 2)
            # plt.scatter(*zip(*test_corners))
            # plt.imshow(rescaled_image)
            # plt.show()

        # also attempt to find contours directly from the edged image, which occasionally
        # produces better results
        (cnts, hierarchy) = cv2.findContours(edged.copy(), cv2.RETR_EXTERNAL,
                                             cv2.CHAIN_APPROX_SIMPLE)
        cnts = sorted(cnts, key=cv2.contourArea, reverse=True)[:5]

        # loop over the contours
        for c in cnts:
            # approximate the contour
            approx = cv2.approxPolyDP(c, 80, True)
            if self.is_valid_contour(approx, IM_WIDTH, IM_HEIGHT):
                approx_contours.append(approx)
                break

        # If we did not find any valid contours, just use the whole image
        if not approx_contours:
            TOP_RIGHT = (IM_WIDTH, 0)
            BOTTOM_RIGHT = (IM_WIDTH, IM_HEIGHT)
            BOTTOM_LEFT = (0, IM_HEIGHT)
            TOP_LEFT = (0, 0)
            screenCnt = np.array([[TOP_RIGHT], [BOTTOM_RIGHT], [BOTTOM_LEFT],
                                  [TOP_LEFT]])

        else:
            screenCnt = max(approx_contours, key=cv2.contourArea)

        return screenCnt.reshape(4, 2)
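
is_valid_contour and angle_range are not shown in this listing. A plausible, hypothetical sketch of the validity check used above, written as a plain function; the project's actual heuristic and thresholds may differ:

import cv2

def is_valid_contour(cnt, IM_WIDTH, IM_HEIGHT, min_area_ratio=0.25):
    # Hypothetical check: the candidate must be a quadrilateral and cover
    # at least a minimum fraction of the image area.
    return (len(cnt) == 4 and
            cv2.contourArea(cnt) > IM_WIDTH * IM_HEIGHT * min_area_ratio)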