Code Example #1
    def detectAndDescribe(self, image):
        # convert the image to grayscale
        gray = cv2.cvtColor(image, cv2.COLOR_BGR2GRAY)

        print("version:", imutils.is_cv4())

        # check to see if we are using OpenCV 3.X
        if (imutils.is_cv3() or imutils.is_cv4()):
            # detect and extract features from the image
            descriptor = cv2.xfeatures2d.SIFT_create()
            #descriptor = cv2.FeatureDetector_create("SIFT")
            (kps, features) = descriptor.detectAndCompute(image, None)

        # otherwise, we are using OpenCV 2.4.X
        #else:
        # detect keypoints in the image
        #detector = cv2.xfeatures2d.SIFT_create()
        #	detector = cv2.FeatureDetector_create("SIFT")
        #	kps = cv2.xfeatures2d.SIFT_create().detect(gray)

        # extract features from the image
        #extractor = cv2.xfeatures2d.SIFT_create()
        #       extractor = cv2.DescriptorExtractor_create("SIFT")
        #	(kps, features) = extractor.compute(gray, kps)

        # convert the keypoints from KeyPoint objects to NumPy
        # arrays
        kps = np.float32([kp.pt for kp in kps])

        # return a tuple of keypoints and features
        return (kps, features)
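A side note on the SIFT factory used above: cv2.xfeatures2d.SIFT_create() requires an opencv-contrib build, and since OpenCV 4.4 (the SIFT patent having expired) the detector also lives in the main module as cv2.SIFT_create(). A hedged fallback sketch:

try:
    descriptor = cv2.SIFT_create()                # OpenCV >= 4.4, main module
except AttributeError:
    descriptor = cv2.xfeatures2d.SIFT_create()    # older opencv-contrib builds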
Code Example #2
def get_outer_box_contour(original_image):
    original_image_copy = original_image.copy()
    # show_image(original_image_copy , title='original_image', delayed_show=True)

    gray = cv2.cvtColor(original_image_copy, cv2.COLOR_BGR2GRAY)
    blurred = cv2.medianBlur(gray, 7)
    blurred = cv2.blur(blurred, ksize=(7, 7))
    thresh = cv2.adaptiveThreshold(
        blurred, 255, cv2.ADAPTIVE_THRESH_GAUSSIAN_C, cv2.THRESH_BINARY, 11, 2)
    # show_image(thresh, title='thresh', delayed_show=True)

    kernel = np.ones((13, 13), np.uint8)
    eroded = cv2.erode(thresh, kernel, iterations=1)
    dilated = cv2.dilate(eroded, kernel, iterations=1)
    # show_image(dilated, title='after erosion-dilation', delayed_show=True)

    edged_image = cv2.Canny(
        dilated,
        threshold1=180,
        threshold2=230,
        L2gradient=True,
        apertureSize=3)

    # show_image(edged_image, title='edged_image', delayed_show=True)
    cnts = cv2.findContours(edged_image.copy(), cv2.RETR_LIST,
                            cv2.CHAIN_APPROX_SIMPLE)
    if imutils.is_cv3():
        cnts = cnts[1]
    elif imutils.is_cv4():
        cnts = cnts[0]
    else:
        raise ImportError(
            'must have opencv version 3 or 4, yours is {}'.format(
                cv2.__version__))

    contour_image = edged_image.copy()
    cv2.drawContours(contour_image, cnts, -1, (255, 0, 0), 3)
    # show_image(contour_image, title='contoured_image', delayed_show=False)

    # validate
    image_perim = 2 * sum(edged_image.shape)
    docCnt = None
    assert len(cnts) > 0, 'no contours found when looking for outer box'
    for c in sorted(cnts, key=cv2.contourArea, reverse=True):
        perim = cv2.arcLength(c, True)
        approx = cv2.approxPolyDP(c, 0.05 * perim, True)
        if len(approx) == 4:
            docCnt = approx
            break
    min_acceptable_perim = image_perim * 0.66
    if not isinstance(docCnt, np.ndarray) or perim < min_acceptable_perim:
        if debug_mode():
            temp_image = cv2.cvtColor(edged_image, cv2.COLOR_GRAY2RGB)
            if docCnt is not None:
                cv2.drawContours(temp_image, [docCnt], -1, (255, 0, 0), 3)
            show_image(temp_image)
        raise OmrException(
            'no suitable outer contour found, '
            'biggest outer contour had perim of {}, needs to be bigger than {}'
            .format(perim, min_acceptable_perim))
    return docCnt
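OmrException, debug_mode and show_image are project-local helpers that do not appear on this page; minimal stand-ins (names and signatures inferred from the calls above, behavior assumed) could look like:

import os

class OmrException(Exception):
    """Raised when no usable sheet outline can be found."""

def debug_mode():
    # how debugging is toggled is an assumption; an env var is one option
    return os.environ.get('OMR_DEBUG') == '1'

def show_image(img, title='debug', delayed_show=False):
    cv2.imshow(title, img)
    if not delayed_show:
        cv2.waitKey(0)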
Code Example #3
File: detai.py Project: darknight98alone/nckh
def process_par(image, output, listBigBox, listResult):
    if len(listBigBox) > 0:
        listBigBox.sort(key=lambda x: x[1])
    results = []
    gray = cv2.cvtColor(image, cv2.COLOR_BGR2GRAY)
    _, thresh = cv2.threshold(gray, 0, 255,
                              cv2.THRESH_BINARY_INV + cv2.THRESH_OTSU)
    # assign a rectangle kernel size
    kernel = np.ones((5, 5), 'uint8')
    par_img = cv2.dilate(thresh, kernel, iterations=5)
    if imutils.is_cv2() or imutils.is_cv4():
        (contours, hierarchy) = cv2.findContours(par_img.copy(),
                                                 cv2.RETR_EXTERNAL,
                                                 cv2.CHAIN_APPROX_SIMPLE)
    elif imutils.is_cv3():
        (_, contours, hierarchy) = cv2.findContours(par_img.copy(),
                                                    cv2.RETR_EXTERNAL,
                                                    cv2.CHAIN_APPROX_SIMPLE)
    if len(contours) > 0:
        sorted_contours = sorted(contours,
                                 key=lambda ctr: cv2.boundingRect(ctr)[0] + cv2
                                 .boundingRect(ctr)[1] * image.shape[1])
        for i, cnt in enumerate(sorted_contours):
            x, y, w, h = cv2.boundingRect(cnt)
            cv2.rectangle(output, (x, y), (x + w, y + h), (0, 255, 0), 1)
        cv2.imwrite("rs.jpg", output)
        k = 1
        for i, cnt in enumerate(sorted_contours):
            x, y, w, h = cv2.boundingRect(cnt)
            cv2.rectangle(output, (x, y), (x + w, y + h), (0, 255, 0), 1)
            # printImage(output)
            crop = output[y:y + h, x:x + w]
            if len(listBigBox) > k - 1:
                if y > listBigBox[0][1]:
                    results.append(
                        ('', listBigBox[k - 1][0], listBigBox[k - 1][1],
                         listBigBox[k - 1][2], listBigBox[k - 1][3], k))
                    k += 1
            cv2.imwrite("temp.jpg", crop)
            output_tesseract = pytesseract.image_to_string(
                Image.open('temp.jpg'), lang='vie')
            if output_tesseract == '':
                continue
            temp = (output_tesseract, x, y, w, h, 0)
            results.append(temp)
    return output, results
Code Example #4
def cv2_estimateRigidTransform(from_pts, to_pts, full=False):
    """Estimate transforms in OpenCV 3 or OpenCV 4"""
    if imutils.is_cv4():
        transform = cv2.estimateAffinePartial2D(from_pts, to_pts)[0]
    else:
        transform = cv2.estimateRigidTransform(from_pts, to_pts, full)

    return transform
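A quick sanity check of this wrapper with made-up points: three correspondences shifted by (+2, +2) should recover a 2x3 partial-affine matrix whose last column is approximately [2, 2].

import numpy as np

from_pts = np.float32([[0, 0], [10, 0], [0, 10]]).reshape(-1, 1, 2)
to_pts = from_pts + 2.0  # translate every point by (+2, +2)

M = cv2_estimateRigidTransform(from_pts, to_pts)
print(M)  # [[1, 0, 2], [0, 1, 2]] up to numerical noise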
Code Example #5
def test_trajectory_transform_values():
    for window in [15, 30, 60]:
        stabilizer = VidStab(processing_max_dim=float('inf'))
        stabilizer.stabilize(input_path=OSTRICH_VIDEO, output_path='stable.avi', smoothing_window=window)

        pickle_test_transforms(stabilizer, 'pickled_transforms')

        check_transforms(stabilizer, is_cv4=imutils.is_cv4())
Code Example #6
    def apply(image, original_image):
        edge = cv2.Canny(image, 50, 100)
        contours = cv2.findContours(
            edge.copy(), cv2.RETR_LIST, cv2.CHAIN_APPROX_SIMPLE)
        contours = contours[0 if imutils.is_cv2() or imutils.is_cv4() else 1]

        cloned_image = original_image.copy()
        cv2.drawContours(cloned_image, contours, -1, (0, 255, 0), 8)

        contours = sorted(contours, key=cv2.contourArea, reverse=True)[:5]
        area = None

        for contour in contours:
            approx = cv2.approxPolyDP(
                contour, 0.02 * cv2.arcLength(contour, True), True)

            if len(approx) == 4:
                area = approx
                break

        if area is None:
            return (None, cloned_image)

        cv2.drawContours(cloned_image, [area], -1, (255, 0, 0), 8)

        points = np.array(area.reshape(4, 2))
        rect = np.zeros((4, 2), dtype=np.float32)

        summed = points.sum(axis=1)
        rect[0] = points[np.argmin(summed)]
        rect[2] = points[np.argmax(summed)]

        diff = np.diff(points, axis=1)
        rect[1] = points[np.argmin(diff)]
        rect[3] = points[np.argmax(diff)]

        (tl, tr, br, bl) = rect

        width_a = np.sqrt(((br[0] - bl[0]) ** 2) + ((br[1] - bl[1]) ** 2))
        width_b = np.sqrt(((tr[0] - tl[0]) ** 2) + ((tr[1] - tl[1]) ** 2))
        max_width = max(int(width_a), int(width_b))

        height_a = np.sqrt(((tr[0] - br[0]) ** 2) + ((tr[1] - br[1]) ** 2))
        height_b = np.sqrt(((tl[0] - bl[0]) ** 2) + ((tl[1] - bl[1]) ** 2))
        max_height = max(int(height_a), int(height_b))

        dst = np.array([
            [0, 0],
            [max_width - 1, 0],
            [max_width - 1, max_height - 1],
            [0, max_height - 1]
        ], dtype=np.float32)
        mat = cv2.getPerspectiveTransform(rect, dst)
        original_image = cv2.warpPerspective(
            original_image, mat, (max_width, max_height))

        return (original_image, cloned_image)
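The corner-ordering trick in the middle of apply is the standard four-point idiom: the corner with the smallest x + y sum is the top-left, the largest sum is the bottom-right, and the smallest and largest y - x differences (what np.diff computes per row) give the top-right and bottom-left. A quick check with made-up corner points:

import numpy as np

points = np.array([[90, 10], [10, 10], [10, 90], [90, 90]])  # unordered
rect = np.zeros((4, 2), dtype=np.float32)
s = points.sum(axis=1)
d = np.diff(points, axis=1)
rect[0], rect[2] = points[np.argmin(s)], points[np.argmax(s)]  # tl, br
rect[1], rect[3] = points[np.argmin(d)], points[np.argmax(d)]  # tr, bl
print(rect)  # [[10, 10], [90, 10], [90, 90], [10, 90]] -> tl, tr, br, bl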
Code Example #7
def readAndGenerateInstanceSegmentation(outputPath, transformers, inputPath,
                                        imageInfo, annotationsInfo,
                                        ignoreClasses):
    name = imageInfo[0]
    imagePath = inputPath + "/" + name
    (w, h) = imageInfo[1]
    image = cv2.imread(imagePath)
    maskLabels = []
    labels = set()
    for (c, annotation) in annotationsInfo:
        mask = np.zeros((h, w), dtype="uint8")
        annotation = [[annotation[2 * i], annotation[2 * i + 1]]
                      for i in range(0, int(len(annotation) / 2))]
        pts = np.array([[int(x[0]), int(x[1])] for x in annotation], np.int32)
        pts = pts.reshape((-1, 1, 2))
        cv2.fillPoly(mask, [pts], 255)
        maskLabels.append((mask, c))
        labels.add(c)

    if not (labels.isdisjoint(ignoreClasses)):
        newtransformer = transformerGenerator("instance_segmentation")
        none = createTechnique("none", {})
        transformers = [newtransformer(none)]

    allNewImagesResult = []
    for (j, transformer) in enumerate(transformers):
        try:
            (newimage,
             newmasklabels) = transformer.transform(image, maskLabels)
            image = newimage
            maskLabels = newmasklabels
        except Exception:
            print("Error in image: " + imagePath)

    (hI, wI) = newimage.shape[:2]
    cv2.imwrite(outputPath + str(j) + "_" + name, newimage)
    newSegmentations = []
    for (mask, label) in newmasklabels:
        cnts = cv2.findContours(mask.copy(), cv2.RETR_EXTERNAL,
                                cv2.CHAIN_APPROX_SIMPLE)

        cnts = cnts[0] if imutils.is_cv2() or imutils.is_cv4() else cnts[1]
        if len(cnts) > 0:
            segmentation = [[x[0][0], x[0][1]] for x in cnts[0]]
            # Closing the polygon
            segmentation.append(segmentation[0])

            newSegmentations.append(
                (label, cv2.boundingRect(cnts[0]), segmentation,
                 cv2.contourArea(cnts[0])))

    allNewImagesResult.append(
        (str(j) + "_" + name, (wI, hI), newSegmentations))

    return allNewImagesResult
Code Example #8
def test_trajectory_transform_values():
    for window in [15, 30, 60]:
        stabilizer = VidStab()
        stabilizer.gen_transforms(input_path=OSTRICH_VIDEO,
                                  smoothing_window=window)

        pickle_test_transforms(stabilizer, 'pickled_transforms')

        check_transforms(stabilizer, is_cv4=imutils.is_cv4())
Code Example #9
File: detection.py Project: projectrgreen/CLoDSA
def detectBox(imageShape, box, technique):
    mask = np.zeros(imageShape, dtype="uint8")
    (category, (x, y, w, h)) = box
    cv2.rectangle(mask, (x, y), (x + w, y + h), 255, -1)
    newmask = technique.apply(mask)

    cnts = cv2.findContours(newmask.copy(), cv2.RETR_EXTERNAL,
                            cv2.CHAIN_APPROX_SIMPLE)
    cnts = cnts[0] if imutils.is_cv2() or imutils.is_cv4() else cnts[1]
    if len(cnts) == 0:
        return None
    return (category, cv2.boundingRect(cnts[0]))
Code Example #10
def cv2_estimateRigidTransform(from_pts, to_pts, full=False):
    """Estimate transforms in OpenCV 3 or OpenCV 4"""
    if not from_pts.shape[0] or not to_pts.shape[0]:
        return None

    if imutils.is_cv4():
        transform = cv2.estimateAffinePartial2D(from_pts, to_pts)[0]
    else:
        # noinspection PyUnresolvedReferences
        transform = cv2.estimateRigidTransform(from_pts, to_pts, full)

    return transform
Code Example #11
def find_contour_features(img, th=60, smooth_krn=(5, 5), draw=True):
    """
    Function Description: Find and draw contours in an image.  Return centroids
    of contours detected, areas, perimeters, and convex hulls
    """

    gray = cv2.cvtColor(img, cv2.COLOR_BGR2GRAY)  # img to grayscale
    blurred = cv2.GaussianBlur(gray, smooth_krn, 0)  # reduce high level noise
    thres = cv2.threshold(blurred, th, 255,
                          cv2.THRESH_BINARY)[1]  # find threshold
    # check to see if we are using OpenCV 2.X or OpenCV 4
    if imutils.is_cv2() or imutils.is_cv4():
        (contours, _) = cv2.findContours(thres.copy(), cv2.RETR_EXTERNAL,
                                         cv2.CHAIN_APPROX_SIMPLE)
    # check to see if we are using OpenCV 3
    elif imutils.is_cv3():
        (_, contours, _) = cv2.findContours(thres.copy(), cv2.RETR_EXTERNAL,
                                            cv2.CHAIN_APPROX_SIMPLE)
    if draw:
        cv2.drawContours(img, contours, -1, (0, 255, 0), 2)
    centroids = []
    areas = []
    perimeters = []
    convex_hull = []

    for c in contours:
        M = cv2.moments(c)  # find image moments for the contour region
        # compute the centroid; zero2one guards against a zero-area contour
        cX = int(M["m10"] / basic_functions.zero2one(M["m00"]))
        cY = int(M["m01"] / basic_functions.zero2one(M["m00"]))

        centroids = np.append(centroids, (int(cX), int(cY)))
        if draw:
            basic_functions.draw_point(img, (cX, cY))

        a = cv2.contourArea(c)
        areas = np.append(areas, a)

        p = cv2.arcLength(c, True)
        perimeters = np.append(perimeters, p)

        conh = cv2.convexHull(c)
        convex_hull = np.append(convex_hull, conh)

    return img, centroids, areas, perimeters, convex_hull
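A hypothetical usage sketch, assuming the basic_functions helpers the function relies on are importable: a single white square on a black canvas should yield one contour with its centroid near the middle.

canvas = np.zeros((200, 200, 3), dtype="uint8")
cv2.rectangle(canvas, (50, 50), (150, 150), (255, 255, 255), -1)

img, centroids, areas, perims, hulls = find_contour_features(canvas, draw=False)
print(centroids)  # approximately [100. 100.]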
Code Example #12
def test_trajectory_transform_values():
    for window in [15, 30, 60]:
        stabilizer = VidStab()
        stabilizer.gen_transforms(input_path=ostrich_video,
                                  smoothing_window=window)

        pickle_test_transforms(stabilizer, 'pickled_transforms')

        unpickled_transforms = download_pickled_transforms(
            window, cv4=imutils.is_cv4())

        assert np.allclose(stabilizer.transforms, unpickled_transforms[0])
        assert np.allclose(stabilizer.trajectory, unpickled_transforms[1])
        assert np.allclose(stabilizer.smoothed_trajectory,
                           unpickled_transforms[2])
Code Example #13
def test_stabilize_frame():
    # Init stabilizer and video reader
    stabilizer = VidStab(processing_max_dim=float('inf'))
    vidcap = cv2.VideoCapture(OSTRICH_VIDEO)

    window_size = 30
    while True:
        _, frame = vidcap.read()

        # Pass frame to stabilizer even if frame is None
        stabilized_frame = stabilizer.stabilize_frame(
            input_frame=frame, smoothing_window=window_size, border_size=10)

        if stabilized_frame is None:
            break

    check_transforms(stabilizer, is_cv4=imutils.is_cv4())
Code Example #14
File: textfile.py Project: hammedb197/data-graph
def extract_from_images(file):
    img = cv2.imread(file)
    gray = cv2.cvtColor(img, cv2.COLOR_BGR2GRAY)
    (_, binary) = cv2.threshold(gray, 150, 255,
                                cv2.THRESH_BINARY | cv2.THRESH_OTSU)
    # convert2binary
    contours = cv2.findContours(~binary, cv2.RETR_EXTERNAL,
                                cv2.CHAIN_APPROX_SIMPLE)
    contours = contours[0] if imutils.is_cv4() else contours[1]
    heights = [cv2.boundingRect(contour)[3] for contour in contours]
    average_ = sum(heights) / len(heights)

    mask = np.ones(img.shape[:2], dtype="uint8") * 255
    #create empty image of the size of the image
    for c in contours:
        [x, y, w, h] = cv2.boundingRect(c)
        if h > average_ * 2:
            cv2.drawContours(mask, [c], -1, 0, -1)

    title = pytesseract.image_to_string(mask)
    content = pytesseract.image_to_string(img)
    if len(content) == 0:
        content = textract.process(file)
    image = preprocess(content)
    if title is None:
        title = get_title(image)
    sentiment_ = sentiment(image)
    ner_ = ner(image)
    person = []
    location = []
    organization = []
    for x in ner_:
        if x.label_ == 'PERSON':
            person.append({x.label_: x.text})
        if x.label_ == 'ORG':
            organization.append({x.label_: x.text})
        if x.label_ == 'GPE':
            location.append({x.label_: x.text})
    sendToNeo4j(location=location,
                sentiment_=sentiment_,
                content=content,
                title=title,
                organization=organization,
                person=person)
Code Example #15
File: objcenter.py Project: uml4/tracking-pan-tilt
    def update(self, frame, frameCenter):
        # convert the frame to the HSV color space
        hsv = cv2.cvtColor(frame, cv2.COLOR_BGR2HSV)
        # construct a mask for the object color, then perform
        # a series of dilations and erosions to remove any small
        # blobs left in the mask
        mask = cv2.inRange(hsv, self.colorLower, self.colorUpper)
        mask = cv2.erode(mask, None, iterations=2)
        mask = cv2.dilate(mask, None, iterations=2)

        # find contours in the mask and initialize the current
        # (x, y) center of the object
        cnts = cv2.findContours(mask.copy(), cv2.RETR_EXTERNAL,
                                cv2.CHAIN_APPROX_SIMPLE)
        cnts = cnts[0] if imutils.is_cv4() else cnts[1]
        center = None

        # only proceed if at least one contour was found
        if len(cnts) > 0:
            # find the largest contour in the mask, then use
            # it to compute the minimum enclosing circle and
            # centroid
            c = max(cnts, key=cv2.contourArea)
            ((x, y), radius) = cv2.minEnclosingCircle(c)
            M = cv2.moments(c)
            center = (int(M["m10"] / M["m00"]), int(M["m01"] / M["m00"]))

            # only proceed if the radius meets a minimum size
            if radius > 10:
                # return the centroid and contour of the tracked object
                return (center, c)

        # otherwise no object was found, so return the center of the
        # frame
        return (frameCenter, None)
Code Example #16
def pickle_test_transforms(vidstab_obj, path):
    suffix = '_cv4.pickle' if imutils.is_cv4() else '.pickle'

    if not os.path.exists(path):
        os.makedirs(path)

    base_paths = [
        '{}/ostrich_transforms_{}{}', '{}/np_ostrich_trajectory_{}{}',
        '{}/np_ostrich_smooth_trajectory_{}{}'
    ]

    paths = [
        p.format(path, vidstab_obj._smoothing_window, suffix)
        for p in base_paths
    ]

    pickle_dump(vidstab_obj.transforms, paths[0])
    pickle_dump(vidstab_obj.trajectory, paths[1])
    pickle_dump(vidstab_obj.smoothed_trajectory, paths[2])
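pickle_dump is a small test-local helper rather than part of vidstab's public API; a minimal sketch of what it is assumed to do:

import pickle

def pickle_dump(obj, path):
    # serialize obj to disk at the given path
    with open(path, 'wb') as f:
        pickle.dump(obj, f)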
Code Example #17
def getTableCoordinate(image):
    """

    :param image:
    :return:
    listResult: x, y coordinates of layout 's bounding box
    listBigBox: x, y coordinates of table in image
    """
    # image = cv2.cvtColor(image, cv2.COLOR_RGB2GRAY)
    kernel = np.ones((3, 3), np.uint8)
    image = cv2.dilate(image, kernel, iterations=1)
    (h1, w1) = image.shape
    blured = cv2.GaussianBlur(image, (11, 11), 0)
    canImage = cv2.Canny(blured, 100, 250)
    newimage = np.zeros_like(image)
    if imutils.is_cv2() or imutils.is_cv4():
        (conts, _) = cv2.findContours(canImage.copy(), cv2.RETR_LIST,
                                      cv2.CHAIN_APPROX_SIMPLE)
    elif imutils.is_cv3():
        (_, conts, _) = cv2.findContours(canImage.copy(), cv2.RETR_LIST,
                                         cv2.CHAIN_APPROX_SIMPLE)
    listBigBoxPoint = []
    listBigBox = []
    listPoint = []
    listResult = []
    if len(conts) > 0:
        conts = contours.sort_contours(conts)[0]
        # conts = sorted(conts, key=lambda ctr: cv2.boundingRect(ctr)[0] + cv2.boundingRect(ctr)[1] * image.shape[1] )
        for i in range(len(conts)):
            (x, y, w, h) = cv2.boundingRect(conts[i])
            if w > 10 and h > 10 and w < 0.7 * w1:
                if (x, y) not in listPoint:
                    for j in range(-5, 5, 1):
                        listPoint.append((x + j, y + j))
                    listResult.append((x, y, w, h))
                    cv2.rectangle(newimage, (x, y), (x + w, y + h), 255, 1)
                    # printImage(newimage)
            if w > 10 and h > 10 and w > 0.7 * w1:
                if (x, y) not in listBigBoxPoint:
                    listBigBox.append((x, y, w, h))
                    listBigBoxPoint.append((x, y))
    ## temporary workaround
    return listResult, listBigBox
Code Example #18
def test_stabilize_frame():
    # Init stabilizer and video reader
    stabilizer = VidStab()
    vidcap = cv2.VideoCapture(ostrich_video)

    window_size = 30
    while True:
        grabbed_frame, frame = vidcap.read()

        # Pass frame to stabilizer even if frame is None
        stabilized_frame = stabilizer.stabilize_frame(
            input_frame=frame, smoothing_window=window_size, border_size=10)

        if stabilized_frame is None:
            break

    unpickled_transforms = download_pickled_transforms(window_size,
                                                       cv4=imutils.is_cv4())

    assert np.allclose(stabilizer.transforms, unpickled_transforms[0])
    assert np.allclose(stabilizer.trajectory, unpickled_transforms[1])
    assert np.allclose(stabilizer.smoothed_trajectory, unpickled_transforms[2])
Code Example #19
def process_par(image, output, listBigBox):
    if len(listBigBox) > 0:
        listBigBox.sort(key=lambda x: x[1])
    results = []
    gray = cv2.cvtColor(image, cv2.COLOR_BGR2GRAY)
    _, thresh = cv2.threshold(gray, 0, 255,
                              cv2.THRESH_BINARY_INV + cv2.THRESH_OTSU)
    kernel = np.ones((5, 5), 'uint8')
    par_img = cv2.dilate(thresh, kernel, iterations=5)
    if imutils.is_cv2() or imutils.is_cv4():
        (contours, hierarchy) = cv2.findContours(par_img.copy(),
                                                 cv2.RETR_EXTERNAL,
                                                 cv2.CHAIN_APPROX_SIMPLE)
    elif imutils.is_cv3():
        (_, contours, hierarchy) = cv2.findContours(par_img.copy(),
                                                    cv2.RETR_EXTERNAL,
                                                    cv2.CHAIN_APPROX_SIMPLE)
    if len(contours) > 0:
        sorted_contours = sorted(contours,
                                 key=lambda ctr: cv2.boundingRect(ctr)[0] + cv2
                                 .boundingRect(ctr)[1] * image.shape[1])
        k = 1
        for i, cnt in enumerate(sorted_contours):
            x, y, w, h = cv2.boundingRect(cnt)
            cv2.rectangle(output, (x, y), (x + w, y + h), (0, 255, 0), 1)
            crop = output[y:y + h, x:x + w]
            if len(listBigBox) > k - 1:
                if y > listBigBox[0][1]:
                    results.append(
                        ('', listBigBox[k - 1][0], listBigBox[k - 1][1],
                         listBigBox[k - 1][2], listBigBox[k - 1][3], k))
                    k += 1
            cv2.imwrite("temp.jpg", crop)
            output_tesseract = pytesseract.image_to_string(
                Image.open('temp.jpg'), lang='vie')
            if output_tesseract == '':
                continue
            results.append(output_tesseract)
    return output, results
Code Example #20
def extract_from_images(img):
    # img = cv2.imread(file)
    gray = cv2.cvtColor(img, cv2.COLOR_BGR2GRAY)
    (_, binary) = cv2.threshold(gray, 150, 255,
                                cv2.THRESH_BINARY | cv2.THRESH_OTSU)
    # convert2binary
    contours = cv2.findContours(~binary, cv2.RETR_EXTERNAL,
                                cv2.CHAIN_APPROX_SIMPLE)
    contours = contours[0] if imutils.is_cv4() else contours[1]
    heights = [cv2.boundingRect(contour)[3] for contour in contours]
    average_ = sum(heights) / len(heights)

    mask = np.ones(img.shape[:2], dtype="uint8") * 255
    #create empty image of the size of the image
    for c in contours:
        [x, y, w, h] = cv2.boundingRect(c)
        if h > average_ * 2:
            cv2.drawContours(mask, [c], -1, 0, -1)

    title = pytesseract.image_to_string(mask)
    content = pytesseract.image_to_string(img)
    #  if len(content) == 0:
    #      content = textract.process(content)
    return content
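Both extract_from_images variants divide by len(heights), so an image with no detected contours raises ZeroDivisionError. A defensive guard might look like this (the early-return policy is an assumption):

    heights = [cv2.boundingRect(c)[3] for c in contours]
    if not heights:
        return ''  # nothing text-like detected; bail out early
    average_ = sum(heights) / len(heights)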
Code Example #21
gabaritoInteresse = cv2.normalize(gabaritoInteresse, nomalizacao, 150, 255,
                                  cv2.NORM_MINMAX)
gabaritoInteresse = cv2.cvtColor(gabaritoInteresse.copy(), cv2.COLOR_BGR2GRAY)

blurred = cv2.GaussianBlur(gabaritoInteresse, (17, 17), 1)

thresh = cv2.adaptiveThreshold(gabaritoInteresse, 255,
                               cv2.ADAPTIVE_THRESH_GAUSSIAN_C,
                               cv2.THRESH_BINARY_INV, 11, 2)

edged = cv2.Canny(blurred, 100, 200)

cnts = cv2.findContours(thresh.copy(), cv2.RETR_CCOMP, cv2.CHAIN_APPROX_NONE)

hierarchy = cnts[1][0]

cnts = cnts[0] if imutils.is_cv4() else cnts[1]

questions = []

#bolhas, alternativaMarcada, contornosCirculos  =  bubble.bolhas(gabaritoInteresse.copy())

for c in cnts:
    (x, y, w, h) = cv2.boundingRect(c)
    ar = w / float(h)
    if (18 <= w <= 25) and (18 <= h <= 25) and 0.7 <= ar <= 1.3:
        box = [(x // 5) * 5, y]
        #box = [x+w/2, y+h/2, w/2]

        questions.append([c, box])
        #print(x, y)
Code Example #22
def getTableCoordinate(image):
    """

    :param image:
    :return:
    listResult: x, y coordinates of layout 's bounding box
    listBigBox: x, y coordinates of table in image
    """
    # image = cv2.cvtColor(image, cv2.COLOR_RGB2GRAY)
    kernel = np.ones((3, 3), np.uint8)
    image = cv2.dilate(image, kernel, iterations=1)
    (h1, w1) = image.shape
    blured = cv2.GaussianBlur(image, (11, 11), 0)
    canImage = cv2.Canny(blured, 100, 250)
    newimage = np.zeros_like(image)
    if imutils.is_cv2() or imutils.is_cv4():
        (conts, _) = cv2.findContours(canImage.copy(), cv2.RETR_LIST,
                                      cv2.CHAIN_APPROX_SIMPLE)
    elif imutils.is_cv3():
        (_, conts, _) = cv2.findContours(canImage.copy(), cv2.RETR_LIST,
                                         cv2.CHAIN_APPROX_SIMPLE)
    listBigBox = []
    listResult = []
    if len(conts) > 0:
        conts = contours.sort_contours(conts)[0]
        # conts = sorted(conts, key=lambda ctr: cv2.boundingRect(ctr)[0] + cv2.boundingRect(ctr)[1] * image.shape[1] )
        for i in range(len(conts)):
            (x, y, w, h) = cv2.boundingRect(conts[i])
            if w > 10 and h > 10 and w < 0.7 * w1:
                skip = False
                for temp in listResult:
                    for box in temp:
                        if IOU(box, (x, y, w, h)):
                            skip = True
                            break
                if not skip:
                    over = False
                    for i, temp in enumerate(listResult):
                        if abs(temp[0][1] - y) < 5:
                            listResult[i].append((x, y, w, h))
                            over = True
                            break
                    if not over:
                        listResult.append([(x, y, w, h)])
            if w > 10 and h > 10 and w > 0.7 * w1:
                skip = False
                for box in listBigBox:
                    if IOU(box, (x, y, w, h)):
                        skip = True
                        break
                if not skip:
                    listBigBox.append((x, y, w, h))
    listResult = sorted(listResult, key=lambda x: x[0][1])
    listBigBox = sorted(listBigBox, key=lambda x: x[1])
    for i, temp in enumerate(listResult):
        listResult[i] = sorted(listResult[i], key=lambda x: x[0])
    for temp in listResult:
        for (x, y, w, h) in temp:
            cv2.rectangle(newimage, (x, y), (x + w, y + h), (255, 255, 255), 1)
            printImage(newimage)
    ## temporary workaround
    return listResult, listBigBox
Code Example #23
def get_table_coordinate(image,scale=2):
	"""

	:param image:
	:return:
	list_result: x, y coordinates of the layout's bounding boxes
	list_big_box: x, y coordinates of tables in the image
	"""
	# image = cv2.cvtColor(image, cv2.COLOR_RGB2GRAY)
	kernel = np.ones((3, 3), np.uint8)
	image = cv2.dilate(image, kernel, iterations=1)
	(h1, w1) = image.shape
	blured = cv2.GaussianBlur(image, (3, 3), 0)
	canny_image = cv2.Canny(blured, 100, 250)
	if imutils.is_cv2() or imutils.is_cv4():
		(conts, _) = cv2.findContours(canny_image.copy(),
		                              cv2.RETR_LIST, cv2.CHAIN_APPROX_SIMPLE)
	elif imutils.is_cv3():
		(_, conts, _) = cv2.findContours(
				canny_image.copy(), cv2.RETR_LIST, cv2.CHAIN_APPROX_SIMPLE)
	list_big_box = []
	list_result = []
	if len(conts) > 0:
		conts = contours.sort_contours(conts)[0]
		# conts = sorted(conts, key=lambda ctr: cv2.boundingRect(ctr)[0] + cv2.boundingRect(ctr)[1] * image.shape[1] )
		for i in range(len(conts)):
			(x, y, w, h) = cv2.boundingRect(conts[i])
			if 10 < w < 0.7 * w1 and h > 10:
				skip = False
				for temp in list_result:
					for box in temp:
						if IOU(box, (x, y, w, h)):
							skip = True
							break
				if not skip:
					over = False
					for index, temp in enumerate(list_result):
						if abs(temp[0][1] - y) <= 5:
							list_result[index].append((x, y, w, h))
							over = True
					if not over:
						list_result.append([(x, y, w, h)])
				# printImage(newimage)
			if w > 10 and h > 50 and w > 0.7 * w1:
				skip = False
				for box in list_big_box:
					if IOU((x, y, w, h), box):
						skip = True
						break
				if not skip:
					list_big_box.append((x, y, w, h))
	## sort
	for index, _ in enumerate(list_result):
		list_result[index] = sorted(list_result[index], key=lambda x: x[0])
	list_result = sorted(list_result, key=lambda x: x[0][1])
	list_big_box = sorted(list_big_box, key=lambda x: x[1])
	for index,temp in enumerate(list_result):
		for index2,_ in enumerate(temp):
			(x,y,w,h) = list_result[index][index2]
			list_result[index][index2] = (x*scale,y*scale,w*scale,h*scale)
	for index,_ in enumerate(list_big_box):
		(x,y,w,h) = list_big_box[index]
		list_big_box[index] = (x*scale,y*scale,w*scale,h*scale)
	return list_result, list_big_box
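IOU is an external helper that does not appear on this page; judging from how it is called, it returns a truthy value when two (x, y, w, h) boxes overlap sufficiently. A minimal sketch (the 0.5 threshold is an assumption):

def IOU(box_a, box_b, thresh=0.5):
    (ax, ay, aw, ah) = box_a
    (bx, by, bw, bh) = box_b
    # width and height of the intersection rectangle
    ix = max(0, min(ax + aw, bx + bw) - max(ax, bx))
    iy = max(0, min(ay + ah, by + bh) - max(ay, by))
    inter = ix * iy
    union = aw * ah + bw * bh - inter
    return union > 0 and inter / union > thresh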
Code Example #24
testImg1 = dilationT[0:sz22, 0:sz21 // 5]

kernel = np.ones((5, 5), np.uint8)
erosion = cv.erode(testImg1, kernel, iterations=1)
cv.imshow('erosion', erosion)

# dilation
dilation = cv.dilate(img, kernel, iterations=1)

# find contours in the binary image, then initialize the list of
# contours corresponding to questions
cnts = cv.findContours(erosion.copy(), cv.RETR_EXTERNAL,
                       cv.CHAIN_APPROX_SIMPLE)
cnts = cnts[0] if imutils.is_cv4() else cnts[1]
questionCnts = []

# loop over each contour
for c in cnts:
    # compute the bounding box of the contour, then use it to derive
    # the aspect ratio
    (x, y, w, h) = cv.boundingRect(c)
    # ar = w / float(h)
    # # to qualify as a bubble, the bounding box should not be too small
    # # (at least 20 pixels per side) and its aspect ratio should be close to 1
    if h >= 3:
        questionCnts.append(c)

# sort the bubble contours top-to-bottom, then initialize the count
# of correct answers
questionCnts = contours.sort_contours(questionCnts, method="top-to-bottom")[0]
Code Example #25
    def __init__(self):
        # determine if we are using OpenCV v3.X or v4.X
        self.isv3 = imutils.is_cv3()
        self.isv4 = imutils.is_cv4()
Code Example #26
File: main.py Project: daoquangvy/RaspberryPiPuppy
    sleep(1)
    print("Grab Frame")
    tmpFrame = vs.read()
    tmpFrame = imutils.resize(tmpFrame, width=500)
    tmpFrame = imutils.rotate(tmpFrame, angle=180)
    hsv = cv2.cvtColor(tmpFrame, cv2.COLOR_BGR2HSV)

    print("Create tmpMask")
    tmpMask = cv2.inRange(hsv, joyconLower, joyconUpper)
    tmpMask = cv2.erode(tmpMask, None, iterations=2)
    tmpMask = cv2.dilate(tmpMask, None, iterations=2)

    print("Find Joycon..")
    countsCircle = cv2.findContours(tmpMask.copy(), cv2.RETR_EXTERNAL,
                                    cv2.CHAIN_APPROX_SIMPLE)
    countsCircle = countsCircle[0] if imutils.is_cv4() else countsCircle[1]
    center = None

    if len(countsCircle) > 0:
        print("Find Object")
        circle = max(countsCircle, key=cv2.contourArea)
        ((x, y), radius) = cv2.minEnclosingCircle(circle)
        MinimumCircle = cv2.moments(circle)
        centerObject = (int(MinimumCircle["m10"] / MinimumCircle["m00"]),
                        int(MinimumCircle["m01"] / MinimumCircle["m00"]))
        tmpValue = readData()
        disCenter = float(tmpValue)
        print(disCenter)
        if disCenter < 15:
            print("Find Joycon")
            myMQTT.publish("Rasp/Status", "Find Joycon", 0)
Code Example #27
# import the necessary packages
import imutils
import cv2

# load the Tetris block image, convert it to grayscale, and threshold
# the image
# noinspection PyUnresolvedReferences
print("OpenCV Version: {}".format(cv2.__version__))
image = cv2.imread("resource_files/tetris_blocks.png")
gray = cv2.cvtColor(image, cv2.COLOR_BGR2GRAY)
thresh = cv2.threshold(gray, 225, 255, cv2.THRESH_BINARY_INV)[1]

# check to see if we are using OpenCV 2.X or OpenCV 4
if imutils.is_cv2() or imutils.is_cv4():
    (cnts, _) = cv2.findContours(thresh.copy(), cv2.RETR_EXTERNAL,
                                 cv2.CHAIN_APPROX_SIMPLE)

# check to see if we are using OpenCV 3
elif imutils.is_cv3():
    (_, cnts, _) = cv2.findContours(thresh.copy(), cv2.RETR_EXTERNAL,
                                    cv2.CHAIN_APPROX_SIMPLE)

# draw the contours on the image
# noinspection PyUnboundLocalVariable
cv2.drawContours(image, cnts, -1, (240, 0, 159), 3)
cv2.imshow("Image", image)
print(image)
cv2.waitKey(0)
cv2.destroyAllWindows()
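Newer imutils releases also ship imutils.grab_contours, which collapses this version branching by unpacking the cv2.findContours return value on OpenCV 2.4, 3, and 4 alike:

cnts = cv2.findContours(thresh.copy(), cv2.RETR_EXTERNAL,
                        cv2.CHAIN_APPROX_SIMPLE)
cnts = imutils.grab_contours(cnts)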
Code Example #28
# import the necessary packages
from __future__ import print_function
import imutils
import cv2

# print the current OpenCV version on your system
print("Your OpenCV version: {}".format(cv2.__version__))

# check to see if you are using OpenCV 2.X
print("Are you using OpenCV 2.X? {}".format(imutils.is_cv2()))

# check to see if you are using OpenCV 3.X
print("Are you using OpenCV 3.X? {}".format(imutils.is_cv3(or_better=False)))

# check to see if you are using OpenCV 4.X
print("Are you using OpenCV 4.X? {}".format(imutils.is_cv4(or_better=False)))

# check to see if you are using *at least* OpenCV 2.X
print("Are you using at least OpenCV 2.X? {}".format(
    imutils.is_cv2(or_better=True)))

# check to see if you are using *at least* OpenCV 3.X
print("Are you using at least OpenCV 3.X? {}".format(
    imutils.is_cv3(or_better=True)))

# check to see if you are using *at least* OpenCV 4.X
print("Are you using at least OpenCV 4.X? {}".format(
    imutils.is_cv4(or_better=True)))

# should throw a deprecation warning
print("Checking for OpenCV 3: {}".format(imutils.check_opencv_version("3")))
Code Example #29
def roi(im):

    dim = (1595, 556)
    nomalizacao = np.ones(dim)
    # boost the contrast of the input frame (assumed to be the
    # answer-sheet region of interest)
    gabaritoInteresse = cv2.addWeighted(
        im, 1.07, np.zeros(im.shape, im.dtype), 0, 0)
    gabaritoInteresse = cv2.normalize(gabaritoInteresse, nomalizacao, 150, 255,
                                      cv2.NORM_MINMAX)
    gabaritoInteresse = cv2.cvtColor(gabaritoInteresse, cv2.COLOR_BGR2GRAY)

    imageBT = bt.bitwise(gabaritoInteresse)

    blurred = cv2.GaussianBlur(gabaritoInteresse, (17, 17), 1)

    thresh = cv2.adaptiveThreshold(gabaritoInteresse, 255,
                                   cv2.ADAPTIVE_THRESH_GAUSSIAN_C,
                                   cv2.THRESH_BINARY_INV, 11, 2)

    edged = cv2.Canny(blurred, 100, 200)

    cnts = cv2.findContours(thresh.copy(), cv2.RETR_CCOMP,
                            cv2.CHAIN_APPROX_NONE)

    hierarchy = cnts[1][0]

    cnts = cnts[0] if imutils.is_cv4() else cnts[1]

    questions = []


    for c in cnts:
        (x, y, w, h) = cv2.boundingRect(c)
        ar = w / float(h)
        if (w >= 20 and h >= 20) and (w <= 25
                                      and h <= 25) and ar >= 0.7 and ar <= 1.3:
            box = [(x // 5) * 5, y]
            #box = [x+w/2, y+h/2, w/2]

            questions.append([c, box])
            #print(x, y)
            #cv2.rectangle(gabarito, (x, y), (x+w, y+h), (255, 0, 0), 1)

    questions = sorted(questions, key=lambda q: q[1][1])

    questionCnts = []
    '''
    Now we sort from left to right, taking a batch of 30 contours
    (basically an entire row) and then sorting them in ascending order of x
    '''

    boxes = []
    for i in np.arange(0, len(questions), 30):
        # take a row of bubbles
        q = list(questions[i:i + 30])
        for o in q:
            boxes.append(o[1])
        # sort the row's contours from left to right using x
        q = sorted(q, key=lambda k: k[1][0])
        for o in q:
            questionCnts.append(o[0])

    # each question has 5 possible answers, so loop over the
    # questions in batches of 5
    respostas = np.empty(0, int)

    questao = []
    letra = []
    t = 0

    matriz = []

    for (q, i) in enumerate(np.arange(0, len(questionCnts), 30)):
        # calculate the old question no
        row = q // 5
        col = q % 5
        old_question_no = col + row

        cnts = contours.sort_contours(questionCnts[i:i + 30])[0]

        for (l, k) in enumerate(cnts):
            (x, y, w, h) = cv2.boundingRect(k)
            ar = w / float(h)
            if (20 <= w <= 25) and (20 <= h <= 25) and 0.7 <= ar <= 1.3:
                box = [(x // 5) * 5, y]
            respostas = np.append(respostas, l)

            # bolhas is assumed to be a module-level debug image
            cv2.rectangle(bolhas, (x, y), (x + w, y + h), (0, 0, 255), 1)

    # one row of 5 bubbles per question
    respostas = respostas.reshape(len(respostas) // 5, 5)
    print(respostas)

    return respostas
Code Example #30
# author:    Adrian Rosebrock
# website:   http://www.pyimagesearch.com

# import the necessary packages
from __future__ import print_function
import imutils
import cv2

# print the current OpenCV version on your system
print("Your OpenCV version: {}".format(cv2.__version__))

# check to see if you are using OpenCV 2.X
print("Are you using OpenCV 2.X? {}".format(imutils.is_cv2()))

# check to see if you are using OpenCV 3.X
print("Are you using OpenCV 3.X? {}".format(imutils.is_cv3(or_better=False)))

# check to see if you are using OpenCV 4.X
print("Are you using OpenCV 4.X? {}".format(imutils.is_cv4(or_better=False)))

# check to see if you are using *at least* OpenCV 2.X
print("Are you using at least OpenCV 2.X? {}".format(
    imutils.is_cv2(or_better=True)))

# check to see if you are using *at least* OpenCV 3.X
print("Are you using at least OpenCV 3.X? {}".format(
    imutils.is_cv3(or_better=True)))

# check to see if you are using *at least* OpenCV 4.X
print("Are you using at least OpenCV 4.X? {}".format(
    imutils.is_cv4(or_better=True)))

# should throw a deprecation warning
print("Checking for OpenCV 3: {}".format(imutils.check_opencv_version("3")))
Code Example #31
def get_answer(ID, QN):
    url = 'http://192.168.43.58:8080/video'
    cap = cv2.VideoCapture(1)  # cap is a camera object
    j = 0
    file1 = open(
        "C:/Users/SATYAPRAKASH/PycharmProjects/Test/AnswerSheet/" + str(ID) +
        "_Q" + str(QN) + ".txt", 'w')

    while True:
        ret, frame = cap.read()  # made a frame object
        blurred_frame = cv2.blur(frame, (5, 5))
        hsv = cv2.cvtColor(
            blurred_frame,
            cv2.COLOR_BGR2HSV)  # convert blurred_frame into hsv color

        lower_white = np.array([40, 0, 132])
        upper_white = np.array([145, 75, 245])

        mask = cv2.inRange(hsv, lower_white, upper_white)
        res = cv2.bitwise_and(frame, frame, mask=mask)
        r, thresh = cv2.threshold(mask, 40, 255, 0)

        # check to see if we are using OpenCV 2.X or OpenCV 4
        if imutils.is_cv2() or imutils.is_cv4():
            contours, hierarchy = cv2.findContours(thresh, cv2.RETR_EXTERNAL,
                                                   cv2.CHAIN_APPROX_NONE)
            # check to see if we are using OpenCV 3
        elif imutils.is_cv3():
            im2, contours, hierarchy = cv2.findContours(
                thresh, cv2.RETR_EXTERNAL, cv2.CHAIN_APPROX_NONE)

        for contour in contours:
            # find the biggest contour (c) by area
            c = max(contours, key=cv2.contourArea)
            x, y, w, h = cv2.boundingRect(c)

            # draw the biggest contour (c) in green
            cv2.rectangle(frame, (x, y), (x + w, y + h), (0, 255, 0), 3)
            roi = frame[y + 10:y + h - 5, x + 10:x + w - 8]
            cv2.imshow("Roi", roi)  # show image

            if j % 150 == 0:
                cv2.imwrite("Frame" + str(j) + ".jpg", roi)
                content = text_rec("Frame" + str(j) + ".jpg")
                print(content)
                file1.seek(0)
                file1.write(content)
                try:
                    os.remove("Frame" + str(j) + ".jpg")
                except OSError:
                    pass
                break

            j = j + 1

        # show the ROI currently being OCR'd
        cv2.imshow("Roi", roi)

        if cv2.waitKey(1) & 0xFF == ord('q'):
            break
    file1.close()
    #final_file(ID , QN)
    cap.release()
    cv2.destroyAllWindows()