Example #1
    def salvar_imagem_processada(self,
                                 tipo_imagem=("CO", "BI", "EC"),
                                 nome_arquivo=str):

        if tipo_imagem == "CO":
            imagem_processada = four_point_transform(
                self.orig,
                self.contornos.reshape(4, 2) * self.ratio)
            imagem_processada = imutils.resize(imagem_processada,
                                               height=self.original_shape[0])
            cv2.imwrite(nome_arquivo, imagem_processada,
                        [cv2.IMWRITE_JPEG_QUALITY, 100])
            return True

        elif tipo_imagem == "EC":
            imagem_processada = four_point_transform(
                self.orig,
                self.contornos.reshape(4, 2) * self.ratio)
            imagem_processada = imutils.resize(imagem_processada,
                                               height=self.original_shape[0])
            imagem_processada = cv2.cvtColor(imagem_processada,
                                             cv2.COLOR_BGR2GRAY)
            cv2.imwrite(nome_arquivo, imagem_processada,
                        [cv2.IMWRITE_JPEG_QUALITY, 100])
            return True

        elif tipo_imagem == "BI":
            cv2.imwrite(nome_arquivo, self.warped,
                        [cv2.IMWRITE_JPEG_QUALITY, 100])
            return True
        else:
            print("Tipo inválido de imagem")
            return False
Example #2
def four_point_transform_with_mask(mask, orin, box):
    # warp the mask using the box given in mask coordinates
    warped_mask = four_point_transform(mask, box)

    # rescale the box from mask coordinates to the original image's coordinates
    mask_h, mask_w = mask.shape[0:2]
    orin_h, orin_w = orin.shape[0:2]
    rx = orin_w / mask_w
    ry = orin_h / mask_h
    BOX = np.zeros_like(box)
    BOX[:, 0] = box[:, 0] * rx  # x coordinates
    BOX[:, 1] = box[:, 1] * ry  # y coordinates
    BOX = BOX.astype(np.int32)  # int32 avoids overflow on large images
    warped_orin = four_point_transform(orin, BOX)
    return warped_mask, warped_orin
Example #3
def shit(img, cords):
    # import the necessary packages
    from transform import four_point_transform
    import numpy as np
    import cv2

    # load the image and grab the source coordinates (i.e. the list of
    # (x, y) points)
    image = cv2.imread(img)
    pts = np.array(cords, dtype="float32")

    # apply the four point transform to obtain a "birds eye view" of
    # the image
    warped = four_point_transform(image, pts)

    # show the original and warped images
    ##    cv2.imshow("Original", image)
    ##    cv2.imshow("Warped", warped)
    ##    cv2.waitKey(0)
    cv2.imwrite('cropped.png', warped)
Example #4
    def get_frame_and_warp(self):

        # Get frame
        ret, frame = self._cap.read()

        # resize for efficiency (our camera has too high of a resolution!)
        frame = imutils.resize(frame, width=600)

        # get height and width
        h, w = frame.shape[:2]

        # Don't draw the labels onto the warp, because we check that for colors!
        frame_labled = frame.copy()
        corners = self._get_corners(frame_labled)

        if self._all_corners_found(corners):
            self._corners = corners

        warp = None

        if self._all_corners_found(self._corners):
            centers = []
            for c in self._corners:
                [x, y] = c.get_corner_center()

                # draw pink circles on the 4 corners
                cv2.circle(frame_labled, (int(x), int(y)), 5, (255, 0, 255),
                           -1)
                centers.append([x, y])

            offset = np.absolute(self._corners[0]._corners[0][0] -
                                 centers[0][0])
            centers = np.array(centers)
            warp = four_point_transform(frame, centers, 0)
        return frame_labled, warp
Example #5
def buildApp():
    ''' entrance of the application '''

    argp = argparse.ArgumentParser()
    argp.add_argument("-i",
                      "--image",
                      required=True,
                      help="Path to the image to be scanned")
    argp.add_argument("-c",
                      "--coords",
                      help="comma seperated list of source points")
    args = vars(argp.parse_args())

    image = cv2.imread(args["image"])

    ratio = image.shape[0] / 500.0
    orig = image.copy()
    image = imutils.resize(image, height=500)

    gray = cv2.cvtColor(image, cv2.COLOR_BGR2GRAY)
    gray = cv2.GaussianBlur(gray, (5, 5), 0)
    edged = cv2.Canny(gray, 75, 200)

    cnt = findContour(edged, image)

    print("Reshaped cnt", cnt)
    warped = four_point_transform(orig, cnt * ratio)
    warped = cv2.cvtColor(warped, cv2.COLOR_BGR2GRAY)
    threshold = threshold_local(warped, 11, offset=10, method="gaussian")
    warped = (warped > threshold).astype("uint8") * 255
    cv2.imshow("tranformed: ", warped)
    cv2.waitKey(0)
    cv2.destroyAllWindows()
Example #6
    def get_frame_and_warp(self):

        # Get frame
        ret, frame = self._cap.read()

        # resize for efficiency (our camera has too high of a resolution!)
        frame = imutils.resize(frame, width=600)

        # get height and width
        h, w = frame.shape[:2]

        # Create zero matrix to load projection

        corners = self._get_corners(frame)

        if self._all_corners_found(corners):
            self._corners = corners

        warp = None

        if self._all_corners_found(self._corners):
            centers = []
            for c in self._corners:
                [x, y] = c.get_corner_center()

                # draw pink circles on the 4 corners
                cv2.circle(frame, (int(x), int(y)), 5, (255, 0, 255), -1)
                centers.append([x, y])
            centers = np.array(centers)
            warp = four_point_transform(frame, centers)
        return frame, warp
Example #7
def scan(image):

    RESCALED_HEIGHT = 500.0
    OUTPUT_DIR = 'output'
    assert (image is not None)

    ratio = image.shape[0] / RESCALED_HEIGHT
    orig = image.copy()
    rescaled_image = imutils.resize(image, height=int(RESCALED_HEIGHT))

    screenCnt = get_contour(rescaled_image)

    screenCnt = interactive_get_contour(screenCnt, rescaled_image)

    warped = transform.four_point_transform(orig, screenCnt * ratio)

    gray = cv2.cvtColor(warped, cv2.COLOR_BGR2GRAY)

    sharpen = cv2.GaussianBlur(gray, (0, 0), 3)
    sharpen = cv2.addWeighted(gray, 1.5, sharpen, -0.5, 0)

    thresh = cv2.adaptiveThreshold(sharpen, 255,
                                   cv2.ADAPTIVE_THRESH_GAUSSIAN_C,
                                   cv2.THRESH_BINARY, 21, 15)
    return thresh
Example #8
def transform_perspective(image, board):
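	# warp the detected board to a top-down view, binarize it, and save the result as scanned.png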
	ratio = image.shape[0] / 500.0
	warped = transform.four_point_transform(image, board.reshape(4, 2) * ratio)
	warped = cv2.cvtColor(warped, cv2.COLOR_BGR2GRAY)
	warped = threshold_adaptive(warped, 250, offset = 10)
	warped = warped.astype("uint8") * 255
	cv2.imwrite("scanned.png", warped)
Example #9
def convert(uid, path):
    # try:
    image = cv2.imread(path)

    IM_HEIGHT, IM_WIDTH, _ = image.shape

    A, B, C, D = db.getpoints(uid)

    countours = np.array(
        [[A[0], A[1]], [B[0], B[1]], [C[0], C[1]], [D[0], D[1]]],
        dtype=np.int32)

    warped = transform.four_point_transform(image, countours)

    gray = cv2.cvtColor(warped, cv2.COLOR_BGR2GRAY)

    # sharpen image
    sharpen = cv2.GaussianBlur(gray, (0, 0), 3)
    sharpen = cv2.addWeighted(gray, 1.5, sharpen, -0.5, 0)

    # apply adaptive threshold to get black and white effect
    thresh = cv2.adaptiveThreshold(gray, 255, cv2.ADAPTIVE_THRESH_GAUSSIAN_C,
                                   cv2.THRESH_BINARY, 21, 15)

    z = pixeldata(thresh)

    if PROD != True:
        cv2.namedWindow('new', cv2.WINDOW_NORMAL)
        cv2.imshow("new", thresh)
        cv2.waitKey(0)

    return z, True, IM_HEIGHT, IM_WIDTH
    # except:
    print("Error in the Convert file")
    return -1, False
Example #10
def warp_image():
    ap = argparse.ArgumentParser()
    ap.add_argument("-i", "--image", help="path to the image file")
    ap.add_argument("-c",
                    "--coords",
                    help="comma seperated list of source points")
    args = vars(ap.parse_args())

    # load the image and grab the source coordinates (i.e. the list of
    # (x, y) points)
    # NOTE: using the 'eval' function is bad form, but for this example
    # let's just roll with it -- in future posts I'll show you how to
    # automatically determine the coordinates without pre-supplying them
    # image = cv2.resize(cv2.imread(args["image"]), (500,500))
    # pts = np.array(eval(args["coords"]), dtype = "float32")
    image = cv2.resize(cv2.imread('chess.jpg'), (500, 500))
    pts = np.array([(149, 81), (480, 78), (353, 388), (22, 390)],
                   dtype="float32")
    # apply the four point transform to obtain a "birds eye view" of
    # the image
    warped = four_point_transform(image, pts)
    # show the original and warped images
    cv2.imshow("Original", image)
    cv2.imshow("Warped", warped)
    cv2.waitKey(0)
Example #11
def detectCards(image):
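    # segment the image, keep up to ten of the largest card-shaped contours, and crop each one (with a small margin) via four_point_transform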
    toWarp = image.copy()
    cropped_dict = dict()
    # cropped_list = {}
    adjusted = pp.histogram_adjust(image.copy())
    segmented = pp.segmentation(adjusted)
    contours, hierarchy = cv2.findContours(np.uint8(segmented), cv2.RETR_CCOMP,
                                           cv2.CHAIN_APPROX_SIMPLE)
    contours_sorted = sorted(contours, key=cv2.contourArea, reverse=True)
    hull_list = []
    for cnt in contours_sorted[:10]:
        hull = cv2.convexHull(cnt)
        x, y, w, h = cv2.boundingRect(hull)
        if pp.isCardRect(x, y, w, h, hull_list):
            hull_list.append(hull)
            img_box = cv2.rectangle(image, (x, y), (x + w, y + h),
                                    color=(0, 255, 0),
                                    thickness=4)
            eps = computeEps(w, h)
            rect_point = np.array([
                [x - eps, y - eps],
                [x + w + eps, y - eps],
                [x + w + eps, y + h + eps],
                [x - eps, y + h + eps],
            ])
            warped = four_point_transform(toWarp, rect_point)
            # cropped_list.append(warped)
            cropped_dict[x - eps] = warped
    return cropped_dict, image
Example #12
def template_match(template,image):
    img_rgb = cv2.imread(image)
    img_gray = cv2.cvtColor(img_rgb,cv2.COLOR_BGR2GRAY)
    template = cv2.imread(template,0)

    w,h = template.shape[::-1]
    print(w,h)
    res = cv2.matchTemplate(img_gray,template,cv2.TM_CCOEFF_NORMED)
    threshold = 0.48
    loc = np.where(res >= threshold)
    point_square = []
    for pt in zip(*loc[::-1]):
        point_square.append(pt)
        cv2.rectangle(img_rgb, pt, (pt[0] + w, pt[1] + h), (0,0,255), 2)
        print(pt)
    cv2.line(img_rgb,(point_square[0][0]+w,point_square[0][1]+h),(point_square[2][0]+w,point_square[2][1]),(0,0,255),2)
    cv2.line(img_rgb,(point_square[0][0]+w,point_square[0][1]+h),(point_square[1][0],point_square[1][1]+h),(0,0,255),2)
    cv2.line(img_rgb,(point_square[3][0],point_square[3][1]),(point_square[1][0],point_square[1][1]+h),(0,0,255),2)
    cv2.line(img_rgb,(point_square[3][0],point_square[3][1]),(point_square[2][0]+w,point_square[2][1]),(0,0,255),2)
    
    points = [[point_square[0][0]+w,point_square[0][1]+h],
                [point_square[1][0],point_square[1][1]+h],
                [point_square[2][0]+w,point_square[2][1]],
                [point_square[3][0],point_square[3][1]]
            ]
            
    warped = four_point_transform(img_rgb, np.array(points).reshape(4, 2))
    cv2.imwrite("transformed.jpeg",warped)
Example #13
def document_scanner(filename):
	# load the image and compute the ratio of the old height
	# to the new height, clone it, and resize it
	image = cv2.imread(filename)
	ratio = image.shape[0] / 500.0
	orig = image.copy()
	image = imutils.resize(image, height = 500)
	# convert the image to grayscale, blur it, and find edges
	# in the image
	gray = cv2.cvtColor(image, cv2.COLOR_BGR2GRAY)
	gray = cv2.GaussianBlur(gray, (5, 5), 0)
	edged = cv2.Canny(gray, 23, 77)
	# show the original image and the edge detected image
	# print("STEP 1: Edge Detection")
	# cv2.imshow("Image", image)
	# cv2.imshow("Edged", edged)
	# cv2.waitKey(0)
	# cv2.destroyAllWindows()

	# find the contours in the edged image, keeping only the
	# largest ones, and initialize the screen contour
	cnts = cv2.findContours(edged.copy(), cv2.RETR_LIST, cv2.CHAIN_APPROX_SIMPLE)
	cnts = imutils.grab_contours(cnts)
	cnts = sorted(cnts, key = cv2.contourArea, reverse = True)[:5]
	# loop over the contours
	valid = False
	for c in cnts:
		# approximate the contour
		peri = cv2.arcLength(c, True)
		approx = cv2.approxPolyDP(c, 0.01*peri, True)
		# if our approximated contour has four points, then we
		# can assume that we have found our screen
		screenCnt = approx
		# cv2.drawContours(image, [screenCnt], -1, (random.randint(0, 255), random.randint(0, 255), random.randint(0, 255)), 1)
		if len(approx) == 4:
			valid = True
			break
	# show the contour (outline) of the piece of paper
	# print("STEP 2: Find contours of paper")
	# cv2.drawContours(image, [screenCnt], -1, (0, 255, 0), 2)
	# # cv2.imshow("Outline", image)
	# # cv2.waitKey(0)
	# # cv2.destroyAllWindows()
	if not valid:
		return image
	# apply the four point transform to obtain a top-down
	# view of the original image
	warped = four_point_transform(orig, screenCnt.reshape(4, 2) * ratio)
	# convert the warped image to grayscale, then threshold it
	# to give it that 'black and white' paper effect
	warped = cv2.cvtColor(warped, cv2.COLOR_BGR2GRAY)
	T = threshold_local(warped, 11, offset = 10, method = "gaussian")
	warped = (warped > T).astype("uint8") * 255
	# show the original and scanned images
	# print("STEP 3: Apply perspective transform")
	# cv2.imshow("Original", imutils.resize(orig, height = 650))
	# cv2.imshow("Scanned", imutils.resize(warped, height = 650))
	# cv2.waitKey(0)
	return warped
Example #14
    def _apply_transformation(self, box, blackwhite=False):
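        # warp the boxed region out of the full-resolution image; optionally binarize it for a "scanned paper" look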
        warped = four_point_transform(self.original, box * self.ratio)

        if blackwhite:
            warped = cvtColor(warped, COLOR_BGR2GRAY)
            t = threshold_local(warped, 11, offset=10, method="gaussian")
            warped = (warped > t).astype("uint8") * 255

        return warped
Example #15
def warp():
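    # warp the document region defined by the global screenCnt out of the full-resolution orig image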
    global screenCnt
    warped = four_point_transform(orig, screenCnt.reshape(4, 2) * ratio)
    # convert the warped image to grayscale, then threshold it
    # to give it that 'black and white' paper effect
    #warped = cv2.cvtColor(warped, cv2.COLOR_BGR2GRAY)
    #T = threshold_local(warped, 11, offset = 10, method = "gaussian")
    #warped = (warped > T).astype("uint8") * 255
    return warped
Example #16
def preprocess(image, case):
    ratio = image.shape[0] / 500.0
    orig = image.copy()
    image = imutils.resize(image, height=500)

    if case == str(True):

        gray = cv2.GaussianBlur(image, (5, 5), 0)
        gray = cv2.cvtColor(gray, cv2.COLOR_BGR2GRAY)
        mask = np.zeros((gray.shape), np.uint8)
        kernel1 = cv2.getStructuringElement(cv2.MORPH_ELLIPSE, (11, 11))

        close = cv2.morphologyEx(gray, cv2.MORPH_CLOSE, kernel1)
        div = np.float32(gray) / (close)
        res = np.uint8(cv2.normalize(div, div, 0, 255, cv2.NORM_MINMAX))
        res2 = cv2.cvtColor(res, cv2.COLOR_GRAY2BGR)
        edged = cv2.Canny(res, 75, 200)

        cnts = cv2.findContours(edged.copy(), cv2.RETR_LIST,
                                cv2.CHAIN_APPROX_SIMPLE)
        cnts = cnts[0] if imutils.is_cv2() else cnts[1]
        cnts = sorted(cnts, key=cv2.contourArea, reverse=True)[:5]

        # loop over the contours
        for c in cnts:
            # approximate the contour
            rect = cv2.boundingRect(c)
            area = cv2.contourArea(c)

            cv2.rectangle(edged.copy(), (rect[0], rect[1]),
                          (rect[2] + rect[0], rect[3] + rect[1]), (0, 0, 0), 2)
            peri = cv2.arcLength(c, True)
            approx = cv2.approxPolyDP(c, 0.02 * peri, True)

            # if our approximated contour has four points, then we
            # can assume that we have found our screen
            if len(approx) == 4:
                screenCnt = approx
                #print(screenCnt)
                break

        # show the contour (outline) of the piece of paper
        #print(screenCnt)
        cv2.drawContours(image, [screenCnt], -1, (0, 255, 0), 2)

        # apply the four point transform to obtain a top-down
        # view of the original image
        warped = four_point_transform(orig, screenCnt.reshape(4, 2) * ratio)
        warped1 = cv2.resize(warped, (610, 610))
        warp = cv2.cvtColor(warped, cv2.COLOR_BGR2GRAY)
        T = threshold_local(warp, 11, offset=10, method="gaussian")
        warp = (warp > T).astype("uint8") * 255
        th3 = cv2.adaptiveThreshold(warp,255,cv2.ADAPTIVE_THRESH_GAUSSIAN_C,\
                  cv2.THRESH_BINARY_INV,11,2)
        kernel = np.ones((5, 5), np.uint8)
        dilation = cv2.GaussianBlur(th3, (5, 5), 0)
Example #17
def perspective(original,screenCnt,ratio):
	from transform import four_point_transform
	warped = four_point_transform(original, screenCnt.reshape(4, 2) * ratio)
	warped = cv2.cvtColor(warped, cv2.COLOR_BGR2GRAY)
	T = threshold_local(warped, 11, offset = 10, method = "gaussian")
	warped = (warped > T).astype("uint8") * 255
	output=imutils.resize(warped, height = 650)
	#cv2.imshow("Contour", output)
	#cv2.waitKey(0)
	return output
Example #18
def apply_perspective_transform(orig, screenCnt, ratio):
    # apply the four point transform to obtain a top-down
    # view of the original image
    warped = four_point_transform(orig, screenCnt.reshape(4, 2) * ratio)

    # convert the warped image to grayscale, then threshold it
    # to give it that 'black and white' paper effect
    warped = cv2.cvtColor(warped, cv2.COLOR_BGR2GRAY)
    return cv2.adaptiveThreshold(warped, 255, cv2.ADAPTIVE_THRESH_GAUSSIAN_C,
                                 cv2.THRESH_BINARY, 11, 11)
Example #19
    def __getitem__(self, item):
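        # each annotation row stores the four (x, y) corner points in its first eight values and the class label in the ninth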
        image = four_point_transform(self.original_img, self.location_txt[item][0:8].reshape((4, 2)))
        # print('image type is: ', type(image))
        label = self.location_txt[item][8]
        # sample = {'image': image, 'label': label, 'showimg': image}
        sample = {'image': image, 'label': label}
        if self.transform:
            sample = self.transform(sample)

        return sample
Example #20
    def find_doc(self):
        '''
        find_doc: finds a potential document in an image, saves it to data/doc_detected.jpg
        returns:
            processed: a processed cv2 image
        '''

        # initialize vars
        ratio = self.im.shape[0] / 500.0
        orig = self.im.copy()

        # preprocess image
        image = imutils.resize(self.im, height=500)
        gray = cv2.cvtColor(image, cv2.COLOR_BGR2GRAY)
        gray = cv2.GaussianBlur(gray, (5, 5), 0)

        # Find edges
        edged = cv2.Canny(gray, 75, 200)

        # Find 5 largest contours
        cnts = cv2.findContours(edged.copy(), cv2.RETR_LIST,
                                cv2.CHAIN_APPROX_SIMPLE)
        cnts = cnts[0]
        cnts = sorted(cnts, key=cv2.contourArea, reverse=True)[:5]
        if cv2.contourArea(cnts[0]) < 10000:
            print("no document detected, running ocr on entire image...\n")
            return self.im

        for c in cnts:

            # Calculates a contour perimeter or a curve length
            peri = cv2.arcLength(c, True)
            approx = cv2.approxPolyDP(c, 0.01 * peri, True)  #0.02

            # Find contour with 4 points
            screenCnt = approx
            if len(approx) == 4:
                screenCnt = approx
                break

        if len(screenCnt) > 8:
            print("no document detected, running ocr on entire image...\n")
            return self.im

        cv2.drawContours(image, [screenCnt], -1, (0, 255, 0), 2)
        cv2.imwrite('data/doc_detected_contours.jpg', image)

        # Transform original image using four_point_transform function
        processed = four_point_transform(orig, screenCnt.reshape(4, 2) * ratio)

        # processed = cv2.cvtColor(processed, cv2.COLOR_BGR2GRAY)
        # processed = cv2.adaptiveThreshold(processed, 255, cv2.ADAPTIVE_THRESH_GAUSSIAN_C, cv2.THRESH_BINARY, 11, 2)

        cv2.imwrite('data/doc_detected.jpg', processed)
        return processed
Example #21
    def _apply_transformation(self, ctr, blackwhite=False):
        wrp = four_point_transform(self.original,
                                   ctr.reshape(4, 2) * self.ratio)

        # convert the warped image to grayscale, then threshold it
        # to give it that 'black and white' paper effect
        if blackwhite:
            wrp = cvtColor(wrp, COLOR_BGR2GRAY)
            t = threshold_local(wrp, 11, offset=10, method="gaussian")
            wrp = (wrp > t).astype("uint8") * 255
        return wrp
Example #22
def main(path):
    image = cv2.imread(path)
    ratio = image.shape[0] / 1000.0
    orig = image.copy()
    image = cv2.resize(image, (int(image.shape[1] / ratio), 1000), interpolation=cv2.INTER_CUBIC)

    cv2.imshow("processing", image)
    cv2.waitKey(500)

    # convert the image to grayscale, apply a Gaussian blur, then find edges with the Canny operator
    gray = cv2.cvtColor(image, cv2.COLOR_BGR2GRAY)
    gray = cv2.GaussianBlur(gray, (3, 3), 0)
    edged = cv2.Canny(gray, 75, 200)

    cv2.imshow("processing", edged)
    cv2.waitKey(500)

    # keep the five largest contours
    cnts = cv2.findContours(edged.copy(), cv2.RETR_LIST, cv2.CHAIN_APPROX_SIMPLE)
    cnts = sorted(cnts[1], key=cv2.contourArea, reverse=True)[:5]

    screenCnt = None
    # loop over the contours
    for c in cnts:
        # approximate the contour
        peri = cv2.arcLength(c, True)
        approx = cv2.approxPolyDP(c, 0.02 * peri, True)

        # if the approximated contour has four sides, assume it is the one we are looking for
        if len(approx) == 4:
            screenCnt = approx
            break
    if screenCnt is None:
        print("没有找到目标")
        sys.exit(-1)
    cv2.drawContours(image, [screenCnt], -1, (0, 255, 0), 2)
    cv2.imshow("processing", image)
    cv2.waitKey(500)

    # warp the image to a top-down view and binarize it
    warped = four_point_transform(orig, screenCnt.reshape(4, 2) * ratio)
    warped = cv2.cvtColor(warped, cv2.COLOR_BGR2GRAY)
    warped = cv2.threshold(warped, 0, 255, cv2.THRESH_OTSU)[1]

    # show the final result
    result = cv2.resize(warped, (650, 650), interpolation=cv2.INTER_AREA)
    cv2.imshow("processing", result)
    cv2.waitKey(1000)
    cv2.destroyAllWindows()

    # save the image for the following steps
    name = "images/scanned_" + path.split("/")[-1]
    cv2.imwrite(name, result)
    return (name, result)
Example #23
def Perspective_transform(orig, screenCnt, ratio): # function for the 2D transformation of the detected paper
	warped = four_point_transform(orig, screenCnt.reshape(4, 2) * ratio) # call the helper from the other module to flatten the image
	warped = cv2.flip(warped, 1) # mirror the flattened image along the Y axis

	height, width, channels = warped.shape # get the dimensions of the resulting image

	# if the width is greater than the height, the image is lying on its side and needs to be rotated by 90 degrees
	if width > height:
		warped = imutils.rotate_bound(warped, -90)

	Shadow_removal(warped) # call the shadow-removal function
Example #24
def readImg(imgDir, pts):
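    # warp the region defined by pts, threshold it, and read the number it contains with OCR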
    image = cv2.imread(imgDir)
    warped1 = four_point_transform(image, pts)
    #cv2.imshow("img", warped1)
    #cv2.waitKey(0)
    cv2.imwrite('imgTemp.png', warped1)
    opencv_threashhold.threshold_img('imgTemp.png')
    #print(ocr.text_from_image_file("imgTemp.png", "eng"))
    data[dec] = int(ocr.text_from_image_file("th.png", "eng"))
    msg = pos[0] + ' = ' + str(data[dec])
    print(msg)
Example #25
    def run_on_image(self, image):
        detect_passport_predictions = self.predictor(image)
        mask = detect_passport_predictions["instances"].pred_masks.numpy()[0]
        mask = np.array(mask, dtype=np.uint8) * 255
        _, contours, _ = cv2.findContours(mask, cv2.RETR_TREE,
                                          cv2.CHAIN_APPROX_SIMPLE)
        rect = cv2.minAreaRect(contours[0])
        box = cv2.boxPoints(rect)
        box = np.int0(box)
        passport = four_point_transform(image, order_points(box))
        return passport
Example #26
def click_event(event, x, y, flags, param):
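    # record each left click; once four points have been collected, warp the quadrilateral they define and display it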
    if event == cv2.EVENT_LBUTTONDOWN:
        print(x, ",", y)
        refPt.append([x, y])
        font = cv2.FONT_HERSHEY_SIMPLEX
        strXY = str(x) + ", " + str(y)
        cv2.putText(img, strXY, (x, y), font, 0.5, (255, 255, 0), 2)
        cv2.imshow("image", img)
        if len(refPt) == 4:
            pts = np.array(refPt)
            warped = four_point_transform(img, pts)
            cv2.imshow("Warped", warped)
Example #27
    def canny_edge_detector(self):

        gray = cv2.cvtColor(self.image, cv2.COLOR_BGR2GRAY)
        gray = cv2.GaussianBlur(gray, (5, 5), 0)
        edged = cv2.Canny(gray, 75, 200)
        # show the original image and the edge detected image
        print("STEP 1: Edge Detection")
        cv2.imshow("Image", self.image)
        cv2.imshow("Edged", edged)
        
        cv2.waitKey(0)
        
        cv2.destroyAllWindows()
        
        #return edged
        
        print("c")
        cnts = cv2.findContours(edged.copy(), cv2.RETR_LIST, cv2.CHAIN_APPROX_SIMPLE)
        cnts = imutils.grab_contours(cnts)
        cnts = sorted(cnts, key = cv2.contourArea, reverse = True)[:5]
        # loop over the contours
        for c in cnts:
            # approximate the contour
            peri = cv2.arcLength(c, True)
            approx = cv2.approxPolyDP(c, 0.02 * peri, True)
            # if our approximated contour has four points, then we
            # can assume that we have found our screen
            if len(approx) == 4:
                screenCnt = approx
                break
        # show the contour (outline) of the piece of paper
        

        print("STEP 2: Find contours")
        cv2.drawContours(self.image, [screenCnt], -1, (0, 255, 0), 2)
        cv2.imshow("Outline", self.image)
        cv2.waitKey(0)
        cv2.destroyAllWindows()  

        # apply the four point transform to obtain a top-down
        # view of the original image
        warped = four_point_transform(self.orig, screenCnt.reshape(4, 2) * self.ratio)
        # convert the warped image to grayscale, then threshold it
        # to give it that 'black and white' paper effect
        warped = cv2.cvtColor(warped, cv2.COLOR_BGR2GRAY)
        
        # show the original and scanned images
        print("STEP 3: Apply perspective transform")
        cv2.imshow("Scanned", imutils.resize(warped, height = 650))
        cv2.waitKey(0)      
Example #28
def scan_page(image):
    """Takes an image input that has a document in it and return a birds eye view, high contrast scan of the document"""

    gray, edged = create_edge_image(image)

    contours = find_contours_from_threshold(gray)

    # This code assumes that the largest contour will contain the document of interest
    page_contour = largest_contour(contours)

    # Approximate corners, perspective project and threshold the image
    points, page_approx = find_corners_from_contours(page_contour)

    img, lines = draw_four_lines(image.copy(), page_contour)
    corners = find_intersections(lines, img.shape)

    # Add the contour onto original image and show it
    # cv2.drawContours(img, page_approx, -1, (0, 255, 0), 20)
    # cv2.namedWindow('Corners', cv2.WINDOW_NORMAL)
    for pt in corners:
        cv2.circle(img, (pt[0], pt[1]), 15, (0, 255, 0), -1)
        cv2.namedWindow('Corners2', cv2.WINDOW_NORMAL)

    if resize_display:
        cv2.imshow('Corners2', cv2.resize(img, (560, 710)))
    else:
        cv2.imshow('Corners2', img)
    cv2.imwrite('../../Desktop/bad_corners.jpg', img)
    # cv2.imshow('Corners', cv2.resize(img.copy(), (560, 710)))

    # Apply a perspective transform to the document
    warped = transform.four_point_transform(image, np.array(corners))

    gray = cv2.cvtColor(warped, cv2.COLOR_BGR2GRAY)
    cv2.namedWindow('warped', cv2.WINDOW_NORMAL)
    cv2.imshow('warped', warped)

    # Apply an adaptive threshold on the image to remove contrasting shadows
    scanned_doc = adaptive_threshold(gray, type='adaptive')
    cv2.namedWindow('Scanned Document', cv2.WINDOW_NORMAL)
    if resize_display:
        cv2.imshow('Scanned Document', cv2.resize(scanned_doc.copy(), (560, 710)))
    else:
        cv2.imshow('Scanned Document', scanned_doc)

    # Press "q" to close windows and end program
    while True:
        if cv2.waitKey(1) & 0xFF == ord('q'):
            break
    cv2.destroyAllWindows()

    return scanned_doc
Example #29
    def __recorta_bordas(self):
        """
		Recorta as bordas externas da imagem que foi colocada para aprimorar o preprocessamento.
		Uso exclusivo da classe não podendo ser chamada externamente.	
			
		"""
        warped = four_point_transform(
            self.orig,
            self.contornos.reshape(4, 2) * self.ratio)
        warped = cv2.cvtColor(warped, cv2.COLOR_BGR2GRAY)
        warped = threshold_adaptive(warped, 251, 'gaussian', offset=20)
        warped = warped.astype("uint8") * 255
        return warped
Example #30
    def warpImage(self, image):
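        # warp the frame using the stored warp_coords; returns (warped, contoured_img), or None when no coordinates are available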
        if self.warp_coords is not None:
            contoured_img = copy.copy(image)

            if self.WARP_COORDS_LOCK is True:
                cv2.drawContours(contoured_img, [self.warp_coords], -1,
                                 (0, 255, 255), 2)

            warped = transform.four_point_transform(
                image, self.warp_coords.reshape(4, 2))
            return warped, contoured_img
        else:
            return None
Example #31
    def _scan(self):
        process_image_height = 500.0
        
        orig = self.cur_frame_full

        image = self.cur_frame_full.copy()
        ratio = image.shape[0] / process_image_height
        image = imutils.resize(image, height = int(process_image_height))

        gray = cv2.cvtColor(image, cv2.COLOR_BGR2GRAY)
        gray = cv2.GaussianBlur(gray, (5, 5), 0)
        edged = cv2.Canny(gray, 75, 200)

        # DEBUG
        DEBUG_DISPLAY = False
        if DEBUG_DISPLAY:
            if is_valid_frame(edged):
                cv2.imshow("DEBUG SCAN - Edged", edged)


        # find the largest contours
        contours = cv2.findContours(edged.copy(), cv2.RETR_LIST, cv2.CHAIN_APPROX_SIMPLE)
        contours = imutils.grab_contours(contours)
        contours = sorted(contours, key = cv2.contourArea, reverse = True)[:10]

        # process the contours
        document_contours = None
        for c in contours:
            # ignore contours that are too small
            if cv2.contourArea(c) <= self.min_roi_area:
                continue

            contour_length = cv2.arcLength(c, True)
            approx_poly = cv2.approxPolyDP(c, 0.02 * contour_length, True)

            # if approximated poly has four points then...document?
            if len(approx_poly) == 4:
                document_contours = approx_poly
                break

        if document_contours is None:
            print("Document Not Found")
            return

        # draw the contours of the document
        cv2.drawContours(image, [document_contours], -1, (0, 255, 0), 2)
        self.document_detect_frame = image

        # finally, transform the document (i.e. remove rotation)
        document_transform_frame = four_point_transform(orig, document_contours.reshape(4, 2) * ratio)
        self.document_transform_frame = document_transform_frame
Example #32
def process_print(raw_image):
    coords = "[(489.5, 191.5), (1704, 7), (1645.5, 1918.5), (433.5, 1639)]"

    # convert the raw image to grayscale
    img = cv2.cvtColor(raw_image, cv2.COLOR_RGB2GRAY)
    img = cv2.equalizeHist(img)
    # make the ridges dark and valleys white
    img = cv2.bitwise_not(img)

    pts = np.array(eval(coords), dtype="float32")

    # apply homography
    warped = four_point_transform(img, pts, aMaxWidth = 290, aMaxHeight = 267)

    return warped
Example #33
def map_perspective(base, image, coords, output):
    # load the image and grab the source coordinates (i.e. the list of
    # (x, y) points)
    # NOTE: using the 'eval' function is bad form, but for this example
    # let's just roll with it -- in future posts I'll show you how to
    # automatically determine the coordinates without pre-supplying them
    image = cv2.imread(image)
    base = cv2.imread(base)
    pts = np.array(eval(coords), dtype = 'float32')

    height = base.shape[0]
    width = base.shape[1]

    # resize the image so the transformation matches up
    resized = cv2.resize(image, (width, height), interpolation = cv2.INTER_AREA)

    # apply the four point transform to obtain a birds eye view of the image
    warped = four_point_transform(resized, height, width, pts)

    cv2.imwrite(output, warped)
Example #34
    def crop(self):
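        # detect the document outline, warp it to a top-down view, and save four vertical slices as part_0.jpg ... part_3.jpg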
        self._make_dir()
        self._store_original()

        ratio = self.image.shape[0] / 1500.0
        image = imutils.resize(self.image, 1500)


        gray = cv2.cvtColor(image, cv2.COLOR_BGR2GRAY)
        gray = cv2.GaussianBlur(gray, (5, 5), 0)
        edged = cv2.Canny(gray, 75, 200)

        (cnts, _) = cv2.findContours(edged.copy(), cv2.RETR_LIST, cv2.CHAIN_APPROX_SIMPLE)
        cnts = sorted(cnts, key = cv2.contourArea, reverse = True)[:5]

        # loop over the contours
        for c in cnts:
            # approximate the contour
            peri = cv2.arcLength(c, True)
            approx = cv2.approxPolyDP(c, 0.02 * peri, True)

            # if our approximated contour has four points, then we
            # can assume that we have found our screen

            if len(approx) == 4:
                screenCnt = approx
                break

        coords = []
        for dot in screenCnt:
            x = int(round(dot[0][0] * ratio))
            y = int(round(dot[0][1] * ratio))
            coords.append([[x, y]])

        # cv2.drawContours(image, [approx], -1, (0, 255, 0), 2)
        warped = transform.four_point_transform(image, screenCnt.reshape(4, 2))

        # warped = transform.four_point_transform(self.image, screenCnt.reshape(4, 2) * ratio)


        # self._show(self.image, 'Warped')
        # warped = transform.four_point_transform(image, screenCnt.reshape(4, 2))

        # convert the warped image to grayscale, then threshold it
        # to give it that 'black and white' paper effect
        # warped = cv2.cvtColor(warped, cv2.COLOR_BGR2GRAY)
        # warped = threshold_adaptive(warped, 250, offset = 10)
        # warped = warped.astype("uint8") * 255

        # import ipdb; ipdb.set_trace()

        height, width, x = warped.shape

        part_0 = imutils.crop(warped, 0, width*0.24, height * 0.267, height * 0.875)
        part_1 = imutils.crop(warped, width*0.24, width*0.46, height * 0.267, height * 0.875)
        part_2 = imutils.crop(warped, width*0.47, width*0.696, height * 0.267, height * 0.875)
        part_3 = imutils.crop(warped, width*0.696, width*0.968, height * 0.267, height * 0.875)

        cv2.imwrite(self._new_file('part_0.jpg'), part_0)
        cv2.imwrite(self._new_file('part_1.jpg'), part_1)
        cv2.imwrite(self._new_file('part_2.jpg'), part_2)
        cv2.imwrite(self._new_file('part_3.jpg'), part_3)

        return self.dir.split('/')[1]
Example #35
def processImage(args):
	# load the image and compute the ratio of the old height
	# to the new height, clone it, and resize it
	image = cv2.imread(args["image"])

	ratio = image.shape[0] / 500.0
	orig = image.copy()
	image = imutils.resize(image, height = 500)

	edged = cv2.Canny(image, 75, 200)

	# res = np.hstack((gray,eqgray))

	# find the contours in the edged image, keeping only the
	# largest ones, and initialize the screen contour
	(cnts, _) = cv2.findContours(edged.copy(), cv2.RETR_LIST, cv2.CHAIN_APPROX_SIMPLE)

	# sort by perimeter first and then area
	cnts = sorted(cnts, key = lambda x: cv2.arcLength(x, False), reverse = True)[:3]
	contour = sorted(cnts, key = lambda x: cv2.contourArea(x, False), reverse = True)[0]

	def removeInlier(points, closeLine=False):
		initial_area = cv2.contourArea(points);
		new_contour = points
		ratios = []
		for i in range(len(points)):
			# new_contour = points.pop(i)
			new_contour = np.delete(new_contour,i,0)
			new_area = cv2.contourArea(new_contour);
			ratios+=[new_area/initial_area]
			new_contour = points
		index = np.argmax(ratios)
		return np.delete(points,index,0)


	# approximate the contour
	peri = cv2.arcLength(contour, True)
	approx = cv2.approxPolyDP(contour, 0.02 * peri, True)
	approx = cv2.convexHull(approx)
	approx = approx.reshape((len(approx),2))
	while len(approx)>4 :
		approx = removeInlier(approx)



	# apply the four point transform to obtain a top-down
	# view of the original image
	warped = four_point_transform(orig, approx.reshape(4, 2) * ratio)

	if args["bw"] == "true":
		warped = cv2.cvtColor(warped, cv2.COLOR_BGR2GRAY)
		warped = threshold_adaptive(warped, 40, offset = 7)
		warped = warped.astype("uint8") * 255
	else:
		b,g,r = cv2.split(warped)       # get b,g,r
		warped = cv2.merge([r,g,b])     # switch it to rgb

	final = imutils.resize(warped, height = 650)
	sheet_ratio = final.shape[0]/float(final.shape[1])

	fig = plt.figure(frameon=False)
	if str(args["a4"]) == "true":
		fig.set_size_inches(11.69, 8.27)
		ax = plt.Axes(fig, [0., 0., 1., 1.])
		ax.set_axis_off()
		fig.add_axes(ax)
	else:
		fig.set_size_inches(3, 3/sheet_ratio)
		ax = plt.Axes(fig, [0., 0., 1., 1.])
		ax.set_axis_off()
		fig.add_axes(ax)

	if args["bw"] == "true":
		ax.imshow(final, aspect='auto', cmap = plt.get_cmap('gray'))
	else:
		ax.imshow(final, aspect='auto')

	format = str(args["format"])
	path = str(args["out"])
	filename = str(args["name"]).split(".")[0]
	plt.savefig(os.path.join(path, filename + "." + format) , format=format, dpi=int(args["dpi"]))

	if args["koriginal"] == "true":
		orig_path = args["image"]
		orig_format = orig_path.split(".")[-1]
		shutil.copyfile(orig_path, os.path.join(path, filename + "." + orig_format))
Example #36
def scan(image):
    # load the image and compute the ratio of the old height
    # to the new height, clone it, and resize it
    ratio = image.shape[0] / 500.0
    orig = image.copy()
    image = imutils.resize(image, height=500)

    # convert the image to grayscale, blur it, and find edges
    # in the image
    gray = cv2.cvtColor(image, cv2.COLOR_BGR2GRAY)
    gray = cv2.GaussianBlur(gray, (3, 3), 0)
    edged = cv2.Canny(gray, 50, 125)

    # show the original image and the edge detected image
    print("STEP 1: Edge Detection")
    cv2.imshow("Image", image)
    cv2.waitKey(0)
    cv2.imshow("Gray", gray)
    cv2.waitKey(0)
    cv2.imshow("Edged", edged)
    # cv2.imwrite("receipt_edged.jpg", edged)
    cv2.waitKey(0)
    cv2.destroyAllWindows()

    # find the contours in the edged image, keeping only the
    # largest ones, and initialize the screen contour
    (_, cnts, _) = cv2.findContours(edged.copy(), cv2.RETR_LIST, cv2.CHAIN_APPROX_SIMPLE)
    cnts = sorted(cnts, key=cv2.contourArea, reverse=True)[:5]

    # loop over the contours
    for c in cnts:
        # approximate the contour
        peri = cv2.arcLength(c, True)
        approx = cv2.approxPolyDP(c, 0.02 * peri, True)
        # if our approximated contour has four points, then we
        # can assume that we have found our screen
        if len(approx) == 4:
            screenCnt = approx
            break

    # show the contour (outline) of the piece of paper
    print("STEP 2: Find contours of paper")
    cv2.drawContours(image, [screenCnt], -1, (0, 255, 0), 2)
    cv2.imshow("Outline", image)
    # cv2.imwrite("receipt_outlined.jpg", image)
    cv2.waitKey(0)
    cv2.destroyAllWindows()

    # apply the four point transform to obtain a top-down
    # view of the original image
    warped = four_point_transform(orig, screenCnt.reshape(4, 2) * ratio)

    # convert the warped image to grayscale, then threshold it
    # to give it that 'black and white' paper effect
    warped = cv2.cvtColor(warped, cv2.COLOR_BGR2GRAY)
    warped = threshold_adaptive(warped, 251, offset=10)
    warped = warped.astype("uint8") * 255

    # show the original and scanned images
    print("STEP 3: Apply perspective transform")
    cv2.imshow("Original", imutils.resize(orig, height=650))
    cv2.imshow("Scanned", imutils.resize(warped, height=650))
    cv2.waitKey(0)

    return warped
Example #37
def scan_receipt(image_file):
    '''
    Takes a scanned receipt and returns a top-down scan view of the image.
    '''

    img = cv2.imread(image_file)

    # resize to 500 height, store ratio of original
    ratio = img.shape[0] / 500.0
    orig = img.copy()
    img = imutils.resize(img, height = 500)

    # grayscale, blur, find edges
    gray = cv2.cvtColor(img, cv2.COLOR_BGR2GRAY)
    gray = cv2.GaussianBlur(gray, (5, 5), 0)

    edged = cv2.Canny(gray, 25, 200) # for sainsbury receipt
    #edged = cv2.Canny(gray, 75, 200) # for scan.png

    # show images
    print 'Step 1: Edge Detection'
    cv2.imshow('image', img)
    cv2.imshow('gray', gray)
    cv2.imshow('edged', edged)
    cv2.waitKey(0)
    cv2.destroyAllWindows()

    # find the contours and get the largest ones
    #_, cnts, _ = cv2.findContours(edged.copy(), cv2.RETR_LIST, cv2.CHAIN_APPROX_SIMPLE)
    _, cnts, _ = cv2.findContours(edged.copy(), cv2.RETR_EXTERNAL, cv2.CHAIN_APPROX_SIMPLE)
    cnts = sorted(cnts, key=cv2.contourArea, reverse=True)[:5]

    for c in cnts:
        # approximate the contour
        peri = cv2.arcLength(c, True)
        approx = cv2.approxPolyDP(c, 0.02 * peri, True)

        # if contour has four points, then it's probably our screen
        if len(approx) == 4:
            screenCnt = approx
            break

    # show the contour (outline) of the piece of paper
    print 'Step 2: Find contours of paper'
    cv2.drawContours(img, [screenCnt], -1, (0, 255, 0), 2)
    cv2.imshow('outline', img)
    cv2.waitKey(0)

    # apply the four point transformation to obtain top-down view of image
    warped = four_point_transform(orig, screenCnt.reshape(4, 2) * ratio)

    # convert to grayscale, threshold to get black and white paper effect
    warped = cv2.cvtColor(warped, cv2.COLOR_BGR2GRAY)
    warped = threshold_adaptive(warped, 251, offset = 10)
    warped = warped.astype('uint8') * 255

    # show the original and scanned image
    print 'Step 3: Apply perspective transform'
    cv2.imshow('Original', imutils.resize(orig, height = 650))
    cv2.imshow('Scanned', imutils.resize(warped, height = 650))
    cv2.waitKey(0)
Example #38
for c in cnts:
	peri = cv2.arcLength(c, True)
	approx = cv2.approxPolyDP(c, 0.02 * peri, True)

	if(len(approx) == 4):
		screenCnt = approx
		break

print "STEP 2: find contours"
cv2.drawContours(image, [screenCnt], -1, (0, 255, 0), 2)
cv2.imshow("Outline", image)
cv2.waitKey(0)
cv2.destroyAllWindows()

warped = four_point_transform(orig, screenCnt.reshape(4,2) * ratio)

warped = cv2.cvtColor(warped, cv2.COLOR_BGR2GRAY)
warped = threshold_adaptive(warped, 250, offset = 10)
warped = warped.astype("uint8") *255

print "Step 3: apply perspective transform"
cv2.imshow("Original", imutils.resize(orig, height = 650))
cv2.imshow("Scanned", imutils.resize(warped, height = 650))
cv2.waitKey(0)
cv2.imwrite("warped.png", warped)

cmd = ["C:/Program Files (x86)/Tesseract-OCR/tesseract.exe", "warped.png", "rec"]

process = subprocess.Popen(cmd, stderr = subprocess.STDOUT, stdout=subprocess.PIPE)
outputstring = process.communicate()[0]
Example #39
im = plt.imread(args["image"])
# taking input from user
ax = plt.gca()
fig = plt.gcf()
implot = ax.imshow(im)
# coord = []
# coord = [(241, 316), (438, 312), (602, 447), (54, 447)] DSCN0632
coord = [(251, 314), (443, 306), (616, 435), (85, 445)]
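# four manually chosen corner points for this frame; the commented-out onclick handler below was used to pick them interactively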
# def onclick(event):
# 	if event.xdata != None and event.ydata != None :
# 		coord.append((int(event.xdata), int(event.ydata)))
# cid = fig.canvas.mpl_connect('button_press_event', onclick)
# plt.show()
# print(coord)

warped = four_point_transform(image, np.array(coord))
plt.imshow(warped), plt.show()
# cv2.waitKey(0)
cv2.destroyAllWindows()

cap = cv2.VideoCapture("testvideos/calibration/DSCN0622.MOV")

while cap.isOpened():
    ret, frame = cap.read()
    if frame is None:
        break
        # roi = frame[250:480, 0:640]
    cv2.circle(frame, (320, 435), 5, (0, 0, 255), -1)
    cv2.imshow("Input", frame)
    warped = four_point_transform(frame, np.array(coord))
    gray = cv2.cvtColor(warped, cv2.COLOR_BGR2GRAY)
Example #40
from matplotlib import pyplot as plt
from transform import four_point_transform

image = cv2.imread("images/receipt-scanned.jpg")
ratio = image.shape[0] / 500
orig = image.copy()
image = imutils.resize(image, height=500)
gray = cv2.cvtColor(image, cv2.COLOR_BGR2GRAY)
gray = cv2.GaussianBlur(gray, (5, 5), 0)
edged = cv2.Canny(gray, 75, 200)
cnts = cv2.findContours(edged.copy(), cv2.RETR_LIST, cv2.CHAIN_APPROX_SIMPLE)
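# under OpenCV 3.x, findContours returns (image, contours, hierarchy), so the contour list is at index 1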
cnts = cnts[1]

cnts = sorted(cnts, key=cv2.contourArea, reverse=True)[:5]

for c in cnts:
    peri = cv2.arcLength(c, True)
    approx = cv2.approxPolyDP(c, 0.02 * peri, True)
    if len(approx) == 4:
        screenCnt = approx
        break

warped = four_point_transform(image, screenCnt.reshape(4, 2))

for point in screenCnt.reshape(4, 2):
    cv2.circle(image,(point[0],point[1]), 5, (0,0,255), 4 )
plt.subplot(121),plt.imshow(image),plt.title('Original')
plt.xticks([]),plt.yticks([])
plt.subplot(122),plt.imshow(warped),plt.title('Warped')
plt.xticks([]),plt.yticks([])
plt.show()
Example #41
def transform_image(image, path):
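    # find the receipt outline (falling back to the whole image when no contours are found), warp it, and save the result as a JPEG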
    new_file_name = create_file_name(path)
    ratio = image.shape[0] / 500.0
    orig = image.copy()
    image = imutils.resize(image, height=500)
    gray = cv2.cvtColor(image, cv2.COLOR_BGR2GRAY)
    gray = cv2.GaussianBlur(gray, (5, 5), 0)
    edged = cv2.Canny(gray, 75, 200)

    # find contours; len(cnts) returns no. of contours found
    (cnts, _) = cv2.findContours(edged.copy(), cv2.RETR_TREE, cv2.CHAIN_APPROX_SIMPLE)

    # if there are no contours found
    if len(cnts) < 1:
        height, width, _ = image.shape
        print "height: " + str(height) + " width: " + str(width)
        # return coordinates of the whole image
        all_vertices = [[0, 0], [width, 0], [width, height], [0, height]]

        print "all vertices: " + str(all_vertices)

        contours = all_vertices
       
        # print vertices' coordinates
        for elem in contours:
            text2 = str(elem[0]) + " " + str(elem[1])
            cv2.putText(image, text2, (elem[0], elem[1]), font, 0.5, (255, 255, 0), 2)

        contours_copy = contours
        contours_copy_np = np.array(contours_copy)
        cv2.drawContours(image, [contours_copy_np], -1, (0, 255, 0), 2)
        warped = four_point_transform(orig, contours_copy_np.reshape(4, 2) * ratio)

        cv2.imwrite("%s" % new_file_name, warped, [int(cv2.IMWRITE_JPEG_QUALITY), 90])

        return new_file_name, True

    # if there are some contours found
    else:
        # array of perimeters
        # keeps lengths of perimeters found (no. of perimeters = no. of contours)

        # sort from the longest perimeter to the shortest
        cnts_sorted = sorted(cnts, key=lambda x: cv2.arcLength(x, True), reverse=True)

        peri_arr2 = []
        for elem in cnts_sorted:
            perii = cv2.arcLength(elem, True)
            peri_arr2.append(perii)

        # length of the longest perimeter
        peri_max = peri_arr2[0]

        # approxPolyDP returns coordinates of vertices of the longest perimeter
        approx2 = cv2.approxPolyDP(cnts_sorted[0], 0.02 * peri_max, True)

        # find vertices and put them into array all_vertices
        all_vertices = []
        for a in approx2:
            aa = a[0]
            x_coord = aa[0]
            y_coord = aa[1]
            two_vertices = [x_coord, y_coord]
            all_vertices.append(two_vertices)

        # if only one curve was found
        if len(all_vertices) == 2:
            
            # but if there are other curves
            if len(peri_arr2) > 1:
                peri_max2 = peri_arr2[1]
                approx3 = cv2.approxPolyDP(cnts_sorted[1], 0.02 * peri_max2, True)

                # find another vertical contour
                if len(approx3) == 2:
                    all_vertices2 = []
                    for a in approx3:
                        aa = a[0]
                        x_coord = aa[0]
                        y_coord = aa[1]
                        two_vertices = [x_coord, y_coord]
                        all_vertices2.append(two_vertices)

                    all_vertices = all_vertices + all_vertices2

                # if there is no another vertical contour - use image contour
                else:
                    all_vertices = use_image_contour(all_vertices, image)

            # if there is no other curve found
            else:
                all_vertices = use_image_contour(all_vertices, image)

        # find vertices that are most likely to be receipt vertices
        br = ul = bl = ur = []
        max_sum = 0
        min_sum = 10000
        max_sub_x_y = 0
        max_sub_y_x = 0
        for elem in all_vertices:
            sum_x_and_y = elem[0] + elem[1]
            if sum_x_and_y > max_sum:
                max_sum = sum_x_and_y
                br = elem
            if sum_x_and_y < min_sum:
                min_sum = sum_x_and_y
                ul = elem

            if elem[0] - elem[1] > 0:
                if elem[0] - elem[1] > max_sub_x_y:
                    max_sub_x_y = elem[0] - elem[1]
                    ur = elem

            if elem[1] - elem[0] > 0:
                if elem[1] - elem[0] > max_sub_y_x:
                    max_sub_y_x = elem[1] - elem[0]
                    bl = elem

        contours = []
        contours.append(ul)
        contours.append(ur)
        contours.append(br)
        contours.append(bl)

        # if there are any empty vertices, assign their values to [0,0]
        for elem, val in enumerate(contours):
            if val == []:
                contours[elem] = [0, 0]

        # print vertices' coordinates
        for elem in contours:
            text2 = str(elem[0]) + " " + str(elem[1])
            cv2.putText(image, text2, (elem[0], elem[1]), font, 0.5, (255, 255, 0), 2)

        contours_copy = contours
        for elem, val in enumerate(contours_copy):
            tab = []
            tab.append(val)
            contours_copy[elem] = tab

        contours_copy_np = np.array(contours_copy)

        cv2.drawContours(image, [contours_copy_np], -1, (0, 255, 0), 2)

        warped = four_point_transform(orig, contours_copy_np.reshape(4, 2) * ratio)

        cv2.imwrite("%s" % new_file_name, warped, [int(cv2.IMWRITE_JPEG_QUALITY), 90])

        return new_file_name, True
Example #42
from transform import four_point_transform
import numpy as np
import argparse
import cv2

ap = argparse.ArgumentParser()
ap.add_argument("-i", "--image", help = "path to image file")
ap.add_argument("-c", "--coords", help = "comma seperated list of source pointer")
args = vars(ap.parse_args())


image = cv2.imread(args["image"])
pts = np.array(eval(args["coords"]), dtype = "float32")

warped = four_point_transform(image, pts)

cv2.imshow("original", image)
cv2.imshow("warped", warped)
cv2.waitKey(0)