Example #1
0
def cropImage():
    """Grab a frame from the PiCamera, perspective-crop each candidate box
    found by BoxFinder, and try to read a tag from each crop.

    Returns:
        (ts, [tag, direction, instruction]) when a tag is read in hack mode,
        (ts, [img, tag_requested]) when a tag is read otherwise,
        (ts, None) when nothing could be read.
    """
    # camera.resolution = (1020, 610)
    ts, img = photo.get_image_from_picam(camera)
    height, width, channels = img.shape
    shape, v, coordinates = BoxFinder.findBox(img)
    # print coordinates
    # Tag letter -> [turn direction, spoken instruction]; hoisted out of the
    # loop so it is not rebuilt for every candidate box.
    dic = {'X': [None, 'end of route'],
           'A': ['left', 'spin left 3 times'],
           'B': ['left', 'turn right 90 degrees'],
           'C': ['left', 'move back 1 meters'],
           'D': ['right', 'spin left 3 times'],
           'E': ['right', 'turn right 90 degrees'],
           'F': ['left', 'move back 1 meters']}
    for [coord, _] in coordinates:
        # BUG FIX: the old code asserted len(coord) == 4 *before* checking
        # for None (TypeError on a None coord) and compared with `!= None`.
        if coord is None:
            continue
        assert len(coord) == 4
        coord = np.array(coord)
        cropped = four_point_transform(img, coord)
        cv2.imwrite("/root/server/irobot/static/snapshots/snapshot-cropped" + str(ts) + ".jpg", cropped)
        height, width, channels = cropped.shape
        t = verify_image((ts, cropped))
        if t is not None and hack:
            return ts, [t] + dic[t]
        elif t is not None and not hack:
            # t is (img, tag_requested)
            img, tag_requested = t
            return ts, [img] + [tag_requested]

    return ts, None
Example #2
0
def select_image():
	"""Ask the user for an image, detect the paper contour, warp it to a
	top-down view, OCR it with tesseract, and show image + text panels.

	Uses/updates the module globals panelA, panelB, im, warped, itext, path.
	"""
	global panelA, panelB, im, warped, itext, path
	path = filedialog.askopenfilename()
	print(path)
	if len(path) > 0:
		mainimage = cv2.imread(path)
		# border keeps the edges of a full-bleed document inside the frame
		mainimage = cv2.copyMakeBorder(mainimage, 10, 10, 10, 10, cv2.BORDER_CONSTANT)
		ratio = mainimage.shape[0] / 500.0
		orig = mainimage.copy()
		mainimage = imutils.resize(mainimage, height=500)

		# convert the mainimage to grayscale, blur it, and find edges
		gray = cv2.cvtColor(mainimage, cv2.COLOR_BGR2GRAY)
		gray = cv2.GaussianBlur(gray, (5, 5), 0)
		edged = cv2.Canny(gray, 75, 200)

		# find the contours in the edged mainimage, keeping only the
		# 5 largest ones, and look for a quadrilateral
		cnts = cv2.findContours(edged.copy(), cv2.RETR_LIST, cv2.CHAIN_APPROX_SIMPLE)
		cnts = imutils.grab_contours(cnts)
		cnts = sorted(cnts, key=cv2.contourArea, reverse=True)[:5]

		# BUG FIX: screenCnt was left undefined (NameError in the transform
		# below) whenever no contour approximated to four points.
		screenCnt = None
		for c in cnts:
			# approximate the contour
			peri = cv2.arcLength(c, True)
			approx = cv2.approxPolyDP(c, 0.02 * peri, True)
			# a four-point approximation is assumed to be the document
			if len(approx) == 4:
				screenCnt = approx
				break
		if screenCnt is None:
			print("No document contour found")
			return

		# apply the four point transform to obtain a top-down
		# view of the original-resolution mainimage
		warped = four_point_transform(orig, screenCnt.reshape(4, 2) * ratio)
		# convert the warped image to grayscale for OCR
		warped = cv2.cvtColor(warped, cv2.COLOR_BGR2GRAY)

		mainimage = cv2.resize(mainimage, (500, 800))
		mainimage = Image.fromarray(mainimage)
		mainimage = ImageTk.PhotoImage(mainimage)
		config = ('-l ' + str(language) + ' --oem 1 --psm 3')
		itext = pytesseract.image_to_string(warped, config=config)

		if panelA is None or panelB is None:
			panelA = Label(image=mainimage)
			panelA.image = mainimage
			panelA.pack(side="left", padx=10, pady=10)

			panelB = Label(justify=LEFT, text=itext)
			panelB.text = itext
			panelB.pack(side="right", padx=10, pady=10)
Example #3
0
def find_page(img, draw=True):
    """Locate the page (first 4-point contour by area) in *img* and return a
    top-down, perspective-corrected crop at the original resolution.

    Parameters:
        img: BGR image array.
        draw: when True, show the edge map and contours and wait for a key.

    Raises:
        ValueError: if no 4-point contour is found.
    """
    ratio = img.shape[0] / 500.0
    orig = img.copy()
    img = imutils.resize(img, height=500)

    gray = cv2.cvtColor(img, cv2.COLOR_BGR2GRAY)
    #gray = cv2.GaussianBlur(gray, (19, 19), 0)
    edged = cv2.Canny(gray, 75, 200)

    if draw:
        cv2.imshow("Img", edged)
        cv2.waitKey(0)

    # OpenCV 3.x signature: (image, contours, hierarchy)
    _, cnts, _ = cv2.findContours(edged.copy(), cv2.RETR_LIST,
                                  cv2.CHAIN_APPROX_SIMPLE)

    cnts = sorted(cnts, key=cv2.contourArea, reverse=True)

    possible_pages = []
    for c in cnts:
        epsilon = 0.1 * cv2.arcLength(c, True)
        approx = cv2.approxPolyDP(c, epsilon, True)
        if len(approx) == 4:
            possible_pages.append(approx)
            break
    if draw:
        cv2.drawContours(img, cnts, -1, (0, 255, 0), 2)
        cv2.imshow("Img", img)
        cv2.waitKey(0)
    cv2.destroyAllWindows()

    # BUG FIX: previously an empty candidate list caused a bare IndexError.
    if not possible_pages:
        raise ValueError("no 4-point page contour found")
    return four_point_transform(orig, possible_pages[0].reshape(4, 2) * ratio)
Example #4
0
    def format_image(self, image):
        """Scan *image* (a file path): detect the document contour, warp it
        to a top-down view, and return the thresholded (black-and-white)
        result.
        """
        # load the image and compute the ratio of the old height
        # to the new height, clone it, and resize it
        orig = cv2.imread(image)
        # BUG FIX: `ratio` was commented out but still used in the transform
        # below, raising a NameError. Detection runs on the 500px-high copy
        # while the transform is applied to the full-resolution original.
        ratio = orig.shape[0] / 500.0
        image = imutils.resize(orig, height=500)

        gray = self.grayscale(image)
        blurred = self.blur(gray)
        edged = cv2.Canny(blurred, 75, 200)

        # find the contours in the edged image, keeping only the
        # largest ones, and initialize the screen contour
        screenCnt = self.contour(edged)

        # apply the four point transform to obtain a top-down
        # view of the original image
        warped = four_point_transform(orig, screenCnt.reshape(4, 2) * ratio)

        # convert the warped image to grayscale, then threshold it
        # to give it that 'black and white' paper effect
        warped = self.grayscale(warped)
        warped = self.threshold(warped)

        return warped
Example #5
0
def crop(img, points):
    """Perspective-warp *img* using the 4 corner *points*, then trim a 5%
    margin from every side of the result."""
    warped = four_point_transform(img, points.reshape(4, 2))
    margin = 0.05
    h, w = warped.shape[0], warped.shape[1]
    top, bottom = int(h * margin), int(h * (1 - margin))
    left, right = int(w * margin), int(w * (1 - margin))
    return warped[top:bottom, left:right]
Example #6
0
def cropImage():
    camera.resolution = (2560, 1600)
    ts, img = photo.get_image_from_picam(camera)
    height, width, channels = img.shape
    shape, v, coordinates = BoxFinder.findBox(img)
    print coordinates
    for [coord, _] in coordinates:
        print "coord", coord
        assert (len(coord) == 4)
        if coord != None:
            # cropped = img[coord[1]:(coord[1]+coord[3]),coord[0]:(coord[0]+coord[2])]
            coord = np.array(coord)
            print coord
            cropped = four_point_transform(img, coord)
            cv2.imwrite(
                "/root/server/irobot/static/snapshots/snapshot-cropped" +
                str(ts) + ".jpg", cropped)
            height, width, channels = cropped.shape
            t = _getTagFromImage((ts, cropped))
            if t != None:
                dic = {
                    'X': [None, 'end of route'],
                    'A': ['left', 'spin left 3 times'],
                    'B': ['left', 'turn right 90 degrees'],
                    'C': ['left', 'move back 1 meters'],
                    'D': ['right', 'spin left 3 times'],
                    'E': ['right', 'turn right 90 degrees'],
                    'F': ['left', 'move back 1 meters']
                }
                # print width
                # print height
                return ts, [t] + dic[t]
    return ts, None
Example #7
0
    def get(self):
        """Fetch the plate image referenced by the request's ``url`` and
        ``token`` query parameters, run segmentation + OCR, and return the
        plate text as JSON (or a 'Try Again!' payload)."""
        source_url = request.args.get('url')
        auth_token = request.args.get('token')
        use_corrector = True
        use_cnn_ocr = True
        source_url = source_url + '&token=' + auth_token

        response = urllib.request.urlopen(source_url)
        raw_bytes = np.array(bytearray(response.read()), dtype=np.uint8)
        plate_img = cv2.imdecode(raw_bytes, -1)
        h, w = plate_img.shape[:2]
        # warp the full frame (its own four corners) into a flat view
        warped = four_point_transform(
            plate_img,
            np.array([(0, 0), (w, 0), (w, h), (0, h)]))

        valid_chars = Segmentation.startSegment(warped)
        print(len(valid_chars))

        plate_text = OCR.readPlate(valid_chars, use_corrector, use_cnn_ocr)
        print(plate_text)
        if plate_text != 'ignore':
            return jsonify(plate=plate_text)
        else:
            return jsonify(plate='Try Again!')
Example #8
0
    def get(self):
        """Fetch the registration-card image referenced by the request's
        ``url`` and ``token`` query parameters, OCR it, and return the parsed
        fields (tagged with the requested ``state``) as JSON."""
        source_url = request.args.get('url')
        auth_token = request.args.get('token')
        state = request.args.get('state')
        source_url = source_url + '&token=' + auth_token

        response = urllib.request.urlopen(source_url)
        raw_bytes = np.array(bytearray(response.read()), dtype=np.uint8)
        plate_img = cv2.imdecode(raw_bytes, -1)
        h, w = plate_img.shape[:2]
        # warp the full frame (its own four corners) into a flat view
        warped = four_point_transform(
            plate_img,
            np.array([(0, 0), (w, 0), (w, h), (0, h)]))

        wh, ww = warped.shape[:2]
        aspect = ww / wh
        height = int(300 / aspect)
        #imgPlate = cv2.resize(warped, (300, height))
        #cv2.imshow('lol',imgPlate)
        #cv2.waitKey(0)

        text_a, text_b = OCR.readRC(warped)
        parsed = parseRC.parseToJSON(text_a, text_b, state)

        print(parsed)

        return jsonify(parsed)
Example #9
0
    def main(self):
        """Run the scanner pipeline on self.img: detect the document's
        4-point contour, warp it to a top-down view, threshold it to a
        black-and-white 'scanned' look, and display the result.

        Raises:
            ValueError: if no 4-point contour is found.
        """
        image = self.img
        ratio = image.shape[0] / 500.0
        orig = image.copy()
        image = imutils.resize(image, height=500)

        # convert the image to grayscale, blur it, and find edges
        gray = cv2.cvtColor(image, cv2.COLOR_BGR2GRAY)
        gray = cv2.GaussianBlur(gray, (5, 5), 0)
        edged = cv2.Canny(gray, 75, 200)

        # find the contours in the edged image, keeping only the
        # 5 largest ones
        cnts = cv2.findContours(edged.copy(), cv2.RETR_LIST, cv2.CHAIN_APPROX_SIMPLE)
        cnts = imutils.grab_contours(cnts)
        cnts = sorted(cnts, key=cv2.contourArea, reverse=True)[:5]

        # BUG FIX: screenCnt was left undefined (NameError in the transform
        # below) whenever no contour approximated to four points.
        screenCnt = None
        for c in cnts:
            # approximate the contour
            peri = cv2.arcLength(c, True)
            approx = cv2.approxPolyDP(c, 0.02 * peri, True)
            # a four-point approximation is assumed to be the document
            if len(approx) == 4:
                screenCnt = approx
                break
        if screenCnt is None:
            raise ValueError("no 4-point document contour found")

        # apply the four point transform to obtain a top-down
        # view of the original image
        warped = four_point_transform(orig, screenCnt.reshape(4, 2) * ratio)

        # grayscale + local threshold -> black-and-white paper effect
        warped = cv2.cvtColor(warped, cv2.COLOR_BGR2GRAY)
        T = threshold_local(warped, 11, offset=10, method="gaussian")
        warped = (warped > T).astype("uint8") * 255

        # show the scanned image until a key is pressed
        cv2.imshow("Scanned", imutils.resize(warped, height=650))
        cv2.waitKey(0)
Example #10
0
def scan(image_original, cnts, img_counter):
    """Warp the first 4-point contour in *cnts* to a top-down view, enhance
    and sharpen it, display it, and save it as scan_<n>.png.

    Parameters:
        image_original: source image to warp.
        cnts: candidate contours to search for a quadrilateral.
        img_counter: counter (incremented locally) used to name the output.
    """
    for c in cnts:
        peri = cv2.arcLength(c, True)
        approx = cv2.approxPolyDP(c, 0.015 * peri, True)
        if len(approx) == 4:
            screenCnt = approx

            # warp the image to fill the document
            warped = four_point_transform(image_original,
                                          screenCnt.reshape(4, 2))

            # increase brightness and contrast
            adjusted = increase_brightness(warped)

            # sharpen the image
            sharpened = cv2.filter2D(adjusted, -1, kernel)

            # show the image and save it
            cv2.imshow('Scanned image', sharpened)
            img_counter += 1
            img_name = "scan_{}.png".format(img_counter)
            cv2.imwrite(img_name, sharpened)
            break
    else:
        # BUG FIX: the old code printed "Contour not found!" once for every
        # non-quadrilateral contour, even when a document was found later in
        # the list; the for/else reports it only when no 4-point contour
        # exists at all.
        print("Contour not found!")
Example #11
0
    def processImage(self):
        image = cv2.imread(self.imagePath)
        ratio = image.shape[0] / 500.0
        orig = image.copy()
        image = imutils.resize(image, height=500)
        print "STEP 1: Edge Detection"
        # convert the image to grayscale, blur it, and find edges
        # in the image
        gray = cv2.cvtColor(image, cv2.COLOR_BGR2GRAY)
        gray = cv2.GaussianBlur(image, (5, 5), 0)
        edged = cv2.Canny(gray, 55, 200)

        # cv2.imshow("Image", image)
        # cv2.imshow("Edged", edged)
        # cv2.waitKey(0)
        # cv2.destroyAllWindows()

        print "STEP 2: Find contours of paper"
        # find the contours in the edged image, keeping only the
        # largest ones, and initialize the screen contour
        (_, cnts, _) = cv2.findContours(edged.copy(), cv2.RETR_LIST,
                                        cv2.CHAIN_APPROX_SIMPLE)
        cnts = sorted(cnts, key=cv2.contourArea, reverse=True)[:5]
        # loop over the contours
        screenCnt = []
        for c in cnts:
            # approximate the contour
            peri = cv2.arcLength(c, True)
            approx = cv2.approxPolyDP(c, 0.02 * peri, True)
            # if our approximated contour has four points, then we
            # can assume that we have found our screen
            if len(approx) == 4:
                screenCnt = approx
                break
        if len(screenCnt) != 4:
            raise ContourNotFoundError('not find contour')

        # cv2.drawContours(image, [screenCnt], -1, (0, 255, 0), 2)
        # cv2.imshow("Outline", image)
        # cv2.waitKey(0)
        # cv2.destroyAllWindows()

        # apply the four point transform to obtain a top-down
        # view of the original image
        warped = four_point_transform(orig, screenCnt.reshape(4, 2) * ratio)

        # convert the warped image to grayscale, then threshold it
        # to give it that 'black and white' paper effect
        # warped = cv2.cvtColor(warped, cv2.COLOR_BGR2GRAY)
        # warped = threshold_adaptive(warped, 251, offset=10)
        # warped = warped.astype("uint8") * 255
        height, width = warped.shape[:2]
        ratio1 = float(width) / float(height)
        ratio2 = float(height) / float(width)
        # if (ratio1 > 0.80 or ratio1 < 0.60) and (ratio2 > 0.80 or ratio2 < 0.60):
        #     raise NotA4Error('Cropped Image is not a A4 paper: height: ' + str(height) + ' width: ' + str(width))
        cv2.imwrite(self.outputPath, warped)
        print "Finished Transformation"
        return self.outputPath
Example #12
0
def transformPerspective(orig, screenCnt):
    """Warp *orig* to a top-down view of the quadrilateral *screenCnt*,
    display it until a key is pressed, and return the warped image.

    NOTE(review): `ratio` is not defined in this function -- it must come
    from an enclosing/module scope (presumably the scale factor between the
    resized image the contour was found on and the original); confirm it is
    set before calling.
    """
    print("STEP 3: Apply perspective transform")
    # apply the four point transform to obtain a top-down view of the original image
    warped = four_point_transform(orig, screenCnt.reshape(4, 2) * ratio)

    # show the result until a key is pressed
    cv2.imshow("im2", warped)
    cv2.waitKey(0)
    return warped
Example #13
0
def extractCard(Image, contours, ratio):
    """Return a top-down crop of the card described by *contours*.

    The contour coordinates are scaled back up by *ratio* (the factor
    between the detection image and *Image*) before warping; the result is
    also displayed at a height of 650 px.
    """
    corner_points = contours.reshape(4, 2) * ratio
    card = four_point_transform(Image, corner_points)
    # show the extracted card with a top-down view
    cv2.imshow("extracted Card", imutils.resize(card, height=650))

    # cv2.imwrite('extracted.png',card)
    return card
Example #14
0
def pre_deal_4_measure(img_path):
    '''
    input: image path
    output:
        image --- rectangle with four squared calibration area

    purpose: to make the image a square, smooth one for further measurement

    help to make the contour:
        - ! make sure the contour is obvious, HB pencil is not a good choice
        - add Gaussianblur
        - add getStructuringElement
    '''
    image = cv2.imread(img_path)

    # grayscale -> blur -> Canny edge map
    gray = cv2.cvtColor(image, cv2.COLOR_BGR2GRAY)
    gray = cv2.GaussianBlur(gray, (7, 7), 0)
    edged = cv2.Canny(gray, 20, 100) # 150 255

    # dilate so faint (pencil) edges close into usable contours
    kernel = cv2.getStructuringElement(cv2.MORPH_RECT, (5, 5))
    # for kindle pic 5, 5
    edged = cv2.dilate(edged, kernel)

    # find contours; grab_contours extracts the contour list regardless of
    # the OpenCV version's return signature
    cnts = cv2.findContours(edged.copy(), cv2.RETR_LIST, cv2.CHAIN_APPROX_SIMPLE)
    cnts = imutils.grab_contours(cnts)

    # CLEANUP: a slice(1, 10) selector was previously assigned here and then
    # immediately overwritten by the integer 2, so the slice branch was dead
    # code and conts_more_than_1 was always False.
    slicer = 2
    conts_more_than_1 = False

    cnts, out_rect_approx = get_out_rect_approx_n_sorted_cnts(cnts, slicer, conts_more_than_1)

    # apply the four point transform to obtain a top-down view of the
    # original image
    warped = four_point_transform(image, out_rect_approx.reshape(4, 2))
    return warped
Example #15
0
 def transform(self):
     """Apply the stored perspective transform and threshold the result.

     Runs only when self.transform_flag is True: warps self.original using
     self.screenCnt (scaled by self.ratio), converts the warp to grayscale,
     applies a local gaussian threshold, and stores the black-and-white
     result in both self.warped and self.original.
     """
     if self.transform_flag is True:
         warped = four_point_transform(
             self.original,
             self.screenCnt.reshape(4, 2) * self.ratio)
         # grayscale + local threshold -> black-and-white 'paper' effect
         warped = cv2.cvtColor(warped, cv2.COLOR_BGR2GRAY)
         T = threshold_local(warped, 11, offset=10, method="gaussian")
         self.warped = (warped > T).astype("uint8") * 255
         self.original = self.warped
Example #16
0
def scan_image(image_path, display_message=True):
    """Produce a scanned (top-down, black-and-white) version of the image at
    *image_path*, caching the result next to it as *_scanned.jpg, and return
    it as an ImageTk.PhotoImage (or None when no document contour is found).
    """
    saved_image = image_path[:-4] + "_scanned.jpg"
    if not os.path.isfile(saved_image):
        src = cv2.imread(image_path,
                         cv2.IMREAD_IGNORE_ORIENTATION | cv2.IMREAD_COLOR)
        scale = src.shape[0] / 500.0
        original = src.copy()
        small = imutils.resize(src, height=500)

        # grayscale -> blur -> Canny edge map
        blurred = cv2.GaussianBlur(cv2.cvtColor(small, cv2.COLOR_BGR2GRAY),
                                   (5, 5), 0)
        edges = cv2.Canny(blurred, 75, 200)

        # keep the five largest contours and look for a quadrilateral
        found = cv2.findContours(edges.copy(), cv2.RETR_LIST,
                                 cv2.CHAIN_APPROX_SIMPLE)
        found = imutils.grab_contours(found)
        candidates = sorted(found, key=cv2.contourArea, reverse=True)[:5]

        page = None
        for contour in candidates:
            perimeter = cv2.arcLength(contour, True)
            poly = cv2.approxPolyDP(contour, 0.02 * perimeter, True)
            if len(poly) == 4:
                page = poly
                break

        # no document contour: warn the user and bail out
        if page is None:
            icon = resource_path('.') + "/resources/warning.ico"
            popupMessage("Cannot scanned {}".format(saved_image),
                         duration=3,
                         icon=icon)
            return None

        # top-down view of the full-resolution original
        flat = four_point_transform(original, page.reshape(4, 2) * scale)
        # grayscale + local threshold -> black-and-white paper effect
        flat = cv2.cvtColor(flat, cv2.COLOR_BGR2GRAY)
        T = threshold_local(flat, 11, offset=10, method="gaussian")
        flat = (flat > T).astype("uint8") * 255
        # save to disk
        cv2.imwrite(saved_image.replace(".png", ".jpg"), flat)
        if display_message:
            popupMessage("Scanned successfully {}".format(saved_image))
    image = ImageTk.PhotoImage(loadImage(saved_image))
    return image
Example #17
0
def image_to_scan_bird_style_view(image, screenCnt, ratio):
    """Warp *image* to a top-down view of *screenCnt* (scaled by *ratio*)
    and return it thresholded to a black-and-white 'scanned paper' look."""
    corners = screenCnt.reshape(4, 2) * ratio
    warped = four_point_transform(image, corners)
    # grayscale + adaptive threshold -> black-and-white paper effect
    gray = cv2.cvtColor(warped, cv2.COLOR_BGR2GRAY)
    mask = threshold_adaptive(gray, 250, offset=10)
    return mask.astype("uint8") * 255
Example #18
0
    def scan(cls, filepath):
        """Scan the document image at *filepath*: find its 4-point contour
        (falling back to a box over the whole frame), warp it to a top-down
        view, threshold it, and write the result to tmp/tmp-result.png.

        Returns the output file name.
        """
        print("Starting scan")
        # load the image, remember the scale factor, and make a 500px-high
        # working copy for detection
        source = cv2.imread(filepath)
        scale = source.shape[0] / 500.0
        full_res = source.copy()
        small = imutils.resize(source, height=500)

        # grayscale -> blur -> Canny edge map
        blurred = cv2.GaussianBlur(
            cv2.cvtColor(small, cv2.COLOR_BGR2GRAY), (5, 5), 0)
        edges = cv2.Canny(blurred, 75, 200)

        # keep the five largest contours and search for a quadrilateral
        contours, hierarchy = cv2.findContours(edges.copy(), cv2.RETR_LIST,
                                               cv2.CHAIN_APPROX_SIMPLE)
        candidates = sorted(contours, key=cv2.contourArea, reverse=True)[:5]
        screenCnt = None

        for contour in candidates:
            perimeter = cv2.arcLength(contour, True)
            poly = cv2.approxPolyDP(contour, 0.02 * perimeter, True)
            if len(poly) == 4:
                screenCnt = poly
                break

        # no 4-point contour: use a bounding box over the whole image
        if screenCnt is None:
            height, width, channels = small.shape
            corners = np.array([[1, 1], [width, 1], [width, height], [1, height]])
            screenCnt = imutils.get_bounding_box(corners)

        # top-down view of the original-resolution image
        warped = four_point_transform(full_res, screenCnt.reshape(4, 2) * scale)

        # grayscale + adaptive threshold -> black-and-white paper effect
        warped = cv2.cvtColor(warped, cv2.COLOR_BGR2GRAY)
        warped = threshold_adaptive(warped, 250, offset=10)
        warped = warped.astype("uint8") * 255

        # write out image to tmp file
        filename = "tmp/tmp-result.png"
        cv2.imwrite(filename, warped)
        print("Finished scan")
        return filename
Example #19
0
def get_string():
    """Continuously read frames from the global capture device, rotate them
    90 degrees, and -- once triggered (ctr >= 1) -- try to find a document,
    perspective-correct it, and OCR it with pytesseract.

    Returns "Vibrate 2" if the camera read fails, "Vibrate" if the largest
    contour is not a quadrilateral (frame saved to capture.jpg), otherwise
    the OCR text of the warped document.

    NOTE(review): `ctr` is reset to 0 here and never incremented in this
    function, so the capture branch only runs if another thread/handler
    updates the global -- confirm. `flag` is likewise never reset once set.
    """
    global cap
    global ctr
    global flag
    ctr = 0
    flag = 0
    while 1:
        ret, image = cap.read()
        if ret == False:
            return "Vibrate 2"
        else:
            # rotate the frame 90 degrees about its centre
            rows, cols, x = image.shape
            M = cv2.getRotationMatrix2D((cols / 2, rows / 2), 90, 1)
            image = cv2.warpAffine(image, M, (cols, rows))
            if ctr >= 1:
                print("Capturing Image.....")
                orig = image.copy()
                # grayscale -> blur -> Canny edge map
                gray = cv2.cvtColor(image, cv2.COLOR_BGR2GRAY)
                gray = cv2.GaussianBlur(gray, (5, 5), 0)
                edged = cv2.Canny(gray, 75, 200)
                cnts = cv2.findContours(edged.copy(), cv2.RETR_LIST,
                                        cv2.CHAIN_APPROX_SIMPLE)
                cnts = imutils.grab_contours(cnts)
                cnts = sorted(cnts, key=cv2.contourArea, reverse=True)[:5]
                # NOTE(review): only the largest contour is ever examined --
                # both branches break on the first iteration.
                for c in cnts:
                    peri = cv2.arcLength(c, True)
                    approx = cv2.approxPolyDP(c, 0.02 * peri, True)
                    if len(approx) == 4:
                        screenCnt = approx
                        break
                    else:
                        flag = 1
                        break
                if flag == 1:
                    cv2.imwrite('capture.jpg', image)
                    return "Vibrate"
                else:
                    #print(flag)
                    print("Getting OCR....")
                    # top-down view of the detected quadrilateral
                    warped = four_point_transform(orig,
                                                  screenCnt.reshape(4, 2))
                    warped = cv2.cvtColor(warped, cv2.COLOR_BGR2GRAY)
                    # T is computed but the thresholding below is commented out
                    T = threshold_local(warped,
                                        11,
                                        offset=10,
                                        method="gaussian")
                    #warped = (warped > T).astype("uint8") * 255
                    #cv2.imshow("Scanned", warped)
                    #cv2.imshow("pers", image)
                    cv2.imwrite('capture.jpg', image)
                    cv2.imwrite('text.jpg', warped)
                    cv2.waitKey(0)
                    cv2.destroyAllWindows()
                    return pytesseract.image_to_string(warped)
Example #20
0
def transformado(image, left_top, right_top, left_bottom, right_bottom):
    """Show *image* with the given quadrilateral outlined next to its
    perspective-corrected ("Corrigido") version, waiting for a key press."""
    corners = np.array([left_top, right_top, left_bottom, right_bottom])
    warped = four_point_transform(image, corners)
    # outline the selected region on the original image
    for start, end in ((left_top, right_top),
                       (left_top, left_bottom),
                       (left_bottom, right_bottom),
                       (right_bottom, right_top)):
        cv2.line(image, start, end, (0, 255, 0), 2)

    cv2.imshow("Original", image)
    cv2.imshow("Corrigido", warped)
    cv2.waitKey(0)
Example #21
0
    def run(self):
        """Main capture loop: read frames from self.cam, perspective-correct
        the region outlined by the four config['refPt'] points, optionally
        preview the result, and feed the resized image to the LED processor.

        The loop exits when 'q' is pressed; any exception is printed and the
        camera released.
        """
        try:

            while True:

                valid, img = self.cam.read()

                if valid:
                    self.image = img

                    # only warp once four reference points are configured
                    if len(self.config['refPt']) == 4:
                        pts = np.array(self.config['refPt'], np.int32)
                        pts = pts.reshape((-1, 1, 2))
                        self.imageReshaped = four_point_transform(
                            self.image, pts.reshape(4, 2))
                        # outline the selected region on the preview frame
                        cv2.polylines(self.image, [pts],
                                      True, (255, 255, 255),
                                      thickness=2)
                        if self.showPreview:

                            # show the raw and warped frames side by side
                            original_image_size = (self.image.shape[1],
                                                   self.image.shape[0])
                            normalized_reshape = cv2.resize(
                                self.imageReshaped, original_image_size,
                                cv2.INTER_LANCZOS4)
                            numpy_horizontal_concat = np.concatenate(
                                (self.image, normalized_reshape), axis=1)

                            cv2.imshow("image", numpy_horizontal_concat)
                    else:
                        if self.showPreview:
                            cv2.imshow(
                                "image",
                                cv2.resize(self.image,
                                           (self.previewMultiplier *
                                            self.image.shape[1],
                                            self.previewMultiplier *
                                            self.image.shape[0])))

                    if cv2.waitKey(1) & 0xFF == ord('q'):
                        break
                    # NOTE(review): assumes self.imageReshaped is initialised
                    # (e.g. to None in __init__) before the first frame --
                    # otherwise this attribute access raises; confirm.
                    if self.imageReshaped is not None:
                        self.image = cv2.resize(self.imageReshaped,
                                                self.dimensions,
                                                cv2.INTER_LANCZOS4)
                        self.LedProcessor.updateImage(self.image)

        except:
            # NOTE(review): bare except also swallows KeyboardInterrupt and
            # SystemExit -- consider `except Exception:` plus a `finally`
            # block for the camera release.
            traceback.print_exc()

            self.cam.release()
def scan_image(image):
    # image = cv2.imread(img)
    ratio = image.shape[0] / 500.0
    orig = image.copy()
    image = imutils.resize(image, height=500)

    gray = cv2.cvtColor(image, cv2.COLOR_BGR2GRAY)
    gray = cv2.GaussianBlur(gray, (5, 5), 0)
    edged = cv2.Canny(gray, 75, 200)

    print "STEP 1: Edge Detection"
    # cv2.imshow("Image", image)
    # cv2.imshow("Edged", edged)

    # cv2.waitKey(0)
    # cv2.destroyAllWindows()

    _, cnts, hierarchy = cv2.findContours(edged, cv2.RETR_LIST,
                                          cv2.CHAIN_APPROX_SIMPLE)
    cnts = sorted(cnts, key=cv2.contourArea, reverse=True)[:5]

    for c in cnts:
        peri = cv2.arcLength(c, True)
        approx = cv2.approxPolyDP(c, 0.02 * peri, True)
        print(len(approx))

        if len(approx) == 4:
            screenCnt = approx
            break

        else:
            return

    print "STEP 2: Find contours of paper"
    cv2.drawContours(image, [screenCnt], -1, (0, 255, 0), 2)
    # cv2.imshow("Outline", image)
    #cv2.waitKey(0)
    #cv2.destroyAllWindows()

    warped = four_point_transform(orig, screenCnt.reshape(4, 2) * ratio)

    warped = cv2.cvtColor(warped, cv2.COLOR_BGR2GRAY)
    warped = threshold_adaptive(warped, 251, offset=10)
    warped = warped.astype("uint8") * 255

    print "STEP 3: Apply perspective transform"
    # cv2.imshow("Original", imutils.resize(orig, height = 650))
    cv2.imshow("Scanned", imutils.resize(warped, height=650))

    cv2.waitKey(0)
    cv2.destroyAllWindows()
Example #23
0
def scanInkFromImage(image, paperPoints):
    """Warp the paper region given by *paperPoints* to a top-down view,
    compute its size via paperSizer, and return the thresholded
    (black-and-white) scan."""
    # apply the four point transform to obtain a top-down view
    flattened = four_point_transform(image, paperPoints)
    corner_tuples = arrayToTuple(paperPoints)
    paperSize = paperSizer(sorted(corner_tuples))
    # grayscale + adaptive threshold -> black-and-white paper effect
    gray = cv2.cvtColor(flattened, cv2.COLOR_BGR2GRAY)
    mask = threshold_adaptive(gray, 251, offset=10)
    return mask.astype("uint8") * 255
Example #24
0
    def scan(self, image_path):
        """Scan the document at *image_path*: warp it flat using the
        detected (optionally user-adjusted) contour, then write grayscale,
        sharpened, and two adaptive-threshold variants to the output
        directory.

        Returns (sharpened_image, threshold1_image).
        """
        RESCALED_HEIGHT = 500.0
        OUTPUT_DIR = 'processed_output'

        # load the image and remember the scale between the original and the
        # rescaled detection copy
        source = cv2.imread(image_path)

        print ('image_path: ', image_path)

        assert source is not None

        scale = source.shape[0] / RESCALED_HEIGHT
        full_res = source.copy()
        rescaled_image = imutils2.resize(source, height=int(RESCALED_HEIGHT))

        # contour of the document (optionally refined interactively)
        screenCnt = self.get_contour(rescaled_image)
        if self.interactive:
            screenCnt = self.interactive_get_contour(screenCnt, rescaled_image)

        # perspective-correct the full-resolution original
        warped = transform.four_point_transform(full_res, screenCnt * scale)

        # grayscale version of the warp
        gray = cv2.cvtColor(warped, cv2.COLOR_BGR2GRAY)

        # unsharp mask: subtract a blurred copy from the gray image
        sharpen = cv2.GaussianBlur(gray, (0, 0), 3)
        sharpen = cv2.addWeighted(gray, 1.5, sharpen, -0.5, 0)

        # two black-and-white variants via adaptive thresholding
        thresh1 = cv2.adaptiveThreshold(sharpen, 255, cv2.ADAPTIVE_THRESH_GAUSSIAN_C, cv2.THRESH_BINARY, 21, 15)
        thresh2 = cv2.adaptiveThreshold(sharpen, 255, cv2.ADAPTIVE_THRESH_MEAN_C, cv2.THRESH_BINARY, 9, 41)

        # save every variant of the transformed image
        basename = os.path.basename(image_path)
        cv2.imwrite(OUTPUT_DIR + '/thresh1_' + basename, thresh1)
        cv2.imwrite(OUTPUT_DIR + '/gray_' + basename, gray)
        cv2.imwrite(OUTPUT_DIR + '/sharpen_' + basename, sharpen)
        cv2.imwrite(OUTPUT_DIR + '/warped_' + basename, warped)
        cv2.imwrite(OUTPUT_DIR + '/thresh2_' + basename, thresh2)
        print("Proccessed " + basename)

        return sharpen, thresh1
def main(base64String):
    """Decode a base64-encoded image, find the largest 4-point contour,
    and return a top-down (landscape-oriented) view of that region.

    Returns (True, warped_image) on success, or (False, []) when no
    quadrilateral contour is found.  Side effect: writes the warped
    image to images/number.jpeg.
    """
    image_orig = readb64(base64String)

    # Edge map of a blurred grayscale copy.
    # NOTE(review): removed dead code — fastNlMeansDenoisingColored and a
    # blank np.zeros image were computed here but never used.
    gray = cv2.cvtColor(image_orig, cv2.COLOR_BGR2GRAY)
    image = cv2.GaussianBlur(gray, (5, 5), 0)
    edged = auto_canny(image)

    # Keep the five largest contours; take the first quadrilateral.
    contours = cv2.findContours(edged, cv2.RETR_LIST, cv2.CHAIN_APPROX_SIMPLE)
    contours = imutils.grab_contours(contours)
    contours = sorted(contours, key=cv2.contourArea, reverse=True)[:5]
    screenCnt = []
    for c in contours:
        # approximate the contour; 4 vertices means we found our region
        peri = cv2.arcLength(c, True)
        approx = cv2.approxPolyDP(c, 0.01 * peri, True)
        if len(approx) == 4:
            screenCnt = approx
            break

    # Draw the largest contour on the original (kept before the warp so
    # behavior matches the previous implementation).
    cv2.drawContours(image_orig, contours, 0, (0, 255, 0), 2)

    if len(screenCnt) == 0:
        return (False, [])

    # Top-down view of the detected quadrilateral.
    warped = four_point_transform(image_orig, screenCnt.reshape(4, 2))

    # Force landscape orientation.
    if warped.shape[0] > warped.shape[1]:
        warped = cv2.rotate(warped, cv2.ROTATE_90_COUNTERCLOCKWISE)

    cv2.imwrite("images/number.jpeg", warped)
    return True, warped
Example #26
0
    def scan(self, image_path):
        """Scan the document in *image_path* and save the flattened,
        perspective-corrected image (resized to 2400x1100) into OUTPUT_DIR.

        The detected contour is always refined interactively before warping.
        """
        RESCALED_HEIGHT = 500.0
        OUTPUT_DIR = 'output'

        # load the image and compute the ratio of the old height
        # to the new height, clone it, and resize it.
        # BUG FIX: a leftover hard-coded debug path used to overwrite
        # image_path here, so every call scanned the same local file.
        image = cv2.imread(image_path)

        assert (image is not None)

        ratio = image.shape[0] / RESCALED_HEIGHT
        orig = image.copy()
        rescaled_image = imutils.resize(image, height=int(RESCALED_HEIGHT))

        # get the contour of the document
        screenCnt = self.get_contour(rescaled_image)

        # always let the user adjust the detected contour
        screenCnt = self.interactive_get_contour(screenCnt, rescaled_image)

        # apply the perspective transformation and normalise the size
        warped = transform.four_point_transform(orig, screenCnt * ratio)
        warped = cv2.resize(warped, (2400, 1100))

        # save the transformed image under the original file name
        basename = os.path.basename(image_path)
        cv2.imwrite(OUTPUT_DIR + '/' + basename, warped)
        print("Proccessed " + basename)
Example #27
0
    def processImage(self):
        """Detect the document outline in self.imagePath, warp it to a
        top-down view, binarize it, and write the result to self.outputPath.

        Returns self.outputPath.
        NOTE(review): Python 2 only (print statements); threshold_adaptive
        is from an old scikit-image release — verify the environment.
        """
        image = cv2.imread(self.imagePath)
        ratio = image.shape[0] / 500.0  # scale factor back to full resolution
        orig = image.copy()
        image = imutils.resize(image, height=500)
        print "STEP 1: Edge Detection"
        # convert the image to grayscale, blur it, and find edges
        # in the image
        gray = cv2.cvtColor(image, cv2.COLOR_BGR2GRAY)
        gray = cv2.GaussianBlur(gray, (5, 5), 0)
        edged = cv2.Canny(gray, 75, 200)

        print "STEP 2: Find contours of paper"
        # find the contours in the edged image, keeping only the
        # largest ones, and initialize the screen contour
        # (three-value unpacking: OpenCV 3.x findContours signature)
        (_, cnts, _) = cv2.findContours(edged.copy(), cv2.RETR_LIST,
                                        cv2.CHAIN_APPROX_SIMPLE)
        cnts = sorted(cnts, key=cv2.contourArea, reverse=True)[:5]
        # loop over the contours
        for c in cnts:
            # approximate the contour
            peri = cv2.arcLength(c, True)
            approx = cv2.approxPolyDP(c, 0.02 * peri, True)

            # if our approximated contour has four points, then we
            # can assume that we have found our screen
            if len(approx) == 4:
                screenCnt = approx
                break

        # apply the four point transform to obtain a top-down
        # view of the original image
        # NOTE(review): screenCnt is unbound here when no 4-point contour
        # was found above — this would raise UnboundLocalError.
        warped = four_point_transform(orig, screenCnt.reshape(4, 2) * ratio)

        # convert the warped image to grayscale, then threshold it
        # to give it that 'black and white' paper effect
        warped = cv2.cvtColor(warped, cv2.COLOR_BGR2GRAY)
        warped = threshold_adaptive(warped, 251, offset=10)
        warped = warped.astype("uint8") * 255

        cv2.imwrite(self.outputPath, warped)
        print "Finished"
        return self.outputPath
Example #28
0
def scan(image):
    """Return a 1000x1366 grayscale, top-down view of the largest
    quadrilateral detected in *image*, or None when no quad is found."""
    screenCnt = None

    # Detect on a 500px-high copy; remember the scale back to full size.
    ratio = image.shape[0] / 500.0
    orig = image.copy()
    image = imutils.resize(image, height=500)

    # Grayscale + heavy blur before edge detection.
    gray = cv2.cvtColor(image, cv2.COLOR_BGR2GRAY)
    gray = cv2.GaussianBlur(gray, (9, 9), 0)

    # gray = threshold_adaptive(gray, 251, offset=5)
    # warped = warped.astype("uint8") * 255

    edged = cv2.Canny(gray, 75, 200)

    # Five biggest contours; first one approximating to 4 points wins.
    # (three-value unpacking: OpenCV 3.x findContours signature)
    (_, cnts, _) = cv2.findContours(edged.copy(), cv2.RETR_LIST, cv2.CHAIN_APPROX_SIMPLE)  # opencv3
    for candidate in sorted(cnts, key=cv2.contourArea, reverse=True)[:5]:
        perimeter = cv2.arcLength(candidate, True)
        quad = cv2.approxPolyDP(candidate, 0.02 * perimeter, True)
        if len(quad) == 4:
            screenCnt = quad
            break

    if screenCnt is None:
        return None

    # Warp the full-resolution original and normalise the output size.
    warped = four_point_transform(orig, screenCnt.reshape(4, 2) * ratio)
    return cv2.resize(cv2.cvtColor(warped, cv2.COLOR_BGR2GRAY), (1000, 1366))
def preprocess(image):
    """Load the image at path *image*, find the paper's outline, warp it to
    a top-down view, and return a binarized ('black and white' paper
    effect) grayscale image.

    Raises ValueError when no 4-point document contour is found.
    """
    image = cv2.imread(image)
    ratio = image.shape[0] / 500.0
    orig = image.copy()
    image = imutils.resize(image, height=500)

    # convert the image to grayscale, blur it, and find edges
    # in the image
    gray = cv2.cvtColor(image, cv2.COLOR_BGR2GRAY)
    gray = cv2.GaussianBlur(gray, (5, 5), 0)
    edged = cv2.Canny(gray, 75, 200)

    # find the contours in the edged image, keeping only the
    # largest ones
    cnts = cv2.findContours(edged.copy(), cv2.RETR_LIST,
                            cv2.CHAIN_APPROX_SIMPLE)
    cnts = imutils.grab_contours(cnts)
    cnts = sorted(cnts, key=cv2.contourArea, reverse=True)[:5]

    # the first contour that approximates to four points is the document
    screenCnt = None
    for c in cnts:
        peri = cv2.arcLength(c, True)
        approx = cv2.approxPolyDP(c, 0.02 * peri, True)
        if len(approx) == 4:
            screenCnt = approx
            break

    # BUG FIX: screenCnt used to be referenced unbound when no 4-point
    # contour existed (UnboundLocalError); fail with a clear error instead.
    if screenCnt is None:
        raise ValueError("no 4-point document contour found")

    # apply the four point transform to obtain a top-down
    # view of the original image
    warped = four_point_transform(orig, screenCnt.reshape(4, 2) * ratio)

    # convert the warped image to grayscale, then threshold it
    # to give it that 'black and white' paper effect
    warped = cv2.cvtColor(warped, cv2.COLOR_BGR2GRAY)
    T = threshold_local(warped, 11, offset=10, method="gaussian")
    warped = (warped > T).astype("uint8") * 255

    return warped
Example #30
0
def main():
    """Read a test licence-plate image, segment its characters, and print
    the OCR'd plate text (or a retry message when unreadable)."""
    correctorOn = True   # enable OCR text correction
    oldSeg = False       # use the legacy segmentation path
    CNN_OCR = True       # use the CNN-based OCR model

    plateImg = cv2.imread('./data/input/test5.png')

    # NOTE(review): a four_point_transform over the full image rectangle
    # (and the height/width it needed) was computed here but never used;
    # removed as dead code.
    if oldSeg:
        validChars = OldSegmentation.segment(plateImg)
    else:
        validChars = Segmentation.startSegment(plateImg)
    print(str(len(validChars)))

    plateText = OCR.readPlate(validChars, correctorOn, CNN_OCR)
    if plateText != 'ignore':
        print(plateText)
    else:
        print('Try Again!')
Example #31
0
    def scan(self, image_path):
        """Detect the document in *image_path*, flatten it with a
        perspective transform, and save an adaptive-thresholded
        black-and-white version into OUTPUT_DIR."""
        RESCALED_HEIGHT = 500.0
        OUTPUT_DIR = 'DocCollectorBot/scanned'

        # Load the image; the untouched copy keeps full resolution for the
        # warp while contour detection runs on a 500px-high version.
        image = cv2.imread(image_path)
        assert (image is not None)

        ratio = image.shape[0] / RESCALED_HEIGHT
        orig = image.copy()
        rescaled_image = imutils.resize(image, height=int(RESCALED_HEIGHT))

        # Document outline on the small image, scaled back up for the warp.
        screenCnt = self.get_contour(rescaled_image)
        warped = transform.four_point_transform(orig, screenCnt * ratio)

        # Grayscale, then unsharp-mask to crisp up the text.
        gray = cv2.cvtColor(warped, cv2.COLOR_BGR2GRAY)
        sharpen = cv2.GaussianBlur(gray, (0, 0), 3)
        sharpen = cv2.addWeighted(gray, 1.5, sharpen, -0.5, 0)

        # Adaptive threshold gives the black-and-white paper effect.
        thresh = cv2.adaptiveThreshold(sharpen, 255,
                                       cv2.ADAPTIVE_THRESH_GAUSSIAN_C,
                                       cv2.THRESH_BINARY, 21, 15)

        # Persist the result under the original file name.
        basename = os.path.basename(image_path)
        res = cv2.imwrite(OUTPUT_DIR + '/' + basename, thresh)
        print("Proccessed " + basename)
Example #32
0
def process_frame(image, image_height=1000):
    """Find the largest 4-point contour in *image* and warp it to a
    top-down view.

    Returns (edged, annotated_image, warped).  When no quadrilateral is
    found, *warped* is a black placeholder with the resized frame's shape.
    """
    ratio = image.shape[0] / image_height
    orig = image.copy()
    image = imutils.resize(image, height=image_height)

    # Edge map of a lightly blurred grayscale copy.
    gray = cv2.cvtColor(image, cv2.COLOR_BGR2GRAY)
    gray = cv2.GaussianBlur(gray, (3, 3), 0)
    edged = cv2.Canny(gray, 75, 200)

    contours = cv2.findContours(edged.copy(), cv2.RETR_LIST,
                                cv2.CHAIN_APPROX_SIMPLE)
    contours = imutils.grab_contours(contours)
    # sort the contour features by area from largest to smallest
    contours = sorted(contours, key=cv2.contourArea, reverse=True)[:5]

    screen_cnt = None

    # loop over the contours and try to find the largest 4-pointed polygon
    for c in contours:
        # approximate the contour
        peri = cv2.arcLength(c, True)
        approx = cv2.approxPolyDP(c, 0.02 * peri, True)

        # if our approximated contour has four points, then we
        # can assume that we have found our screen
        if len(approx) == 4:
            screen_cnt = approx
            break

    # BUG FIX: the no-contour fallback used to be np.random.rand noise,
    # which made the function's output nondeterministic; a black image
    # of the same shape is a deterministic placeholder.
    warped = np.zeros((image.shape[0], image.shape[1]))

    if screen_cnt is not None:
        cv2.drawContours(image, [screen_cnt], -1, (0, 255, 0), 2)
        warped = four_point_transform(orig, screen_cnt.reshape(4, 2) * ratio)

    return edged, image, warped
  # NOTE(review): fragment — the enclosing capture loop's header is outside
  # this view; frame/edged/gray/screenCnt come from earlier code.
  box = frame.copy()  # copy used only for drawing the detected outline

  # Grab up to the four largest contours from the edge map
  # (two-value return: OpenCV 2.x/4.x findContours signature).
  (cnts, _) = cv2.findContours(edged.copy(), cv2.RETR_LIST, cv2.CHAIN_APPROX_SIMPLE)
  cnts = sorted(cnts, key = cv2.contourArea, reverse = True)[:4]
  
  for c in cnts:
    peri = cv2.arcLength(c, True)
    approx = cv2.approxPolyDP(c, 0.02 * peri, True)
   
    if len(approx) == 4:
      try:
        # Accept the new quad only if its area is within 25% of the
        # previously tracked contour's area (simple jitter rejection).
        oldarea = cv2.contourArea(approx)
        change = abs(cv2.contourArea(screenCnt) - oldarea)
        if change < oldarea/4:
          screenCnt = approx
          cv2.drawContours(box, [screenCnt], -1, (0, 255, 0), 2)
      except:
        # screenCnt not yet bound on the first frame — take the quad as-is.
        screenCnt = approx
      break

  # Warp the current frame with the tracked contour and show every stage.
  warped = four_point_transform(frame, screenCnt.reshape(4, 2))
  cv2.imshow("Original", imutils.resize(box, height = 350))
  cv2.imshow("Gray", imutils.resize(gray, height = 350))
  cv2.imshow("Scanned", cv2.resize(warped, (872, 800)))

  # 'q' quits the preview loop.
  if cv2.waitKey(1) & 0xFF == ord('q'):
    break

cap.release()
cv2.destroyAllWindows()
# NOTE(review): fragment — `image` and `ratio` are defined by earlier code
# that is outside this view.
if ratio > 1:
	orig = image.copy()
	image = imutils.resize(image, height = 500)

#preprocessing
gray = cv2.cvtColor(image,cv2.COLOR_BGR2GRAY)
gray = cv2.GaussianBlur(gray,(5,5),2 )
kernel = np.ones((11,11),'uint8')
# dilated = cv2.dilate(gray,kernel, iterations = 2)
# morphological opening removes small bright specks before thresholding
opening = cv2.morphologyEx(gray, cv2.MORPH_OPEN, kernel)
# closing = cv2.morphologyEx(dilated, cv2.MORPH_CLOSE, kernel)
ret,thresh = cv2.threshold(opening,127,255,0)
edges = cv2.Canny(opening, 150, 250, apertureSize=3)

# three-value unpacking: OpenCV 3.x findContours signature
_, contours, hierarchy = cv2.findContours(thresh,cv2.RETR_TREE,cv2.CHAIN_APPROX_SIMPLE)
# drop zero-length contours (arcLength of 0 is falsy)
contours = filter(lambda cont: cv2.arcLength(cont, False), contours)
# cv2.drawContours(image, contours, -1, (0, 255, 0), 2)
approximated_contours = []
for c in contours:
	area = cv2.contourArea(c)
	peri = cv2.arcLength(c, True)
	approx = cv2.approxPolyDP(c, 0.02 * peri, True).reshape(-1, 2)
	approximated_contours.append(approx)

# keep only the largest approximated contour
approximated_contours = sorted(approximated_contours, key=cv2.contourArea,reverse=True)[:1]
# NOTE(review): raises IndexError when no contours survived the filter, and
# four_point_transform assumes exactly 4 points in the contour — verify.
warped = four_point_transform(image, approximated_contours[0].reshape(4, 2))
image = cv2.drawContours(image, approximated_contours, -1, (255, 0, 0), 2)

cv2.imshow("Original", image)
cv2.imshow("Warped", warped)
cv2.waitKey(0)
Example #35
0
def scanDoc():
    """Flask endpoint (Python 2): decode an uploaded image from the JSON
    body, find the paper outline, warp and binarize it, and return the
    scan as a JPEG.

    NOTE(review): Python 2 / OpenCV 2.x only (print statements,
    cv2.CV_LOAD_IMAGE_UNCHANGED); screenCnt may be unbound when no
    4-point contour is found; send_file is given a numpy array, which
    Flask cannot serve directly — verify before reuse.
    """

#not sure how to grab the file when it gets posted, but it should get passed into cv2.imread("IMAGE goes here") I was going to try this:
    # dlImage = request.files['file']
    # print dlImage.content_type
    # print dlImage.filename
    # # print dlImage.read()
    # print "HERE"
    # img = dlImage
    # print img
    # img = cv2.imdecode(numpy.fromstring(request.files['file'].read(), numpy.uint8), cv2.CV_LOAD_IMAGE_UNCHANGED)
# load the image and compute the ratio of the old height
# to the new height, clone it, and resize it

    # img = jsonFile.read()
    # imgDaat = json.load(request.json)
    # print imgDaat["file"]
    # response = urllib2.Request(urlFinal)
    # NOTE(review): the local name `json` shadows the stdlib json module.
    json = request.json['file']
    # print print open(json).read().decode('string-escape').decode("utf-8")
    # print type(json)
    # print json['files']
    # # img = json.loads(get_info())
    # print img
    # print request.files['file']
    # afterrequest
    image = cv2.imdecode(np.fromstring(request.json['file'], np.uint8), cv2.CV_LOAD_IMAGE_UNCHANGED)
    print 'CV2'
    print image
    ratio = image.shape[0] / 500.0  # scale factor back to full resolution
    orig = image.copy()
    image = imutils.resize(image, height = 500)

# convert the image to grayscale, blur it, and find edges
# in the image
    gray = cv2.cvtColor(image, cv2.COLOR_BGR2GRAY)
    gray = cv2.GaussianBlur(gray, (5, 5), 0)
    edged = cv2.Canny(gray, 75, 200)

# show the original image and the edge detected image
    print "STEP 1: Edge Detection"

# find the contours in the edged image, keeping only the
# largest ones, and initialize the screen contour
    (cnts, _) = cv2.findContours(edged.copy(), cv2.RETR_LIST, cv2.CHAIN_APPROX_SIMPLE)
    cnts = sorted(cnts, key = cv2.contourArea, reverse = True)[:5]

# loop over the contours
    for c in cnts:
    	# approximate the contour
    	peri = cv2.arcLength(c, True)
    	approx = cv2.approxPolyDP(c, 0.02 * peri, True)

    	# if our approximated contour has four points, then we
    	# can assume that we have found our screen
    	if len(approx) == 4:
    		screenCnt = approx
    		break

# show the contour (outline) of the piece of paper
    print "STEP 2: Find contours of paper"
    cv2.drawContours(image, [screenCnt], -1, (0, 255, 0), 2)

# apply the four point transform to obtain a top-down
# view of the original image
    warped = four_point_transform(orig, screenCnt.reshape(4, 2) * ratio)

# convert the warped image to grayscale, then threshold it
# to give it that 'black and white' paper effect
    warped = cv2.cvtColor(warped, cv2.COLOR_BGR2GRAY)
    warped = threshold_adaptive(warped, 250, offset = 10)
    warped = warped.astype("uint8") * 255
    scanImage = imutils.resize(warped, height = 650)
    print warped
    print scanImage

# show the original and scanned images
    print "STEP 3: Apply perspective transform"
    cv2.startWindowThread()
    cv2.namedWindow("preview")
    cv2.imshow("Original", imutils.resize(orig, height = 650))
    cv2.imshow("Scanned", scanImage)
    cv2.waitKey(0)


    return send_file(scanImage, mimetype='image/jpeg');
Example #36
0
# Detect small circular corner markers with the Hough transform
# (radius 12..16 pixels, min distance 400 between centers).
circles = cv2.HoughCircles(gray, cv2.HOUGH_GRADIENT, 1.2, 400, np.array([]), 100, 5, 12, 16)
# ensure at least some circles were found
if circles is not None:
    # convert the (x, y) coordinates and radius of the circles to integers
    circles = np.round(circles[0, :]).astype("int")
        
        # loop over the (x, y) coordinates and radius of the circles
    for (x, y, r) in circles:
            # draw the circle in the output image, then draw a rectangle
            # corresponding to the center of the circle
        cv2.circle(output, (x, y), r, (0, 255, 0), 4)
        cv2.rectangle(output, (x - 5, y - 5), (x + 5, y + 5), (0, 128, 255), -1)
    
        
    
# Collect the circle centers as the document's corner points.
# NOTE(review): iterating `circles` below raises TypeError when no circles
# were detected (circles is None); four_point_transform also assumes
# exactly 4 centers were found — verify.
cornercircles=[]

for ( x , y, r ) in circles :
    cornercircles.append([x,y])


pts = np.array(cornercircles, dtype = "float32")
print pts
# apply the four point tranform to obtain a "birds eye view" of
# the image
warped = four_point_transform(image, pts)

# show the original and warped images

cv2.imwrite("b1.jpg", warped)
# Load the image twice: once for warping, once for matplotlib display.
image = mpimg.imread(args["image"])

im=plt.imread(args["image"])

#taking input from usser
ax = plt.gca()
fig = plt.gcf()
implot = ax.imshow(im)
coord=[]
def onclick(event):
    # Record each click's pixel coordinates; clicks outside the axes
    # have xdata/ydata of None and are ignored.
    if event.xdata != None and event.ydata != None:
        coord.append((int(event.xdata), int(event.ydata)))
cid = fig.canvas.mpl_connect('button_press_event', onclick)
plt.show()
print(coord)
# NOTE(review): assumes the user clicked exactly 4 corner points — verify.
warped = four_point_transform(image, np.array(coord))

#taking input from usser
ax = plt.gca()
fig = plt.gcf()
implot = ax.imshow(im)
coor=[]

# show the original and warped images
plt.imshow(image),plt.show()
#cv2.imshow("Original", image)
plt.imshow(warped),plt.show()
#cv2.imshow("Warped", warped)
cv2.waitKey(0)
cv2.imwrite('Q1a.jpg', warped) 
cv2.destroyAllWindows()
Example #38
0
	# NOTE(review): fragment — the enclosing contour loop's header and the
	# definitions of c/peri/image/orig/ratio are outside this view.
	# Python 2 (print statements).
	approx = cv2.approxPolyDP(c, 0.02 * peri, True)

	# if our approximated contour has four points, then we
	# can assume that we have found our screen
	if len(approx) == 4:
		screenCnt = approx
		break

# show the contour (outline) of the piece of paper
# NOTE(review): screenCnt is unbound below when no 4-point contour exists.
print "STEP 2: Find contours of paper"
cv2.drawContours(image, [screenCnt], -1, (0, 255, 0), 2)
cv2.imshow("Outline", image)
cv2.waitKey(0)
cv2.destroyAllWindows()

# apply the four point transform to obtain a top-down
# view of the original image
warped = four_point_transform(orig, screenCnt.reshape(4, 2) * ratio)

# convert the warped image to grayscale, then threshold it
# to give it that 'black and white' paper effect
warped = cv2.cvtColor(warped, cv2.COLOR_BGR2GRAY)
warped = threshold_adaptive(warped, 250, offset = 10)
warped = warped.astype("uint8") * 255
print warped
# show the original and scanned images
print "STEP 3: Apply perspective transform"
cv2.imshow("Original", imutils.resize(orig, height = 650))
cv2.imshow("Scanned", imutils.resize(warped, height = 650))
cv2.waitKey(0)