Example #1
def find_box(img, draw=True):
    ratio = 1.5
    orig = img.copy()
    gray = cv2.cvtColor(img, cv2.COLOR_BGR2GRAY)
    gray = cv2.GaussianBlur(gray, (3, 3), 0)
    gray = cv2.adaptiveThreshold(gray, 255, cv2.ADAPTIVE_THRESH_MEAN_C,
                                 cv2.THRESH_BINARY_INV, 61, 10)
    #gray = cv2.morphologyEx(gray, cv2.MORPH_OPEN, np.ones((3, 3)), iterations=1)
    #gray = cv2.morphologyEx(gray, cv2.MORPH_CLOSE, np.ones((9, 9)), iterations=1)
    #gray = cv2.morphologyEx(gray, cv2.MORPH_OPEN, np.ones((3, 3)), iterations=2)
    #_, gray = cv2.threshold(gray, 200, 255, cv2.THRESH_BINARY_INV)
    gray = cv2.dilate(gray, np.ones((5, 5), np.uint8), iterations=2)
    edged = cv2.Canny(gray, 75, 200)

    if draw:
        cv2.imshow("orig", imutils.resize(gray, height=500))
        cv2.imshow("equ", imutils.resize(edged, height=500))
        cv2.waitKey(0)

    _, cnts, _ = cv2.findContours(edged.copy(), cv2.RETR_LIST,
                                  cv2.CHAIN_APPROX_SIMPLE)

    cnts = sorted(cnts, key=cv2.contourArea, reverse=True)[:15]

    possiable_pages = []
    approx_conts = []
    for c in cnts:
        epsilon = 0.1 * cv2.arcLength(c, True)
        approx = cv2.approxPolyDP(c, epsilon, True)
        approx_conts.append(approx)
        if len(approx) == 4:
            possiable_pages.append(approx)

    possiable_pages = sorted(possiable_pages,
                             key=lambda c: cv2.boundingRect(c)[1],
                             reverse=False)
    out_page = []
    for i in range(len(possiable_pages) - 1):
        if cv2.boundingRect(possiable_pages[i + 1])[1] - cv2.boundingRect(
                possiable_pages[i])[1] > 15:
            out_page.append(possiable_pages[i])
    out_page.append(possiable_pages[-1])
    possiable_pages = out_page
    print "pages: {}, approx: {}".format(len(possiable_pages),
                                         len(approx_conts))
    if draw:
        cv2.drawContours(img, approx_conts, -1, (0, 255, 0), 2)
        cv2.drawContours(img, possiable_pages, -1, (0, 255, 0), 2)
        cv2.imshow("equ_conts", imutils.resize(img, height=500))
        cv2.waitKey(0)
    ret = [crop(orig, x) for x in possiable_pages]
    ret2 = [x.tolist() for x in possiable_pages]
    return list(zip(ret, ret2))
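find_box returns crops made by a crop helper that is not shown in this listing. A plausible stand-in, assuming it simply cuts the axis-aligned bounding box of each four-point contour out of the original image (the name and behavior are inferred, not confirmed by the source):

def crop(img, quad):
    # hypothetical helper: crop the bounding box of a 4-point contour
    x, y, w, h = cv2.boundingRect(quad)
    return img[y:y + h, x:x + w]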
Example #2
def scan_image(image):
    # image = cv2.imread(img)
    ratio = image.shape[0] / 500.0
    orig = image.copy()
    image = imutils.resize(image, height=500)

    gray = cv2.cvtColor(image, cv2.COLOR_BGR2GRAY)
    gray = cv2.GaussianBlur(gray, (5, 5), 0)
    edged = cv2.Canny(gray, 75, 200)

    print "STEP 1: Edge Detection"
    # cv2.imshow("Image", image)
    # cv2.imshow("Edged", edged)

    # cv2.waitKey(0)
    # cv2.destroyAllWindows()

    _, cnts, hierarchy = cv2.findContours(edged, cv2.RETR_LIST,
                                          cv2.CHAIN_APPROX_SIMPLE)
    cnts = sorted(cnts, key=cv2.contourArea, reverse=True)[:5]

    for c in cnts:
        peri = cv2.arcLength(c, True)
        approx = cv2.approxPolyDP(c, 0.02 * peri, True)
        print(len(approx))

        if len(approx) == 4:
            screenCnt = approx
            break
    else:
        # no four-point contour found among the candidates; give up
        return

    print "STEP 2: Find contours of paper"
    cv2.drawContours(image, [screenCnt], -1, (0, 255, 0), 2)
    # cv2.imshow("Outline", image)
    #cv2.waitKey(0)
    #cv2.destroyAllWindows()

    warped = four_point_transform(orig, screenCnt.reshape(4, 2) * ratio)

    warped = cv2.cvtColor(warped, cv2.COLOR_BGR2GRAY)
    warped = threshold_adaptive(warped, 251, offset=10)
    warped = warped.astype("uint8") * 255

    print "STEP 3: Apply perspective transform"
    # cv2.imshow("Original", imutils.resize(orig, height = 650))
    cv2.imshow("Scanned", imutils.resize(warped, height=650))

    cv2.waitKey(0)
    cv2.destroyAllWindows()
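scan_image and several later examples rely on a four_point_transform helper that is not shown on this page. It is the standard pyimagesearch perspective-transform utility; a sketch consistent with the inlined version in Example #36 below:

def order_points(pts):
    # order a (4, 2) array of corners as top-left, top-right,
    # bottom-right, bottom-left
    rect = np.zeros((4, 2), dtype="float32")
    s = pts.sum(axis=1)
    rect[0] = pts[np.argmin(s)]      # top-left: smallest x + y
    rect[2] = pts[np.argmax(s)]      # bottom-right: largest x + y
    diff = np.diff(pts, axis=1)
    rect[1] = pts[np.argmin(diff)]   # top-right: smallest y - x
    rect[3] = pts[np.argmax(diff)]   # bottom-left: largest y - x
    return rect

def four_point_transform(image, pts):
    rect = order_points(pts)
    (tl, tr, br, bl) = rect
    # the output size is the largest observed edge length on each axis
    maxWidth = max(int(np.linalg.norm(br - bl)), int(np.linalg.norm(tr - tl)))
    maxHeight = max(int(np.linalg.norm(tr - br)), int(np.linalg.norm(tl - bl)))
    dst = np.array([[0, 0],
                    [maxWidth - 1, 0],
                    [maxWidth - 1, maxHeight - 1],
                    [0, maxHeight - 1]], dtype="float32")
    M = cv2.getPerspectiveTransform(rect, dst)
    return cv2.warpPerspective(image, M, (maxWidth, maxHeight))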
Example #3
def find_page(img, draw=True):
    ratio = img.shape[0] / 500.0
    orig = img.copy()
    img = imutils.resize(img, height=500)

    gray = cv2.cvtColor(img, cv2.COLOR_BGR2GRAY)
    #gray = cv2.GaussianBlur(gray, (19, 19), 0)
    edged = cv2.Canny(gray, 75, 200)

    if draw:
        cv2.imshow("Img", edged)
        cv2.waitKey(0)

    _, cnts, _ = cv2.findContours(edged.copy(), cv2.RETR_LIST,
                                  cv2.CHAIN_APPROX_SIMPLE)

    cnts = sorted(cnts, key=cv2.contourArea, reverse=True)

    possiable_pages = []
    approx_conts = []
    for c in cnts:
        epsilon = 0.1 * cv2.arcLength(c, True)
        approx = cv2.approxPolyDP(c, epsilon, True)
        approx_conts.append(approx)
        if len(approx) == 4:
            possiable_pages.append(approx)
            break
    if draw:
        cv2.drawContours(img, cnts, -1, (0, 255, 0), 2)
        cv2.imshow("Img", img)
        cv2.waitKey(0)
    cv2.destroyAllWindows()

    return four_point_transform(orig, possiable_pages[0].reshape(4, 2) * ratio)
Example #4
    def thresh(self, image):
        ratio = image.shape[0] / 500.0
        orig = image.copy()
        warped = imutils.resize(image, height=500)
        warped = cv2.cvtColor(warped, cv2.COLOR_BGR2GRAY)
        warped = threshold_adaptive(warped, 251, offset=10)
        warped = warped.astype("uint8") * 255
        gray = cv2.GaussianBlur(warped, (5, 5), 0)
        # show the original and scanned images
        # print ("STEP 3: Apply perspective transform")
        print("hello")
        dst = cv2.fastNlMeansDenoising(warped, None, 10, 5, 21)
        im = Image.fromarray(imutils.resize(dst, height=650))
        # im.show()

        return (dst)
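threshold_adaptive, used here and in several other examples on this page, was deprecated and later removed from scikit-image, so the call fails on a current install. A roughly equivalent replacement using threshold_local, which returns the per-pixel threshold surface rather than a boolean image:

from skimage.filters import threshold_local

# equivalent of threshold_adaptive(warped, 251, offset=10)
T = threshold_local(warped, 251, offset=10, method="gaussian")
warped = (warped > T).astype("uint8") * 255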
Example #5
def get_images_and_labels(path):
    # Append all the absolute image paths in a list image_paths
    # We will not read images with the .sad extension in the training set;
    # rather, we will use them to test the accuracy of the training
    image_paths = [os.path.join(path, f) for f in os.listdir(path) if not f.endswith('.sad')]
    # images will contain the face images
    images = []
    # labels will contain the label assigned to each image
    labels = []
    for image_path in image_paths:
        # Read the image and convert to grayscale
        image_pil = cv2.imread(image_path)
        resized = imutils.resize(image_pil, width = 300)
        gray = cv2.cvtColor(resized, cv2.COLOR_BGR2GRAY)
        # Convert the image format into numpy array
        image = np.array(gray, 'uint8')
        # Get the label of the image
        nbr = int(os.path.split(image_path)[1].split(".")[0].replace("subject", ""))
        # Detect the face in the image
        faces = faceCascade.detectMultiScale(image)
        # If face is detected, append the face to images and the label to labels
        for (x, y, w, h) in faces:
            images.append(image[y: y + h, x: x + w])
            labels.append(nbr)
            cv2.imshow("Adding faces to traning set...", image[y: y + h, x: x + w])
            cv2.waitKey(50)
    # return the images list and labels list
    return images, labels
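A minimal usage sketch for the function above, assuming the Yale-faces naming scheme implied by the "subject" prefix and an opencv-contrib build (cv2.face is not available in plain opencv-python); the path is illustrative:

# train an LBPH face recognizer on the collected faces
recognizer = cv2.face.LBPHFaceRecognizer_create()
images, labels = get_images_and_labels("./yalefaces")
recognizer.train(images, np.array(labels))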
Example #6
def resize_image(image, desired_height):
    # load the image and compute the ratio of the old height
    # to the new height, clone it, and resize it
    ratio = image.shape[0] / float(desired_height)
    orig = image.copy()
    image = imutils.resize(image, height=desired_height)
    return image, orig, ratio
Example #7
    def processImage(self):
        image = cv2.imread(self.imagePath)
        ratio = image.shape[0] / 500.0
        orig = image.copy()
        image = imutils.resize(image, height=500)
        print "STEP 1: Edge Detection"
        # convert the image to grayscale, blur it, and find edges
        # in the image
        gray = cv2.cvtColor(image, cv2.COLOR_BGR2GRAY)
        gray = cv2.GaussianBlur(gray, (5, 5), 0)
        edged = cv2.Canny(gray, 55, 200)

        # cv2.imshow("Image", image)
        # cv2.imshow("Edged", edged)
        # cv2.waitKey(0)
        # cv2.destroyAllWindows()

        print "STEP 2: Find contours of paper"
        # find the contours in the edged image, keeping only the
        # largest ones, and initialize the screen contour
        (_, cnts, _) = cv2.findContours(edged.copy(), cv2.RETR_LIST,
                                        cv2.CHAIN_APPROX_SIMPLE)
        cnts = sorted(cnts, key=cv2.contourArea, reverse=True)[:5]
        # loop over the contours
        screenCnt = []
        for c in cnts:
            # approximate the contour
            peri = cv2.arcLength(c, True)
            approx = cv2.approxPolyDP(c, 0.02 * peri, True)
            # if our approximated contour has four points, then we
            # can assume that we have found our screen
            if len(approx) == 4:
                screenCnt = approx
                break
        if len(screenCnt) != 4:
            raise ContourNotFoundError('could not find contour')

        # cv2.drawContours(image, [screenCnt], -1, (0, 255, 0), 2)
        # cv2.imshow("Outline", image)
        # cv2.waitKey(0)
        # cv2.destroyAllWindows()

        # apply the four point transform to obtain a top-down
        # view of the original image
        warped = four_point_transform(orig, screenCnt.reshape(4, 2) * ratio)

        # convert the warped image to grayscale, then threshold it
        # to give it that 'black and white' paper effect
        # warped = cv2.cvtColor(warped, cv2.COLOR_BGR2GRAY)
        # warped = threshold_adaptive(warped, 251, offset=10)
        # warped = warped.astype("uint8") * 255
        height, width = warped.shape[:2]
        ratio1 = float(width) / float(height)
        ratio2 = float(height) / float(width)
        # if (ratio1 > 0.80 or ratio1 < 0.60) and (ratio2 > 0.80 or ratio2 < 0.60):
        #     raise NotA4Error('Cropped Image is not a A4 paper: height: ' + str(height) + ' width: ' + str(width))
        cv2.imwrite(self.outputPath, warped)
        print "Finished Transformation"
        return self.outputPath
Example #8
def readReceipt(number):
    # Read the image from the specified path.
    image = cv2.imread(receipts[number])
    # Resize as the images are hi-res.
    ratio = image.shape[0] / 500.0
    image = imutils.resize(image, height = 500)
    # Return image.
    return image
Example #9
def center_extent(image, size):
    (eW, eH) = size
    if image.shape[1] > image.shape[0]:
        image = imutils.resize(image, width=eW)
    else:
        image = imutils.resize(image, height=eH)
    extent = np.zeros((eH, eW), dtype='uint8')
    offsetX = (eW - image.shape[1]) // 2
    offsetY = (eH - image.shape[0]) // 2

    extent[offsetY:offsetY + image.shape[0],
           offsetX:offsetX + image.shape[1]] = image
    CM = mahotas.center_of_mass(extent)
    (cY, cX) = np.round(CM).astype('int32')
    (dX, dY) = ((size[0] // 2) - cX, (size[1] // 2) - cY)
    M = np.float32([[1, 0, dX], [0, 1, dY]])
    extent = cv2.warpAffine(extent, M, size)

    return extent
Example #10
    def scan(cls, filepath):
        print("Starting scan")
        # load the image and compute the ratio of the old height
        # to the new height, clone it, and resize it
        image = cv2.imread(filepath)
        ratio = image.shape[0] / 500.0
        orig = image.copy()
        image = imutils.resize(image, height=500)

        # convert the image to grayscale, blur it, and find edges
        # in the image
        gray = cv2.cvtColor(image, cv2.COLOR_BGR2GRAY)
        gray = cv2.GaussianBlur(gray, (5, 5), 0)
        edged = cv2.Canny(gray, 75, 200)

        # find the contours in the edged image, keeping only the
        # largest ones, and initialize the screen contour
        cnts, hierarchy = cv2.findContours(edged.copy(), cv2.RETR_LIST, cv2.CHAIN_APPROX_SIMPLE)
        cnts = sorted(cnts, key=cv2.contourArea, reverse=True)[:5]
        screenCnt = None

        # loop over the contours
        for c in cnts:
            # approximate contours
            peri = cv2.arcLength(c, True)
            approx = cv2.approxPolyDP(c, 0.02 * peri, True)

            # if our approximated contour has four points, then we
            # can assume that we have found our screen
            if len(approx) == 4:
                screenCnt = approx
                break

        # Check if we found a 4 point contour. If not, we create our own bounding box
        # with the largest contour
        if screenCnt is None:
            height, width, channels = image.shape
            imageBounds = np.array([[1, 1], [width, 1], [width, height], [1, height]])
            screenCnt = imutils.get_bounding_box(imageBounds)

        # apply the four point transform to obtain a top-down
        # view of the original image
        warped = four_point_transform(orig, screenCnt.reshape(4, 2) * ratio)

        # convert the warped image to grayscale, then threshold it
        # to give it that 'black and white' paper effect
        warped = cv2.cvtColor(warped, cv2.COLOR_BGR2GRAY)
        warped = threshold_adaptive(warped, 251, offset=10)
        warped = warped.astype("uint8") * 255

        # Write out image to tmp file
        filename = "tmp/tmp-result.png"
        cv2.imwrite(filename, warped)
        print("Finished scan")
        return filename
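imutils.get_bounding_box is not part of the published imutils package, so it is presumably a project-local helper here. Given that its result is later passed through screenCnt.reshape(4, 2), a plausible (hypothetical) stand-in:

def get_bounding_box(points):
    # wrap four corner points in the (4, 1, 2) int32 layout that
    # cv2 contours use, so downstream code can reshape it to (4, 2)
    return np.array(points, dtype="int32").reshape(4, 1, 2)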
Example #11
    def run(self):

        # if a video path was not supplied, grab the reference
        # to the gray
        if not args.get("video", False):
            camera = cv2.VideoCapture(0)

        # otherwise, load the video
        else:
            camera = cv2.VideoCapture(args["video"])

        # hand training data
        hand_cascade = cv2.CascadeClassifier('hand_1.xml')

        # keep looping over the frames in the video
        while (camera.isOpened()):
            # grab the current frame
            (grabbed, frame) = camera.read()

            # if we are viewing a video and we did not grab a
            # frame, then we have reached the end of the video
            if args.get("video") and not grabbed:
                break

            frame = cv2.flip(frame, 1)

            # resize the frame, convert it to the HSV color space,
            # and determine the HSV pixel intensities that fall into
            frame = imutils.resize(frame, width=600)

            skin = self.skindetection(frame)

            frameGray = cv2.cvtColor(frame, cv2.COLOR_BGR2GRAY)
            skinGray = cv2.cvtColor(skin, cv2.COLOR_BGR2GRAY)

            #  hand Haar cascade part ----------------------------
            hand = hand_cascade.detectMultiScale(skinGray, 1.3, 5)
            for (x, y, w, h) in hand:
                cv2.rectangle(frame, (x, y), (x + w, y + h), (255, 0, 0), 4)

# ---------------------------------------------------------

            ret, skinGray = cv2.threshold(skinGray, 120, 255,
                                          cv2.THRESH_BINARY_INV)

            # show the skin in the image along with the mask
            cv2.imshow("images", np.hstack([frame, skin]))
            cv2.imshow('gray', skinGray)

            # if the 'q' key is pressed, stop the loop
            if cv2.waitKey(1) & 0xFF == ord("q"):
                break

        # cleanup the camera and close any open windows
        camera.release()
        cv2.destroyAllWindows()
Example #12
def deskew(image, width):
    (h, w) = image.shape[:2]
    moments = cv2.moments(image)

    skew = moments['mu11'] / moments['mu02']
    M = np.float32([[1, skew, -0.5 * w * skew], [0, 1, 0]])
    image = cv2.warpAffine(image,
                           M, (w, h),
                           flags=cv2.WARP_INVERSE_MAP | cv2.INTER_LINEAR)
    image = imutils.resize(image, width=width)
    return image
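deskew is typically paired with center_extent from Example #9; Example #39 below uses the two together to normalize binarized digit crops before classification:

# normalize a thresholded digit crop to a centered 20x20 patch
thresh = deskew(thresh, 20)
thresh = center_extent(thresh, (20, 20))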
Example #13
def draw_circle(event, x, y, flags, param):
    global mouseX, mouseY, color
    if event == cv2.EVENT_LBUTTONDBLCLK:
        mouseX, mouseY = x, y
        image = cv2.imread(dir_initials + "initial_" + letters[l] + "-1.png")
        initial = imutils.resize(image, width=400, height=300)
        cv2.imwrite("frame.png", initial)
        color = initial[y, x]
        print("My color is " + format(color))
    if event == cv2.EVENT_RBUTTONDBLCLK:
        mouseX, mouseY = x, y
        image = cv2.imread("frame.png")
        image = imutils.resize(image, width=400)
        color_clicked = image[y, x]
        print("Color clicked is " + format(color_clicked))
        # compare each BGR channel of the clicked pixel against the
        # reference color, within a fixed tolerance per channel
        b_ok = color[0] - 50 < color_clicked[0] < color[0] + 50
        g_ok = color[1] - 50 < color_clicked[1] < color[1] + 50
        r_ok = color[2] - 40 < color_clicked[2] < color[2] + 40
        if b_ok and g_ok and r_ok:
            print("Skin detected")
Example #14
def get_image():
    ap = argparse.ArgumentParser()
    ap.add_argument("-i",
                    "--image",
                    required=True,
                    help="Path to the image to be scanned")
    args = vars(ap.parse_args())
    image = cv2.imread(args["image"])
    ratio = image.shape[0] / 500.0
    orig_img = image.copy()
    # TODO: Check image orientation and rotate if necessary with game_map = np.rot90(game_map, k=3)
    resized_img = imutils.resize(image, height=500)
    return (orig_img, resized_img, ratio)
Example #15
def getSkinColor(pathToImage, outpath_onlySkin, outpath_Kmean, outpath_AGC):
    # define the upper and lower boundaries of the HSV pixel
    # intensities to be considered 'skin'
    # lower = np.array([0, 48, 80], dtype = "uint8")
    lower = np.array([0, 5, 40], dtype="uint8")
    upper = np.array([55, 255, 255], dtype = "uint8")

    # get the image from the file path
    frame = cv2.imread(pathToImage)

    # resize the frame, convert it to the HSV color space,
    # and determine the HSV pixel intensities that fall into
    # the specified upper and lower boundaries
    frame = imutils.resize(frame, width = 400)
    converted = cv2.cvtColor(frame, cv2.COLOR_BGR2HSV)
    skinMask = cv2.inRange(converted, lower, upper)

    # apply a series of erosions and dilations to the mask
    # using an elliptical kernel
    kernel = cv2.getStructuringElement(cv2.MORPH_ELLIPSE, (11, 11))
    skinMask = cv2.erode(skinMask, kernel, iterations = 2)
    skinMask = cv2.dilate(skinMask, kernel, iterations = 2)

    # blur the mask to help remove noise, then apply the
    # mask to the frame
    skinMask = cv2.GaussianBlur(skinMask, (3, 3), 0)
    skin = cv2.bitwise_and(frame, frame, mask = skinMask)

    # show the skin in the image along with the mask
    cv2.imwrite(outpath_onlySkin, skin)

    # do the action money shot!
    quant = cluster_and_categorize(skin)

    # save outputs of k-mean clustering
    cv2.imwrite(outpath_Kmean, quant)

    # get solid 150x150 image of the detected skin color
    k_color = np.zeros((150, 150, 3), np.uint8)
    k_color[:] = get_skin_color_from_hist(quant)
    cv2.imwrite(outpath_AGC, k_color)

    skin_type = categorize_skin_color(k_color[0:1,0:1])

    print(pathToImage + "\n---------------")
    print("Your skin color is: ")
    print(k_color[0, 0])
    print("Your skin type is: ")
    print(skin_type)
    print("")
Example #16
def detectFace(camera, fd):
    (grabbed, frame) = camera.read()
    frame = imutils.resize(frame, width = 300)
    gray = cv2.cvtColor(frame, cv2.COLOR_BGR2GRAY)

    faceRects = fd.detect(gray, scaleFactor=1.1, minNeighbors=5,
                          minSize=(30, 30))
    # frameClone = frame.copy()
    #
    # for (fX, fY, fW, fH) in faceRects:
    #     cv2.rectangle(frameClone, (fX, fY), (fX + fW, fY + fH), (0, 255, 0), 2)
    #
    # cv2.imshow("Face", frameClone)

    return len(faceRects)
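The fd object here (and in Examples #23 and #24) is the FaceDetector wrapper from the pyimagesearch tutorials, a thin shim over a Haar cascade; a sketch of it, assuming the usual constructor argument:

class FaceDetector:
    def __init__(self, faceCascadePath):
        # load the Haar cascade from the given XML file
        self.faceCascade = cv2.CascadeClassifier(faceCascadePath)

    def detect(self, image, scaleFactor=1.1, minNeighbors=5, minSize=(30, 30)):
        # run the cascade and return the face bounding boxes
        rects = self.faceCascade.detectMultiScale(
            image, scaleFactor=scaleFactor, minNeighbors=minNeighbors,
            minSize=minSize, flags=cv2.CASCADE_SCALE_IMAGE)
        return rects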
Example #17
    def scan(self, image_path):

        RESCALED_HEIGHT = 500.0

        OUTPUT_DIR = os.path.join(os.getcwd(), "output")

        # load the image and compute the ratio of the old height
        # to the new height, clone it, and resize it
        image = cv2.imread(image_path)

        assert image is not None

        ratio = image.shape[0] / RESCALED_HEIGHT
        orig = image.copy()
        rescaled_image = imutils.resize(image, height=int(RESCALED_HEIGHT))

        # get the contour of the document
        screenCnt = self.get_contour(rescaled_image)

        if self.interactive:
            screenCnt = self.interactive_get_contour(screenCnt, rescaled_image)

        # apply the perspective transformation
        warped = transform.four_point_transform(orig, screenCnt * ratio)

        # convert the warped image to grayscale
        gray = cv2.cvtColor(warped, cv2.COLOR_BGR2GRAY)

        # sharpen image
        sharpen = cv2.GaussianBlur(gray, (0, 0), 3)
        sharpen = cv2.addWeighted(gray, 1.5, sharpen, -0.5, 0)

        # apply adaptive threshold to get black and white effect
        thresh = cv2.adaptiveThreshold(sharpen, 255,
                                       cv2.ADAPTIVE_THRESH_GAUSSIAN_C,
                                       cv2.THRESH_BINARY, 21, 15)

        # save the transformed image
        basename = os.path.basename(image_path)

        # create the output directory on first use, then save
        if not os.path.exists(OUTPUT_DIR):
            os.mkdir(OUTPUT_DIR)
        cv2.imwrite(os.path.join(OUTPUT_DIR, basename), thresh)
        print("Processed " + basename)
Example #19
    def processImage(self):
        image = cv2.imread(self.imagePath)
        ratio = image.shape[0] / 500.0
        orig = image.copy()
        image = imutils.resize(image, height=500)
        print "STEP 1: Edge Detection"
        # convert the image to grayscale, blur it, and find edges
        # in the image
        gray = cv2.cvtColor(image, cv2.COLOR_BGR2GRAY)
        gray = cv2.GaussianBlur(gray, (5, 5), 0)
        edged = cv2.Canny(gray, 75, 200)

        print "STEP 2: Find contours of paper"
        # find the contours in the edged image, keeping only the
        # largest ones, and initialize the screen contour
        (_, cnts, _) = cv2.findContours(edged.copy(), cv2.RETR_LIST,
                                        cv2.CHAIN_APPROX_SIMPLE)
        cnts = sorted(cnts, key=cv2.contourArea, reverse=True)[:5]
        # loop over the contours
        for c in cnts:
            # approximate the contour
            peri = cv2.arcLength(c, True)
            approx = cv2.approxPolyDP(c, 0.02 * peri, True)

            # if our approximated contour has four points, then we
            # can assume that we have found our screen
            if len(approx) == 4:
                screenCnt = approx
                break

        # apply the four point transform to obtain a top-down
        # view of the original image
        warped = four_point_transform(orig, screenCnt.reshape(4, 2) * ratio)

        # convert the warped image to grayscale, then threshold it
        # to give it that 'black and white' paper effect
        warped = cv2.cvtColor(warped, cv2.COLOR_BGR2GRAY)
        warped = threshold_adaptive(warped, 251, offset=10)
        warped = warped.astype("uint8") * 255

        cv2.imwrite(self.outputPath, warped)
        print "Finished"
        return self.outputPath
Example #20
    def scan(self, image_path):

        RESCALED_HEIGHT = 500.0
        OUTPUT_DIR = 'output'

        # load the image and compute the ratio of the old height
        # to the new height, clone it, and resize it
        image_path = "/home/user4/Downloads/DATASET/white_images/1.jpeg"
        image = cv2.imread(image_path)

        assert image is not None

        ratio = image.shape[0] / RESCALED_HEIGHT
        orig = image.copy()
        rescaled_image = imutils.resize(image, height=int(RESCALED_HEIGHT))

        # get the contour of the document
        screenCnt = self.get_contour(rescaled_image)

        # if self.interactive:
        #     screenCnt = self.interactive_get_contour(screenCnt, rescaled_image)
        screenCnt = self.interactive_get_contour(screenCnt, rescaled_image)

        # apply the perspective transformation
        warped = transform.four_point_transform(orig, screenCnt * ratio)
        warped = cv2.resize(warped, (2400, 1100))

        # # convert the warped image to grayscale
        # gray = cv2.cvtColor(warped, cv2.COLOR_BGR2GRAY)

        # # sharpen image
        # sharpen = cv2.GaussianBlur(gray, (0,0), 3)
        # sharpen = cv2.addWeighted(gray, 1.5, sharpen, -0.5, 0)

        # # # apply adaptive threshold to get black and white effect
        # # thresh = cv2.adaptiveThreshold(gray, 255, cv2.ADAPTIVE_THRESH_GAUSSIAN_C, cv2.THRESH_BINARY, 11, 6)
        # thresh = cv2.adaptiveThreshold(sharpen, 255, cv2.ADAPTIVE_THRESH_GAUSSIAN_C, cv2.THRESH_BINARY, 11, 6)
        # #0,255,cv2.THRESH_BINARY+cv2.THRESH_OTSU

        # save the transformed image
        basename = os.path.basename(image_path)
        cv2.imwrite(os.path.join(OUTPUT_DIR, basename), warped)
        print("Processed " + basename)
Example #21
def has_hand(image, image_path="result.JPG"):

    # define the upper and lower boundaries of the HSV pixel
    # intensities to be considered 'skin'
    lower = np.array([0, 48, 80], dtype="uint8")
    upper = np.array([20, 255, 255], dtype="uint8")

    # resize the frame, convert it to the HSV color space,
    # and determine the HSV pixel intensities that fall into
    # the specified upper and lower boundaries
    frame = imutils.resize(image, width=400)
    converted = cv2.cvtColor(frame, cv2.COLOR_RGB2HSV)
    skinMask = cv2.inRange(converted, lower, upper)

    # apply a series of erosions and dilations to the mask
    # using an elliptical kernel
    kernel = cv2.getStructuringElement(cv2.MORPH_ELLIPSE, (10, 10))
    skinMask = cv2.erode(skinMask, kernel, iterations=2)
    skinMask = cv2.dilate(skinMask, kernel, iterations=2)

    # blur the mask to help remove noise, then apply the
    # mask to the frame
    skinMask = cv2.GaussianBlur(skinMask, (3, 3), 0)
    skin = cv2.bitwise_and(frame, frame, mask=skinMask)

    # show the skin in the image along with the mask
    cv2.imwrite(image_path[:-4] + "_2.JPG", np.hstack([frame, skin]))
    # count the skin pixels in the mask
    count = int(np.count_nonzero(skinMask == 255))

    print(count)

    if count > 1500:
        print("has skin")
        return True

    else:
        return False
Example #22
def scan(image):
    screenCnt = None

    ratio = image.shape[0] / 500.0
    orig = image.copy()
    image = imutils.resize(image, height=500)

    # convert the image to grayscale, blur it, and find edges
    # in the image
    gray = cv2.cvtColor(image, cv2.COLOR_BGR2GRAY)
    gray = cv2.GaussianBlur(gray, (9, 9), 0)

    # gray = threshold_adaptive(gray, 251, offset=5)
    # warped = warped.astype("uint8") * 255

    edged = cv2.Canny(gray, 75, 200)

    # find the contours in the edged image, keeping only the
    # largest ones, and initialize the screen contour
    (_, cnts, _) = cv2.findContours(edged.copy(), cv2.RETR_LIST, cv2.CHAIN_APPROX_SIMPLE)  # opencv3
    cnts = sorted(cnts, key=cv2.contourArea, reverse=True)[:5]

    # loop over the contours
    for c in cnts:
        # approximate the contour
        peri = cv2.arcLength(c, True)
        approx = cv2.approxPolyDP(c, 0.02 * peri, True)

        # if our approximated contour has four points, then we
        # can assume that we have found our screen
        if len(approx) == 4:
            screenCnt = approx
            break

    # apply the four point transform to obtain a top-down
    # view of the original image
    if screenCnt is None:
        return None
    else:
        warped = four_point_transform(orig, screenCnt.reshape(4, 2) * ratio)
        warped_gray = cv2.resize(cv2.cvtColor(warped, cv2.COLOR_BGR2GRAY), (1000, 1366))
        return warped_gray
Example #23
    def show_camera(self):
        # capture frames from the camera
        for f in self.camera.capture_continuous(self.rawCapture,
                                                format="bgr",
                                                use_video_port=True):
            # grab the raw NumPy array representing the image
            self.frame = f.array

            # resize the frame and convert it to grayscale
            self.frame = imutils.resize(self.frame, width=300)
            self.gray = cv2.cvtColor(self.frame, cv2.COLOR_BGR2GRAY)

            # detect faces in the image and then clone the frame
            # so that we can draw on it
            self.faceRects = self.fd.detect(self.gray,
                                            scaleFactor=1.1,
                                            minNeighbors=5,
                                            minSize=(30, 30))
            self.frameClone = self.frame.copy()

            # loop over the face bounding boxes and draw them
            for (fX, fY, fW, fH) in self.faceRects:
                cv2.rectangle(self.frameClone, (fX, fY), (fX + fW, fY + fH),
                              (0, 255, 0), 2)

            # Turn the laser off if a face is detected
            if len(self.faceRects) > 0:
                GPIO.output(laser, 0)
            else:
                GPIO.output(laser, 1)

            # show our detected faces, then clear the frame in
            # preparation for the next frame
            #cv2.imshow("Face", self.frameClone)
            self.frame2 = self.rot180(numpy.rot90(self.frameClone))
            self.frame2 = pygame.surfarray.make_surface(self.frame2)
            screen.blit(self.frame2, (400, 10))

            pygame.display.update()
            #pygame.display.flip()

            self.rawCapture.truncate(0)
Example #24
    def show_camera(self):
        # capture frames from the camera
        for f in self.camera.capture_continuous(self.rawCapture,
                                                format="bgr",
                                                use_video_port=True):
            # grab the raw NumPy array representing the image
            self.frame = f.array

            # resize the frame and convert it to grayscale
            self.frame = self.rot180(imutils.resize(self.frame, width=300))
            self.gray = cv2.cvtColor(self.frame, cv2.COLOR_BGR2GRAY)

            # detect faces in the image and then clone the frame
            # so that we can draw on it
            self.faceRects = self.fd.detect(self.gray,
                                            scaleFactor=1.1,
                                            minNeighbors=5,
                                            minSize=(30, 30))
            self.frameClone = self.frame.copy()

            # loop over the face bounding boxes and draw them
            for (fX, fY, fW, fH) in self.faceRects:
                cv2.rectangle(self.frameClone, (fX, fY), (fX + fW, fY + fH),
                              (0, 255, 0), 2)

            # Turn the laser off if a face is detected
            if len(self.faceRects) > 0:
                GPIO.output(laser, 0)
            else:
                GPIO.output(laser, 1)

            # show our detected faces, then clear the frame in
            # preparation for the next frame
            cv2.imshow("Robopot", self.frameClone)

            self.rawCapture.truncate(0)

            # if the 'q' key is pressed, stop the loop
            if cv2.waitKey(1) & 0xFF == ord("q"):
                break
Example #25
def detect_skin(frame, img_width):
    # Source: http://www.pyimagesearch.com/2014/08/18/skin-detection-step-step-example-using-python-opencv/
    # resize the frame, convert it to the HSV color space,
    # and determine the HSV pixel intensities that fall into
    # the specified upper and lower boundaries
    frame = imutils.resize(frame, width=img_width)
    converted = cv2.cvtColor(frame, cv2.COLOR_BGR2HSV)
    skinMask = cv2.inRange(converted, lower, upper)

    # apply a series of erosions and dilations to the mask
    # using an elliptical kernel
    kernel = cv2.getStructuringElement(cv2.MORPH_ELLIPSE, (11, 11))
    skinMask = cv2.erode(skinMask, kernel, iterations=2)
    skinMask = cv2.dilate(skinMask, kernel, iterations=2)

    # blur the mask to help remove noise, then apply the
    # mask to the frame
    skinMask = cv2.GaussianBlur(skinMask, (3, 3), 0)
    skin = cv2.bitwise_and(frame, frame, mask=skinMask)

    # Return the frame
    return skin
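detect_skin reads module-level lower and upper bounds that this snippet does not define; the pyimagesearch post it links (and Examples #21 and #31 on this page) use these values:

# HSV bounds for skin tones, from the linked pyimagesearch post
lower = np.array([0, 48, 80], dtype="uint8")
upper = np.array([20, 255, 255], dtype="uint8")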
Example #26
def getSkinColor(pathToImage, outpath_onlySkin, outpath_acg):
    # define the upper and lower boundaries of the HSV pixel
    # intensities to be considered 'skin'
    #lower = np.array([0, 48, 80], dtype = "uint8")
    lower = np.array([0, 5, 75], dtype = "uint8")
    upper = np.array([20, 255, 255], dtype = "uint8")

    frame = cv2.imread(pathToImage)

    # resize the frame, convert it to the HSV color space,
    # and determine the HSV pixel intensities that fall into
    # the specified upper and lower boundaries
    frame = imutils.resize(frame, width = 400)
    converted = cv2.cvtColor(frame, cv2.COLOR_BGR2HSV)
    skinMask = cv2.inRange(converted, lower, upper)

    # apply a series of erosions and dilations to the mask
    # using an elliptical kernel
    kernel = cv2.getStructuringElement(cv2.MORPH_ELLIPSE, (11, 11))
    skinMask = cv2.erode(skinMask, kernel, iterations = 2)
    skinMask = cv2.dilate(skinMask, kernel, iterations = 2)

    # blur the mask to help remove noise, then apply the
    # mask to the frame
    skinMask = cv2.GaussianBlur(skinMask, (3, 3), 0)
    skin = cv2.bitwise_and(frame, frame, mask = skinMask)

    # show the skin in the image along with the mask
    cv2.imwrite(outpath_onlySkin, skin)
    # compute the average skin color (the original author also experimented
    # with findNearestBucket and bucketsavg estimators, commented out)
    average_color = computeAvg(skin, outpath_acg)
    #print("The skin color in HSV scale is: ")
    #print(average_color)
    return average_color
Example #27
def transform_file(file):
    image = cv2.imdecode(
        np.frombuffer(file.read(), np.uint8),
        cv2.IMREAD_UNCHANGED)  # decode the uploaded file into an image array
    ratio = image.shape[0] / 500.0
    orig = image.copy()
    image = imutils.resize(image, height=500)

    gray = cv2.cvtColor(image, cv2.COLOR_BGR2GRAY)
    gray = cv2.GaussianBlur(gray, (5, 5), 0)
    edged = cv2.Canny(gray, 75, 200)

    print("STEP 1: Edge Detection")
    (_, cnts, _) = cv2.findContours(edged.copy(), cv2.RETR_LIST,
                                    cv2.CHAIN_APPROX_SIMPLE)
    cnts = sorted(cnts, key=cv2.contourArea, reverse=True)[:5]
    # loop over the contours
    for c in cnts:
        # approximate the contour
        peri = cv2.arcLength(c, True)
        approx = cv2.approxPolyDP(c, 0.02 * peri, True)

        # if our approximated contour has four points, then we
        # can assume that we have found our screen
        if len(approx) == 4:
            screenCnt = approx
            break

    print("STEP 2: Find contours of paper")
    warped = four_point_transform(orig, screenCnt.reshape(4, 2) * ratio)
    # convert the warped image to grayscale, then threshold it
    # to give it that 'black and white' paper effect
    warped = cv2.cvtColor(warped, cv2.COLOR_BGR2GRAY)
    warped = threshold_adaptive(warped, 251, offset=10)
    warped = warped.astype("uint8") * 255
    # would be nicer to leave the writing to another function? not sure
    cv2.imwrite(os.path.join(app.config['UPLOAD_FOLDER'], file.filename),
                warped)
Example #28
import sys, cv2
from pyimagesearch import imutils

# The program accepts one command line parameter, specifying the file to read.
img = cv2.imread(sys.argv[1])

ratio = img.shape[0] / 500.0
orig = img.copy()
img = imutils.resize(img, height = 500)

## CROPPING AND DESKEWING
#gray = cv2.cvtColor(img,cv2.COLOR_BGR2GRAY)

#blurred = cv2.GaussianBlur(gray, (9,9), 0)

edges = cv2.Canny(img, 0, 150, apertureSize=3)

cv2.imshow("Image", edges)
cv2.waitKey(3000)
cv2.destroyAllWindows()

morphKernel = cv2.getStructuringElement(cv2.MORPH_RECT, (10, 1))
edges = cv2.morphologyEx(edges, cv2.MORPH_CLOSE, morphKernel)

cv2.imshow("Image", edges)
cv2.waitKey(3000)
cv2.destroyAllWindows()

morphKernel = cv2.getStructuringElement(cv2.MORPH_RECT, (10, 1))
edges = cv2.morphologyEx(edges, cv2.MORPH_OPEN, morphKernel)
Example #29
# if a video path was not supplied, grab the reference to the webcam;
# otherwise, load the video
if not args.get("video", False):
	camera = cv2.VideoCapture(0)
else:
	camera = cv2.VideoCapture(args["video"])

# keep looping over the frames in the video
while True:
	# grab the current frame
	(grabbed, frame) = camera.read()	#grabbed = boolean for successful read, frame = image frame
 
	# if we are viewing a video and we did not grab a
	# frame, then we have reached the end of the video
	if args.get("video") and not grabbed:
		break
 
	# resize the frame, convert it to the HSV color space,
	# and determine the HSV pixel intensities that fall into
	# the specified upper and lower boundaries
	frame = imutils.resize(frame, width = 400)
	converted = cv2.cvtColor(frame, cv2.COLOR_BGR2HSV)	#Convert to HSV
	skinMask = cv2.inRange(converted, lower, upper)		#Check if within boundaries
 
	# apply a series of erosions and dilations to the mask
	# using an elliptical kernel to remove small false-positive skin regions
	kernel = cv2.getStructuringElement(cv2.MORPH_ELLIPSE, (11, 11))
	skinMask = cv2.erode(skinMask, kernel, iterations = 2)
	skinMask = cv2.dilate(skinMask, kernel, iterations = 2)
 
	# blur the mask to help remove noise, then apply the
	# mask to the frame
	skinMask = cv2.GaussianBlur(skinMask, (3, 3), 0)
	#Apply mask
	skin = cv2.bitwise_and(frame, frame, mask = skinMask)
 
Example #30
import numpy as np
import argparse
import pyimagesearch.imutils as imutils
import cv2
# construct the argument parser and parse the arguments
ap = argparse.ArgumentParser()
ap.add_argument("-p", "--puzzle", required = True, help = "Path to the puzzle image")
ap.add_argument("-w", "--waldo", required = True, help = "Path to the waldo image")
args = vars(ap.parse_args())
# load the puzzle and waldo images
puzzle = cv2.imread(args["puzzle"])
waldo = cv2.imread(args["waldo"])
(waldoHeight, waldoWidth) = waldo.shape[:2]
# find the waldo in the puzzle
result = cv2.matchTemplate(puzzle, waldo, cv2.TM_CCOEFF)
(_, _, minLoc, maxLoc) = cv2.minMaxLoc(result)
# grab the bounding box of waldo and extract him from
# the puzzle image
topLeft = maxLoc
botRight = (topLeft[0] + waldoWidth, topLeft[1] + waldoHeight)
roi = puzzle[topLeft[1]:botRight[1], topLeft[0]:botRight[0]]
# construct a darkened transparent 'layer' to darken everything
# in the puzzle except for waldo
mask = np.zeros(puzzle.shape, dtype = "uint8")
puzzle = cv2.addWeighted(puzzle, 0.25, mask, 0.75, 0)
# put the original waldo back in the image so that he is
# 'brighter' than the rest of the image
puzzle[topLeft[1]:botRight[1], topLeft[0]:botRight[0]] = roi
# display the images
cv2.imshow("Puzzle", imutils.resize(puzzle, height = 650))
cv2.imshow("Waldo", waldo)
cv2.waitKey(0)
Example #31
lower = np.array([0, 48, 80], dtype="uint8")
upper = np.array([20, 255, 255], dtype="uint8")

cap = cv2.VideoCapture(0)

while True:
    # grab the current frame
    grabbed, frame = cap.read()

    # if we are viewing a video and we did not grab a
    # frame, then we have reached the end of the video

    # resize the frame, convert it to the HSV color space,
    # and determine the HSV pixel intensities that fall into
    # the speicifed upper and lower boundaries
    frame = imutils.resize(frame, width=400)
    converted = cv2.cvtColor(frame, cv2.COLOR_BGR2HSV)
    skinMask = cv2.inRange(converted, lower, upper)

    # apply a series of erosions and dilations to the mask
    # using an elliptical kernel
    # kernel = cv2.getStructuringElement(cv2.MORPH_ELLIPSE, (11, 11))
    # skinMask = cv2.erode(skinMask, kernel, iterations = 2)
    # skinMask = cv2.dilate(skinMask, kernel, iterations = 2)

    # blur the mask to help remove noise, then apply the
    # mask to the frame
    skinMask = cv2.GaussianBlur(skinMask, (3, 3), 0)
    skin = cv2.bitwise_and(frame, frame, mask=skinMask)

    # show the skin in the image along with the mask
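    # the listing is truncated here; judging from the parallel loop in
    # Example #11, the display and exit handling presumably read:
    cv2.imshow("images", np.hstack([frame, skin]))
    if cv2.waitKey(1) & 0xFF == ord("q"):
        break

cap.release()
cv2.destroyAllWindows()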
Example #32
#python scan.py --image unnamed.jpg

# import the necessary packages
from pyimagesearch.transform import four_point_transform
from pyimagesearch import imutils
from skimage.filters import threshold_adaptive
import numpy as np
#import argparse
import cv2

# load the image and compute the ratio of the old height
# to the new height, clone it, and resize it
image = cv2.imread("unnamed4.jpg")
ratio = image.shape[0] / 500.0
orig = image.copy()
image = imutils.resize(image, height=500)

# convert the image to grayscale, blur it, and find edges
# in the image
gray = cv2.cvtColor(image, cv2.COLOR_BGR2GRAY)
gray = cv2.GaussianBlur(gray, (5, 5), 0)
edged = cv2.Canny(gray, 75, 200)

# show the original image and the edge detected image
print "STEP 1: Edge Detection"
cv2.imshow("Image", image)
cv2.imshow("Edged", edged)
cv2.waitKey(0)
cv2.destroyAllWindows()

# find the contours in the edged image, keeping only the
Example #33
    def run(self):

        # if a video path was not supplied, grab the reference
        # to the gray
        if not args.get("video", False):
            camera = cv2.VideoCapture(0)

        # otherwise, load the video
        else:
            camera = cv2.VideoCapture(args["video"])

        # hand training data
        #hand_cascade = cv2.CascadeClassifier('hand_1.xml')

        # keep looping over the frames in the video
        while (camera.isOpened()):
            # grab the current frame
            (grabbed, frame) = camera.read()
            frame = cv2.flip(frame, 1)

            # if we are viewing a video and we did not grab a
            # frame, then we have reached the end of the video
            if args.get("video") and not grabbed:
                break

            # resize the frame, convert it to the HSV color space,
            # and determine the HSV pixel intensities that fall into
            frame = imutils.resize(frame, width=900)

            # define our roi region
            x1, x2 = 10, 210
            y1, y2 = 600, 800
            roi_img = frame[x1:x2, y1:y2]

            # extract skin color from the background
            skin = self.extraction(roi_img, self.skinLower, self.skinUpper)

            frameGray = cv2.cvtColor(roi_img, cv2.COLOR_BGR2GRAY)
            skinGray = cv2.cvtColor(skin, cv2.COLOR_BGR2GRAY)

            #  hand harrcascade part ----------------------------
            '''
			hand = hand_cascade.detectMultiScale(skinGray, 1.3, 5)
			for (x,y,w,h) in hand:
				cv2.rectangle(frame,(x,y),(x+w,y+h),(255,0,0),4)
			'''
            # ---------------------------------------------------------

            ret, skinGray = cv2.threshold(skinGray, 120, 255,
                                          cv2.THRESH_BINARY_INV)

            # find contour
            image, contours, hierarchy = cv2.findContours(skinGray.copy(), \
             cv2.RETR_TREE, cv2.CHAIN_APPROX_NONE)

            # find the dominant hand contour
            max_area = -1
            for i in range(len(contours)):
                cnt = contours[i]
                area = cv2.contourArea(cnt)
                if (area > max_area):
                    max_area = area
                    ci = i
            cnt = contours[ci]

            # draw contour
            hull = cv2.convexHull(cnt)
            drawing = np.zeros(roi_img.shape, np.uint8)
            cv2.drawContours(drawing, [cnt], 0, (0, 255, 0), 2)
            cv2.drawContours(drawing, [hull], 0, (0, 0, 255), 2)
            hull = cv2.convexHull(cnt, returnPoints=False)

            # check if defect angle smaller than 90 degree
            defects = cv2.convexityDefects(cnt, hull)
            count_defects = 0

            if defects is not None:
                for i in range(defects.shape[0]):
                    s, e, f, d = defects[i, 0]
                    start = tuple(cnt[s][0])
                    end = tuple(cnt[e][0])
                    far = tuple(cnt[f][0])
                    a = math.sqrt((end[0] - start[0])**2 +
                                  (end[1] - start[1])**2)
                    b = math.sqrt((far[0] - start[0])**2 +
                                  (far[1] - start[1])**2)
                    c = math.sqrt((end[0] - far[0])**2 + (end[1] - far[1])**2)
                    angle = math.acos((b**2 + c**2 - a**2) / (2 * b * c)) * 57  # law of cosines, in degrees (57 ~ 180/pi)
                    if angle <= 90:
                        count_defects += 1
                        cv2.circle(roi_img, far, 1, [0, 0, 255], 3)

                cv2.line(roi_img, start, end, [0, 255, 0], 3)
            print(count_defects)

            # draw the ROI region
            cv2.rectangle(frame, (y2, x2), (y1, x1), (0, 255, 0), 3)

            # show the skin in the image along with the mask
            cv2.imshow("images", np.hstack([roi_img, skin]))
            # show camera image
            cv2.imshow('camera', frame)
            #cv2.imshow('gray', skinGray)
            cv2.imshow('drawing', drawing)

            # if the 'q' key is pressed, stop the loop
            if cv2.waitKey(1) & 0xFF == ord("q"):
                break

        # cleanup the camera and close any open windows
        camera.release()
        cv2.destroyAllWindows()
Example #34
from skimage import exposure
import numpy as np
import argparse
import cv2
import imutils  # needed for imutils.resize below

# construct the argument parser and parse the arguments
ap = argparse.ArgumentParser()
ap.add_argument("-q", "--query", required=True, help="Path to the query image")
args = vars(ap.parse_args())

# load the query image, compute the ratio of the old height
# to the new height, clone it, and resize it
image = cv2.imread(args["query"])
ratio = image.shape[0] / 300.0
orig = image.copy()
image = imutils.resize(image, height=300)

# convert the image to grayscale, blur it, and find edges
# in the image
gray = cv2.cvtColor(image, cv2.COLOR_BGR2GRAY)
gray = cv2.bilateralFilter(gray, 11, 17, 17)
edged = cv2.Canny(gray, 30, 200)

# find contours in the edged image, keep only the largest
# ones, and initialize our screen contour
(cnts, _) = cv2.findContours(edged.copy(), cv2.RETR_TREE,
                             cv2.CHAIN_APPROX_SIMPLE)
cnts = sorted(cnts, key=cv2.contourArea, reverse=True)[:10]
screenCnt = None

# loop over our contours
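# the listing is truncated here; the canonical loop from this tutorial
# (shown in full in Example #36 below) presumably followed:
for c in cnts:
    peri = cv2.arcLength(c, True)
    approx = cv2.approxPolyDP(c, 0.02 * peri, True)

    # a four-point approximation is assumed to be the screen
    if len(approx) == 4:
        screenCnt = approx
        break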
Example #35
ap.add_argument("-q", "--query", required=True, help="Path to the query image")
args = vars(ap.parse_args())

# load the index
index = open(args["index"], 'rb').read()
index = pickle.loads(index)

#Unpickling a python 2 object with python 3
#change ""index = pickle.loads(index)""
#to     ""index = pickle.loads(index, encoding='latin1')""

# load the query image, convert it to grayscale, and
# resize it
image = cv2.imread(args["query"])
image = cv2.cvtColor(image, cv2.COLOR_BGR2GRAY)
image = imutils.resize(image, width=64)

# threshold the image
thresh = cv2.adaptiveThreshold(image, 255, cv2.ADAPTIVE_THRESH_MEAN_C,
                               cv2.THRESH_BINARY_INV, 11, 7)

# initialize the outline image, find the outermost
# contours (the outline) of the pokemon, then draw
# it
outline = np.zeros(image.shape, dtype="uint8")
(_, cnts, _) = cv2.findContours(thresh.copy(), cv2.RETR_EXTERNAL,
                                cv2.CHAIN_APPROX_SIMPLE)
# remember! 3 output
cnts = sorted(cnts, key=cv2.contourArea, reverse=True)[0]
cv2.drawContours(outline, [cnts], -1, 255, -1)
Example #36
    def init_image(self, image):
        ratio = image.shape[0] / 600.0
        orig = image.copy()
        image = imutils.resize(image, height = 600)

        gray = cv2.cvtColor(image, cv2.COLOR_BGR2GRAY)
        gray = cv2.bilateralFilter(gray, 11, 17, 17)
        edged = cv2.Canny(gray, 30, 200)

        #cv2.imshow("Edged", edged)

        _, cnts, _ = cv2.findContours(edged.copy(), cv2.RETR_TREE, cv2.CHAIN_APPROX_SIMPLE)
        cnts = sorted(cnts, key = cv2.contourArea, reverse = True)[:10]
        screenCnt = None

        for c in cnts:
            peri = cv2.arcLength(c, True)
            approx = cv2.approxPolyDP(c, 0.02 * peri, True)

            if len(approx) == 4:
                screenCnt = approx
                break

        if screenCnt is not None:
            cv2.drawContours(image, [screenCnt], -1, (0, 255, 0), 3)
            #cv2.imshow("Edged", image)

            pts = screenCnt.reshape(4, 2)
            rect = np.zeros((4, 2), dtype = "float32")

            s = pts.sum(axis = 1)
            rect[0] = pts[np.argmin(s)]
            rect[2] = pts[np.argmax(s)]

            diff = np.diff(pts, axis = 1)
            rect[1] = pts[np.argmin(diff)]
            rect[3] = pts[np.argmax(diff)]

            rect *= ratio

            (tl, tr, br, bl) = rect
            widthA = np.sqrt(((br[0] - bl[0]) ** 2) + ((br[1] - bl[1]) ** 2))
            widthB = np.sqrt(((tr[0] - tl[0]) ** 2) + ((tr[1] - tl[1]) ** 2))

            heightA = np.sqrt(((tr[0] - br[0]) ** 2) + ((tr[1] - br[1]) ** 2))
            heightB = np.sqrt(((tl[0] - bl[0]) ** 2) + ((tl[1] - bl[1]) ** 2))

            maxWidth = max(int(widthA), int(widthB))
            maxHeight = max(int(heightA), int(heightB))

            dst = np.array([
                [0, 0],
                [maxWidth - 1, 0],
                [maxWidth - 1, maxHeight - 1],
                [0, maxHeight - 1]
            ], dtype = "float32")


            if self.check_screen(maxWidth, maxHeight):
                #print maxWidth, maxHeight

                M = cv2.getPerspectiveTransform(rect, dst)
                warp = cv2.warpPerspective(orig, M, (maxWidth, maxHeight))

                #cv2.imshow("Warp", warp)

                gray = cv2.cvtColor(warp, cv2.COLOR_BGR2GRAY)
                gray = cv2.bilateralFilter(gray, 17, 17, 17)

                athresholded = cv2.adaptiveThreshold(gray, 255, cv2.ADAPTIVE_THRESH_GAUSSIAN_C, cv2.THRESH_BINARY, 11, 2)
                ret, thresholded = cv2.threshold(gray,60, 255, cv2.THRESH_BINARY)
                #cv2.namedWindow("Grayscale")
                #cv2.setMouseCallback("Grayscale", self.onMouseClick)

                self.athrimg = athresholded.copy()
                self.thrimg = thresholded.copy()
                self.display = athresholded.copy()
                self.display = cv2.cvtColor(self.display, cv2.COLOR_GRAY2BGR)
                #cv2.imshow("Grayscale", self.athrimg)
                # pause
                #self.wait = 0
                #cv2.imshow("THRES", self.thrimg)

                # wait
                #cv2.waitKey(0)
                return True
        return False
Example #37
# construct the argument parser and parse the arguments
# ap = argparse.ArgumentParser()
# ap.add_argument("-i", "--image", required = True,
# 	help = "Path to the image to be scanned")
# args = vars(ap.parse_args())

args = {}
args["image"] = 'images/receipt.jpg'

# load the image and compute the ratio of the old height
# to the new height, clone it, and resize it
image = cv2.imread(args["image"])
ratio = image.shape[0] / 500.0
orig = image.copy()
image = imutils.resize(image, height = 500) # This reshapes in both dimensions with the same factor so that we get height = 500

# convert the image to grayscale, blur it, and find edges
# in the image
gray = cv2.cvtColor(image, cv2.COLOR_BGR2GRAY)
gray = cv2.GaussianBlur(gray, (5, 5), 0)
edged = cv2.Canny(gray, 75, 200)

# show the original image and the edge detected image
print "STEP 1: Edge Detection"
cv2.imshow("Image", image)
cv2.imshow("Edged", edged)
cv2.waitKey(0)
cv2.destroyAllWindows()

# find the contours in the edged image, keeping only the
Example #38
# load the puzzle and waldo images
puzzle = cv2.imread(args["puzzle"])
waldo = cv2.imread(args["waldo"])
(waldoHeight, waldoWidth) = waldo.shape[:2]

	
# find the waldo in the puzzle
result = cv2.matchTemplate(puzzle, waldo, cv2.TM_CCOEFF)
(_, _, minLoc, maxLoc) = cv2.minMaxLoc(result)

#grab the bounding box of waldo and extract him from
# the puzzle image
topLeft = maxLoc
botRight = (topLeft[0] + waldoWidth, topLeft[1] + waldoHeight)
roi = puzzle[topLeft[1]:botRight[1], topLeft[0]:botRight[0]]
 
# construct a darkened transparent 'layer' to darken everything
# in the puzzle except for waldo
mask = np.zeros(puzzle.shape, dtype = "uint8")
puzzle = cv2.addWeighted(puzzle, 0.25, mask, 0.75, 0)
	
# put the original waldo back in the image so that he is
# 'brighter' than the rest of the image
puzzle[topLeft[1]:botRight[1], topLeft[0]:botRight[0]] = roi
 
# display the images
cv2.imshow("Puzzle", imutils.resize(puzzle, height = 650))
cv2.imshow("Waldo", waldo)
cv2.waitKey(0)
Example #39
        roi = gray[y:y + h, x:x + w]
        thresh = roi.copy()
        T = mahotas.thresholding.otsu(roi)
        thresh[thresh > T] = 255
        thresh = cv2.bitwise_not(thresh)

        # deskew the image center its extent
        thresh = dataset.deskew(thresh, 20)
        thresh = dataset.center_extent(thresh, (20, 20))

        big_thresh = cv2.resize(thresh,(int(thresh.shape[1]*5),int(thresh.shape[0]*5)))
        print(str(thresh.shape[1])+" "+str(thresh.shape[0]))
        cv2.imshow("thresh2", big_thresh)
        cv2.imshow("thresh", thresh)

        thresh_tf = imutils.resize(thresh, height=28, width=28)

        prediction = tf.argmax(y_conv, 1)

        my_image = np.array(thresh_tf.reshape(1, 784))
        my_image = my_image / 255
        my_image = my_image.astype('float32')

        digit = prediction.eval(feed_dict={x_: my_image, keep_prob: 1.0}, session=sess)[0]
        
        # extract features from the image and classify it
        print("I think that number is: {}".format(digit))

        # draw a rectangle around the digit, the show what the
        # digit was classified as
Example #40
# construct the face detector and allow the camera to warm
# up
fd = FaceDetector(args["face"])
time.sleep(0.1)

checkOK = False

		
# capture frames from the camera
for f in camera.capture_continuous(rawCapture, format="bgr", use_video_port=True):
	# grab the raw NumPy array representing the image
	frame = f.array

	# resize the frame and convert it to grayscale
	frame = imutils.resize(frame, width = 300)
	gray = cv2.cvtColor(frame, cv2.COLOR_BGR2GRAY)
	color = cv2.cvtColor(frame, cv2.COLOR_BGR2HSV)


	# detect faces in the image and then clone the frame
	# so that we can draw on it
	faceRects = fd.detect(gray, scaleFactor = 1.1, minNeighbors = 5,
		minSize = (30, 30))
	frameClone = frame.copy()

	
	# loop over the face bounding boxes and draw them
	fX=0
	fY=0
	fW=0
Example #41
'''
hist,bins = np.histogram(im.flatten(),256,[0,256])
plt_one = plt.figure(1)
cdf = hist.cumsum()
cdf_normalized = cdf * hist.max()/ cdf.max()

cdf_m = np.ma.masked_equal(cdf,0)
cdf_m = (cdf_m - cdf_m.min())*255/(cdf_m.max()-cdf_m.min())
cdf = np.ma.filled(cdf_m,0).astype('uint8')
im = cdf[im]
'''

###
#im = np.fliplr(im)

im = imutils.resize(im, height=500)

imgray = cv2.cvtColor(im, cv2.COLOR_BGR2GRAY)

# Contour detection
#ret,thresh = cv2.threshold(imgray,127,255,0)

imgray = cv2.GaussianBlur(imgray, (5, 5), 0)



imgray = cv2.adaptiveThreshold(imgray, 255, cv2.ADAPTIVE_THRESH_GAUSSIAN_C,
                               cv2.THRESH_BINARY, 11, 2)

thresh = imgray
Example #42
    ret, frame = cap.read()
    cv2.imwrite(dir_initials + "initial_" + letters[l] + "-1.png", frame)

    for i in range(1, 21):
        cap.set(1, i)
        ret, frame = cap.read()
        cv2.imwrite(
            dir_frames + "initial_" + letters[l] + "-1-" + format(k) + ".png",
            frame)
        k += 1

for l in range(31):
    print("Select skin color initial frame of letter " + letters[l])
    image_initial = cv2.imread(dir_initials + "initial_" + letters[l] +
                               "-1.png")
    initial = imutils.resize(image_initial, width=400, height=225)
    cv2.namedWindow('images')
    cv2.setMouseCallback('images', draw_circle)
    cv2.imshow("images", initial)
    brP = cv2.waitKey(0) & 0xff
    if brP == 32:  # SpaceBar key to stop
        cv2.destroyAllWindows()

    print("Separate hand from each frame of letter " + letters[l])
    for i in range(1, 21):
        a = cv2.imread(dir_frames + "initial_" + letters[l] + "-1-" +
                       format(i) + ".png")
        # a = cv2.imread("C:\Users\Kosara\Documents\DIPLOMA THESIS\handgesture-imageprocessing-master/dataset/frames skin renamed/"
        #     + letters[l] + "-" + str(folder) + "-" + format(i) + ".png")
        initial = imutils.resize(a, width=400, height=300)
        blank_image = np.zeros((300, 400, 3), np.uint8)
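draw_circle is not shown in this excerpt; a minimal sketch of a callback with the signature cv2.setMouseCallback expects, sampling the pixel under a left click (hypothetical):

def draw_circle(event, x, y, flags, param):
    # on a left click, report the clicked coordinate so the skin
    # color at that point can be sampled from the frame
    if event == cv2.EVENT_LBUTTONDOWN:
        print("clicked at ({}, {})".format(x, y))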
Example #43
0
def getSkinColor(pathToImage, outpath_onlySkin, outpath_Kmean, outpath_AGC):
    # define the upper and lower boundaries of the HSV pixel
    # intensities to be considered 'skin'
    lower_a = np.array([0, 5, 40], dtype="uint8")
    upper_a = np.array([25, 255, 255], dtype = "uint8")

    lower_b = np.array([160, 10, 230], dtype="uint8")
    upper_b = np.array([179, 30, 250], dtype="uint8")

    lower_c = np.array([0, 70, 70], dtype="uint8")
    upper_c = np.array([3, 85, 98], dtype="uint8")

    # get the image from the file path
    frame = cv2.imread(pathToImage)

    # resize the frame, convert it to the HSV color space,
    # and determine the HSV pixel intensities that fall into
    # the specified upper and lower boundaries
    frame = imutils.resize(frame, width = 300, height=300)
    frame = cv2.medianBlur(frame, 9)
    converted = cv2.cvtColor(frame, cv2.COLOR_BGR2HSV)
    skinMask_a = cv2.inRange(converted, lower_a, upper_a)
    skinMask_b = cv2.inRange(converted, lower_b, upper_b)
    skinMask_c = cv2.inRange(converted, lower_c, upper_c)

    # combine the masks (edge cases handled here)
    skinMask = skinMask_a + skinMask_b + skinMask_c

    # apply a series of erosions and dilations to the mask
    # using an elliptical kernel
    kernel = cv2.getStructuringElement(cv2.MORPH_ELLIPSE, (11, 11))
    skinMask = cv2.erode(skinMask, kernel, iterations = 2)
    skinMask = cv2.dilate(skinMask, kernel, iterations = 2)

    # blur the mask to help remove noise, then apply the
    # mask to the frame
    skinMask = cv2.GaussianBlur(skinMask, (3, 3), 0)
    skin = cv2.bitwise_and(frame, frame, mask = skinMask)

    # show the skin in the image along with the mask
    cv2.imwrite(outpath_onlySkin, skin)

    # run the k-means clustering step (the money shot)
    # quant = cluster_and_mark(skin)
    quant = cluster_and_categorize(skin)

    # save outputs of k-mean clustering
    cv2.imwrite(outpath_Kmean, quant)

    # get solid 150x150 image of the detected skin color
    k_color = np.zeros((200, 200, 3), np.uint8)
    k_color[:] = get_skin_color_from_hist(quant)

    skin_type = categorize_skin_color(k_color[50:51,50:51])

    print "Image - ", pathToImage, "\n---------------"
    print "Your skin color is: ", k_color[50,50]
    print "Your skin type is: ", skin_type, "\n"

    cv2.putText(k_color, '#{}'.format(skin_type), (10, 60), cv2.FONT_HERSHEY_COMPLEX_SMALL, 3, (255,255,255),4)
    cv2.imwrite(outpath_AGC, k_color)

    return (k_color[50,50], skin_type)
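A usage sketch for getSkinColor above (all four file paths are hypothetical):

color, skin_type = getSkinColor("hand.png", "only_skin.png", "kmean.png", "agc.png")
print "Detected skin color / type:", color, skin_type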
Example #44
0
for item in datas:
    #if item[0] == 255 and item[1] == 255 and item[2] == 255:
    #if item[0] > 75 and item[0] < 225:  # Colors in background
    if item[0] < 75 or item[0] > 225:  # Colors in foreground objects
        #newData.append((255, 255, 255, 0))
        #newData.append((0, 0, 0, 0)) # Make the colors black
        newData.append((0, 0, 0, 0)) # Make the colors black (full RGBA tuple)
    else:
        newData.append(item)
print("--- %s seconds ---" % (time() - start_time))

img.putdata(newData)
img.save("img2.png", "PNG")

img = cv2.imread('img2.png')
img = imutils.resize(img, height = 700)
cv2.imshow('result', img)
cv2.waitKey(0)

#----------------------------------------------------------#

img = Image.open('img2.png')
img = img.convert("RGBA")
datas = img.getdata()

newData = []
for item in datas:
    if item[0] == 0 and item[1] == 0 and item[2] == 0:
        newData.append((0, 0, 255, 0))
    else:
        newData.append(item)
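Presumably the same putdata/save step from the earlier block follows; a sketch (the output file name is hypothetical):

img.putdata(newData)
img.save("img3.png", "PNG")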
Example #45
0
else:
    camera = cv2.VideoCapture(args["video"])

(grabbed, frame) = camera.read()
if args.get("video") and not grabbed:
    exit(0)

focal_len = focal_len if focal_len is not None else dist.cfg_cam()

print "------------------------------------------------BEGIN------------------------------------------------"

# Video Size Diagnostics
# (grabbed, frame) = camera.read()
# print frame.shape

frame = imutils.resize(frame, width=800)

# Init Video
# vid.vid_init(frame.shape[1], frame.shape[0])

roiY_old = roiY
roiX_old = roiX
roiHeight_old = roiHeight
roiWidth_old = roiWidth

roiX = 0
roiY = frame.shape[0] / 2
roiWidth = frame.shape[1]
roiHeight = frame.shape[0]
print frame.shape
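focal_len feeds the usual pinhole-camera distance estimate; a sketch of that relationship (the helper name is hypothetical, and dist.cfg_cam is assumed to calibrate the focal length as implied above):

def distance_to_camera(known_width, focal_len, per_width):
    # pinhole model: D = (W * F) / P, where W is the real object width,
    # F the calibrated focal length, and P the perceived width in pixels
    return (known_width * focal_len) / per_width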
Example #46
0
    # if we are viewing a video and we did not grab a
    # frame, then we have reached the end of the video
    if args.get("video") and not grabbed:
        break

    #Reset Surface Area: #DO I NEED THAT FOR MAC?

    areaNumber = 0

    # grab the raw NumPy array representing the image - ONLY FOR PI?
    # frame = f.array

    # resize the frame and convert it to grayscale
    frame = cv2.flip(frame, 1)
    frameorig = frame
    frame = imutils.resize(frame, width=resizeTo)
    gray = cv2.cvtColor(frame, cv2.COLOR_BGR2GRAY)
    grayorig = cv2.cvtColor(frameorig, cv2.COLOR_BGR2GRAY)
    # gray = cv2.GaussianBlur(gray, (21, 21), 0)

    # detect faces in the image and then clone the frame
    # so that we can draw on it
    faceRects = fd.detect(gray,
                          scaleFactor=1.11,
                          minNeighbors=5,
                          minSize=(minValue, minValue))
    frameClone = cv2.cvtColor(gray, cv2.COLOR_GRAY2RGB)  #
    # frameClone = frame.copy() # (the original line)
    # frameClone = gray
    overlay = np.zeros((screenHeight, screenWidth, 3),
                       np.uint8)  # EXP overlay maybe for rects on fullres?
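Mapping detections from the resized frame back onto the full-resolution frame, as the overlay comment hints at, would look roughly like this (a sketch; frameorig and resizeTo are as above):

scale = frameorig.shape[1] / float(resizeTo)
for (fX, fY, fW, fH) in faceRects:
    cv2.rectangle(frameorig,
                  (int(fX * scale), int(fY * scale)),
                  (int((fX + fW) * scale), int((fY + fH) * scale)),
                  (0, 255, 0), 2)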
Example #47
0
        wordarr = line.split()
        for index in xrange(len(wordarr)/2, len(wordarr)):
            if isprice(wordarr[index]):
                retlist.append(line)
    return retlist
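isprice is not defined in this excerpt; a plausible sketch (hypothetical) that accepts dollar-amount tokens such as "12.99" or "$12.99":

import re

def isprice(token):
    # a price is an optional dollar sign, digits, and exactly two cents
    return re.match(r'^\$?\d+\.\d{2}$', token) is not None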


filenamearr = ["Sobeys.jpg", "UWBOOK.jpg", "Walmart.jpeg"]

for filename in filenamearr:

    image = cv2.imread("/Users/Jack/Desktop/TestData/" + filename)
    ratio = image.shape[0] / 500.0
    orig = image.copy()
    o_height = image.shape[0]
    image = imutils.resize(image, height = 500)

    gray = cv2.cvtColor(image, cv2.COLOR_BGR2GRAY)
    gray = cv2.GaussianBlur(gray, (5, 5), 0)
    edged = cv2.Canny(gray, 75, 200)

    im2, cnts, hierarchy = cv2.findContours(edged.copy(), cv2.RETR_LIST, cv2.CHAIN_APPROX_SIMPLE)
    cnts = sorted(cnts, key=cv2.contourArea, reverse = True)[:5]

    for c in cnts:
        peri = cv2.arcLength(c, True)
        approx = cv2.approxPolyDP(c, 0.02 * peri, True)

        if len(approx) == 4:
            screenCnt = approx
            break
Example #48
0

# construct the argument parser and parse the arguments
ap = argparse.ArgumentParser()
ap.add_argument("-i", "--image", required = True,
	help = "Path to the image to be scanned")
args = vars(ap.parse_args())

# load the image and compute the ratio of the old height
# to the new height, clone it, and resize it
print args['image']
image = cv2.imread(args["image"])
print image
ratio = image.shape[0] / 500.0
orig = image.copy()
image = imutils.resize(image, height = 500)

# convert the image to grayscale, blur it, and find edges
# in the image
gray = cv2.cvtColor(image, cv2.COLOR_BGR2GRAY)
gray = cv2.GaussianBlur(gray, (5, 5), 0)
edged = cv2.Canny(gray, 75, 200)

# show the original image and the edge detected image
print "STEP 1: Edge Detection"
cv2.imshow("Image", image)
cv2.imshow("Edged", edged)
cv2.waitKey(0)
cv2.destroyAllWindows()

# find the contours in the edged image, keeping only the
# largest ones, and initialize the screen contour
Example #49
0
    return group_morse, morse_groups, morse_groups_cent, morse_grp_cnts


def get_morse_text(morse_groups, morse_groups_cent):
    morse_text = []
    for i in range(len(morse_groups)):
        morse_text.append(translate_morse_to_letters(find_morse_message(morse_groups[i], morse_groups_cent[i])))
    return morse_text


if __name__ == '__main__':
    image, true_orig = scan_image()
    orig = image.copy()
    grey_image = crop_and_clear_image(image)
    cv2.imshow("After cropping", imutils.resize(grey_image, height=650))
    cv2.waitKey(0)
    morse_cent, morse_cnts = find_centroids(grey_image)

    group_morse = grey_image.copy()
    group_morse, morse_groups, morse_groups_cent, morse_grp_cnts = find_morse_contours(group_morse)
    cv2.imshow("Morse code grouped", imutils.resize(group_morse, height=650))
    cv2.waitKey(0)
    morse_text = get_morse_text(morse_groups, morse_groups_cent)
    for i in range(len(morse_groups)):
        center = morse_groups_cent[i][0]
        cv2.putText(orig, morse_text[i], (center[0], center[1]), cv2.FONT_HERSHEY_COMPLEX, 4, (0, 0, 255), 10)

    cv2.imshow("Original", imutils.resize(true_orig, height=650))
    cv2.imshow("Morse code scanned", imutils.resize(orig, height=650))
    cv2.waitKey(0)
Example #50
0
def scanDoc():

#Not sure how to grab the file when it gets posted, but it should get passed into cv2.imread("IMAGE goes here"). I was going to try this:
    # dlImage = request.files['file']
    # print dlImage.content_type
    # print dlImage.filename
    # # print dlImage.read()
    # print "HERE"
    # img = dlImage
    # print img
    # img = cv2.imdecode(numpy.fromstring(request.files['file'].read(), numpy.uint8), cv2.CV_LOAD_IMAGE_UNCHANGED)
# load the image and compute the ratio of the old height
# to the new height, clone it, and resize it

    # img = jsonFile.read()
    # imgDaat = json.load(request.json)
    # print imgDaat["file"]
    # response = urllib2.Request(urlFinal)
    json = request.json['file']
    # print print open(json).read().decode('string-escape').decode("utf-8")
    # print type(json)
    # print json['files']
    # # img = json.loads(get_info())
    # print img
    # print request.files['file']
    # afterrequest
    image = cv2.imdecode(np.fromstring(request.json['file'], np.uint8), cv2.CV_LOAD_IMAGE_UNCHANGED)
    print 'CV2'
    print image
    ratio = image.shape[0] / 500.0
    orig = image.copy()
    image = imutils.resize(image, height = 500)

# convert the image to grayscale, blur it, and find edges
# in the image
    gray = cv2.cvtColor(image, cv2.COLOR_BGR2GRAY)
    gray = cv2.GaussianBlur(gray, (5, 5), 0)
    edged = cv2.Canny(gray, 75, 200)

# show the original image and the edge detected image
    print "STEP 1: Edge Detection"

# find the contours in the edged image, keeping only the
# largest ones, and initialize the screen contour
    (cnts, _) = cv2.findContours(edged.copy(), cv2.RETR_LIST, cv2.CHAIN_APPROX_SIMPLE)
    cnts = sorted(cnts, key = cv2.contourArea, reverse = True)[:5]

# loop over the contours
    for c in cnts:
    	# approximate the contour
    	peri = cv2.arcLength(c, True)
    	approx = cv2.approxPolyDP(c, 0.02 * peri, True)

    	# if our approximated contour has four points, then we
    	# can assume that we have found our screen
    	if len(approx) == 4:
    		screenCnt = approx
    		break

# show the contour (outline) of the piece of paper
    print "STEP 2: Find contours of paper"
    cv2.drawContours(image, [screenCnt], -1, (0, 255, 0), 2)

# apply the four point transform to obtain a top-down
# view of the original image
    warped = four_point_transform(orig, screenCnt.reshape(4, 2) * ratio)

# convert the warped image to grayscale, then threshold it
# to give it that 'black and white' paper effect
    warped = cv2.cvtColor(warped, cv2.COLOR_BGR2GRAY)
    warped = threshold_adaptive(warped, 250, offset = 10)
    warped = warped.astype("uint8") * 255
    scanImage = imutils.resize(warped, height = 650)
    print warped
    print scanImage

# show the original and scanned images
    print "STEP 3: Apply perspective transform"
    cv2.startWindowThread()
    cv2.namedWindow("preview")
    cv2.imshow("Original", imutils.resize(orig, height = 650))
    cv2.imshow("Scanned", scanImage)
    cv2.waitKey(0)


    # send_file cannot serve a raw NumPy array; encode the scan to JPEG
    # in memory first (assumes `import io` at the top of the module)
    _, buf = cv2.imencode(".jpg", scanImage)
    return send_file(io.BytesIO(buf.tobytes()), mimetype='image/jpeg')
Example #51
0
def readReceipt(path):
    image = cv2.imread(path)
    if ap.parse_args().resize:
        image = imutils.resize(image, height = 500)
    return image
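A usage sketch for readReceipt above (the path is hypothetical; the module-level parser ap is assumed to define a --resize flag, as implied):

receipt = readReceipt("receipt.jpg")
cv2.imshow("Receipt", receipt)
cv2.waitKey(0)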
Example #52
0
# im = cv2.imread('images/anthony-1.jpg')
# im = cv2.imread('images/car_two.jpg')
# im = cv2.imread('images/object_group_1.jpg')
im = cv2.imread("images/beach_trash_3.jpg")
# im = cv2.imread('images/circles1.png')
# im = cv2.imread('images/waterbottle_0.jpg')

# cv2.imshow('Original', im)

# Histogram equalization to improve contrast


###
# im = np.fliplr(im)

im = imutils.resize(im, height=400)

imgray = cv2.cvtColor(im, cv2.COLOR_BGR2GRAY)

# Contour detection
# ret,thresh = cv2.threshold(imgray,127,255,0)

# imgray = cv2.GaussianBlur(imgray, (5, 5), 200)
imgray = cv2.medianBlur(imgray, 11)

cv2.imshow("Blurred", imgray)

"""
hist,bins = np.histogram(imgray.flatten(),256,[0,256])
plt_one = plt.figure(1)
cdf = hist.cumsum()
Example #53
0
from matplotlib import pyplot as plt

# construct the argument parser and parse the arguments
ap = argparse.ArgumentParser()
ap.add_argument("-i", "--image", required = True,
	help = "Path to the image to be scanned")
args = vars(ap.parse_args())

# Read in image.
image = cv2.imread(args["image"])
# Resize as the images are hi-res.
ratio = image.shape[0] / 500.0
# Keep a copy of original.
orig = image.copy()
# Resize image.
image = imutils.resize(image, height = 500)

image = cv2.cvtColor(image, cv2.COLOR_BGR2GRAY)  # cv2.imread returns BGR, not RGB


ret,thresh1 = cv2.threshold(image,100,255,cv2.THRESH_BINARY)
ret,thresh2 = cv2.threshold(image,100,255,cv2.THRESH_BINARY_INV)
ret,thresh3 = cv2.threshold(image,100,255,cv2.THRESH_TRUNC)
ret,thresh4 = cv2.threshold(image,100,255,cv2.THRESH_TOZERO)
ret,thresh5 = cv2.threshold(image,100,255,cv2.THRESH_TOZERO_INV)

thresh = ['image','thresh1','thresh2','thresh3','thresh4','thresh5']

for i in xrange(6):
    plt.subplot(2,3,i+1),plt.imshow(eval(thresh[i]),'gray')
    plt.title(thresh[i])
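The loop above is cut off at the example boundary; rendering the subplot grid would normally end with a single call (a sketch of the usual continuation):

plt.show()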
Example #54
0
from skimage.filter import threshold_adaptive
import argparse
import cv2


# construct the argument parser and parse the arguments
ap = argparse.ArgumentParser()
ap.add_argument("-i", "--image", required=True, help="Path to the image")
args = vars(ap.parse_args())

# load the image and compute the ratio of the old height
# to the new height, clone it, and resize it
image = cv2.imread(args["image"])
ratio = image.shape[0] / 500.0
orig = image.copy()
image = imutils.resize(image, height=500)

# convert the image to grayscale, blur it, and find edges
# in the image
gray = cv2.cvtColor(image, cv2.COLOR_BGR2GRAY)
gray = cv2.GaussianBlur(gray, (5, 5), 0)
edged = cv2.Canny(gray, 75, 200)

# show the original image and the edge detected image
print "STEP 1: Edge Detection"

# find the contours in the edged image, keeping only the
# largest ones, and initialize the screen contour
(cnts, _) = cv2.findContours(edged.copy(), cv2.RETR_LIST, cv2.CHAIN_APPROX_SIMPLE)
cnts = sorted(cnts, key=cv2.contourArea, reverse=True)[:5]
Example #55
0
		faceRects = fd.detect(gray, scaleFactor = 1.3, minNeighbors = 5,minSize = (100, 100))
		frameClone = frame.copy()
		face = []
		# loop over the face bounding boxes and draw them
		for (fX, fY, fW, fH) in faceRects:
			try:
				#find centroid of the rectangle
				cx = fX + int(fW/2)
				cy = fY + int(fH/2)
				fX -= int(fX/10*offset_constant)
				fY -= int(fY/10*offset_constant)
				fW += int(int(fX/10)*offset_constant) * 2

				face = frameClone[fY:fY+int((fW*proportional_w)/proportional_h),fX:fX+fW]
				#resize
				face_resized = imutils.resize(face, width = larghezza_foto)
				blank_image = face_resized
			except:
				pass
		#merge
		(h, w) = blank_image.shape[:2]
		image_patente[65:int(65+h), 19:int(19+w)] = blank_image
		#show
		cv2.imshow("LIVE", image_patente)

		# if the '1' key is pressed, stop the loop
		c = cv2.waitKey(1)
		if '1' == chr(c & 255):
			cv2.destroyWindow("LIVE")
			break
Example #56
0
  box = frame.copy()
  
  (cnts, _) = cv2.findContours(edged.copy(), cv2.RETR_LIST, cv2.CHAIN_APPROX_SIMPLE)
  cnts = sorted(cnts, key = cv2.contourArea, reverse = True)[:4]
  
  for c in cnts:
    peri = cv2.arcLength(c, True)
    approx = cv2.approxPolyDP(c, 0.02 * peri, True)
   
    if len(approx) == 4:
      try:
        oldarea = cv2.contourArea(approx)
        change = abs(cv2.contourArea(screenCnt) - oldarea)
        if change < oldarea/4:
          screenCnt = approx
          cv2.drawContours(box, [screenCnt], -1, (0, 255, 0), 2)
      except:
        screenCnt = approx
      break

  warped = four_point_transform(frame, screenCnt.reshape(4, 2))
  cv2.imshow("Original", imutils.resize(box, height = 350))
  cv2.imshow("Gray", imutils.resize(gray, height = 350))
  cv2.imshow("Scanned", cv2.resize(warped, (872, 800)))

  if cv2.waitKey(1) & 0xFF == ord('q'):
    break

cap.release()
cv2.destroyAllWindows()