Example #1
    def detect_edge(self, image, enabled_transform=False):
        dst = None
        orig = image.copy()

        gray = cv2.cvtColor(image, cv2.COLOR_BGR2GRAY)
        blurred = cv2.GaussianBlur(gray, (5, 5), 0)
        edged = cv2.Canny(blurred, 0, 20)
        _, contours, _ = cv2.findContours(edged, cv2.RETR_LIST,
                                          cv2.CHAIN_APPROX_NONE)

        contours = sorted(contours, key=cv2.contourArea, reverse=True)

        for cnt in contours:
            epsilon = 0.051 * cv2.arcLength(cnt, True)
            approx = cv2.approxPolyDP(cnt, epsilon, True)

            if len(approx) == 4:
                target = approx
                cv2.drawContours(image, [target], -1, (0, 255, 0), 2)

                if enabled_transform:
                    approx = rect.rectify(target)
                    dst = self.four_point_transform(orig, approx)
                break

        return image, dst
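
The helper `rect.rectify` is called in almost every snippet on this page but its body is never shown. Because its result is fed straight into `cv2.getPerspectiveTransform` together with a destination quad ordered top-left, top-right, bottom-right, bottom-left, it most likely reorders the four detected corners into that order and returns them as a float32 (4, 2) array, which is exactly what `getPerspectiveTransform` expects. A minimal sketch under that assumption (the sum/difference ordering trick is a common choice, not confirmed by the source):

import numpy as np

def rectify(h):
    # Flatten the (4, 1, 2) output of cv2.approxPolyDP into a (4, 2) float32 array.
    h = h.reshape((4, 2)).astype("float32")
    hnew = np.zeros((4, 2), dtype="float32")

    # Top-left has the smallest x + y, bottom-right the largest.
    s = h.sum(axis=1)
    hnew[0] = h[np.argmin(s)]
    hnew[2] = h[np.argmax(s)]

    # Top-right has the smallest y - x, bottom-left the largest.
    d = np.diff(h, axis=1)
    hnew[1] = h[np.argmin(d)]
    hnew[3] = h[np.argmax(d)]
    return hnew

The resulting order matches the destination quads used below, e.g. `np.float32([[0, 0], [800, 0], [800, 800], [0, 800]])`.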
Example #2
def Transform(img_list):
    for img in img_list:
        image = cv2.imread('Captured/{}'.format(img))
        image = cv2.resize(image, (3000, 2000))
        edged = cv2.imread('Edged/{}'.format(img), 0)

        # find the contours in the edged image, keeping only the
        # largest ones, and initialize the screen contour
        cnts = cv2.findContours(edged.copy(), cv2.RETR_LIST,
                                cv2.CHAIN_APPROX_SIMPLE)
        cnts = cnts[0] if imutils.is_cv2() else cnts[1]
        cnts = sorted(cnts, key=cv2.contourArea, reverse=True)[:5]

        # loop over the contours
        for c in cnts:
            # approximate the contour
            peri = cv2.arcLength(c, True)
            approx = cv2.approxPolyDP(c, 0.02 * peri, True)

            # if our approximated contour has four points, then we
            # can assume that we have found our screen
            if len(approx) == 4:
                screenCnt = approx
                break

        # mapping target points to 800x800 quadrilateral
        approx = rect.rectify(screenCnt)
        pts2 = np.float32([[0, 0], [800, 0], [800, 800], [0, 800]])

        M = cv2.getPerspectiveTransform(approx, pts2)
        dst = cv2.warpPerspective(image, M, (800, 800))

        cv2.drawContours(image, [screenCnt], -1, (0, 255, 0), 2)
        dst = cv2.cvtColor(dst, cv2.COLOR_BGR2GRAY)
        cv2.imwrite('Transformed/{}'.format(img), dst)
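
One fragile spot in Example #2: `cnts = cnts[0] if imutils.is_cv2() else cnts[1]` (and the three-value unpacking `_, contours, _` used in several other snippets) assumes the OpenCV 2.x/3.x return shapes of `findContours`; OpenCV 4.x returns two values again, so that indexing breaks. imutils ships `grab_contours` for exactly this case, so a version-independent variant could look like the following sketch (the helper name `largest_contours` is illustrative, not from the original):

import cv2
import imutils

def largest_contours(edged, keep=5):
    # Find contours in a binary edge image, independent of the OpenCV major version.
    cnts = cv2.findContours(edged.copy(), cv2.RETR_LIST, cv2.CHAIN_APPROX_SIMPLE)
    cnts = imutils.grab_contours(cnts)
    # Keep only the `keep` largest contours, biggest first.
    return sorted(cnts, key=cv2.contourArea, reverse=True)[:keep]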
Example #3
    def detect_edge(self, image, kernel_size=5, transform_image=False):
        dst = None
        orig = image.copy()

        gray = cv2.cvtColor(image, cv2.COLOR_BGR2GRAY)
        blurred = cv2.GaussianBlur(gray, (kernel_size, kernel_size), 0)
        edged = cv2.Canny(blurred, 0, 20)
        # show the original image and the edge detected image
        print("Edge Detection")
        cv2.imshow("Image", image)
        cv2.waitKey(0)
        cv2.destroyAllWindows()
        cv2.imshow("Edged", edged)
        cv2.waitKey(0)
        cv2.destroyAllWindows()

        _, contours, _ = cv2.findContours(edged, cv2.RETR_LIST,
                                          cv2.CHAIN_APPROX_NONE)

        contours = sorted(contours, key=cv2.contourArea, reverse=True)

        for cnt in contours:
            epsilon = 0.051 * cv2.arcLength(cnt, True)
            approx = cv2.approxPolyDP(cnt, epsilon, True)

            if len(approx) == 4:
                target = approx
                cv2.drawContours(image, [target], -1, (0, 255, 0), 2)

                if transform_image:
                    approx = rectify(target)
                    dst = self.four_point_transform(orig, approx)
                break

        return image, dst
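
Examples #1 and #3 call `self.four_point_transform(orig, approx)`, whose body is not included on this page. A minimal standalone sketch, assuming it behaves like the commonly used helper of the same name: it warps the quad given as ordered corners into an axis-aligned rectangle whose size is taken from the longest opposing edges.

import cv2
import numpy as np

def four_point_transform(image, pts):
    # pts is assumed ordered top-left, top-right, bottom-right, bottom-left
    # (the order rect.rectify appears to produce).
    (tl, tr, br, bl) = np.float32(pts)

    # Output size: the larger of each pair of opposing edge lengths.
    width = int(max(np.linalg.norm(br - bl), np.linalg.norm(tr - tl)))
    height = int(max(np.linalg.norm(tr - br), np.linalg.norm(tl - bl)))

    dst = np.float32([[0, 0],
                      [width - 1, 0],
                      [width - 1, height - 1],
                      [0, height - 1]])

    M = cv2.getPerspectiveTransform(np.float32(pts), dst)
    return cv2.warpPerspective(image, M, (width, height))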
Example #4
def main(id):
    #print("in scanner")
    srcdir = "./FromPhone"
    trgdir = "./DST"
    b = np.load("formLogs.npy")
    images = sorted(os.listdir(srcdir))
    cnt = 1
    for im in images:
        #image
        # resize image so it can be processed
        # choose optimal dimensions such that important content is not lost
        image = cv2.imread(os.path.join(srcdir, im))
        image = cv2.resize(image, (1500, 880))

        # creating copy of original image
        orig = image.copy()

        # convert to grayscale and blur to smooth
        gray = cv2.cvtColor(image, cv2.COLOR_BGR2GRAY)
        blurred = cv2.GaussianBlur(gray, (5, 5), 0)
        #blurred = cv2.medianBlur(gray, 5)

        # apply Canny Edge Detection
        edged = cv2.Canny(blurred, 0, 50)
        orig_edged = edged.copy()

        # find the contours in the edged image, keeping only the
        # largest ones, and initialize the screen contour
        _, contours, _ = cv2.findContours(edged, cv2.RETR_LIST,
                                          cv2.CHAIN_APPROX_NONE)
        contours = sorted(contours, key=cv2.contourArea, reverse=True)

        #x,y,w,h = cv2.boundingRect(contours[0])
        #cv2.rectangle(image,(x,y),(x+w,y+h),(0,0,255),0)

        # get approximate contour
        for c in contours:
            p = cv2.arcLength(c, True)
            approx = cv2.approxPolyDP(c, 0.02 * p, True)

            if len(approx) == 4:
                target = approx
                break

        # mapping target points to 800x800 quadrilateral
        approx = rect.rectify(target)
        pts2 = np.float32([[0, 0], [800, 0], [800, 800], [0, 800]])

        M = cv2.getPerspectiveTransform(approx, pts2)
        dst = cv2.warpPerspective(orig, M, (800, 800))

        cv2.drawContours(image, [target], -1, (0, 255, 0), 2)
        dst = cv2.cvtColor(dst, cv2.COLOR_BGR2GRAY)
        cv2.imwrite(os.path.join("./ForGUI", "gui" + str(cnt) + ".jpg"), dst)
        dst = dst[50:-5, 200:-5]
        cv2.imwrite(os.path.join(trgdir, "dst" + str(cnt) + ".jpg"), dst)
        cnt += 1
Example #5
    def start_scanning(self):
        ''' Main function, which scans the image. '''

        # add image here.
        # We can also use laptop's webcam if the resolution is good enough to capture
        # readable document content
        image = cv2.imread(self.image)
        # resize image so it can be processed
        # choose optimal dimensions such that important content is not lost
        image = cv2.resize(image, (1500, 880))
        # creating copy of original image
        orig = image.copy()
        # convert to grayscale and blur to smooth
        gray = cv2.cvtColor(image, cv2.COLOR_BGR2GRAY)
        blurred = cv2.GaussianBlur(gray, (5, 5), 0)
        #blurred = cv2.medianBlur(gray, 5)
        # apply Canny Edge Detection
        edged = cv2.Canny(blurred, 0, 50)
        orig_edged = edged.copy()
        # find the contours in the edged image, keeping only the
        # largest ones, and initialize the screen contour
        (_, contours, _) = cv2.findContours(edged, cv2.RETR_LIST,
                                            cv2.CHAIN_APPROX_NONE)
        contours = sorted(contours, key=cv2.contourArea, reverse=True)
        #x,y,w,h = cv2.boundingRect(contours[0])
        #cv2.rectangle(image,(x,y),(x+w,y+h),(0,0,255),0)
        # get approximate contour
        for c in contours:
            p = cv2.arcLength(c, True)
            approx = cv2.approxPolyDP(c, 0.02 * p, True)

            if len(approx) == 4:
                target = approx
                break
        # mapping target points to 800x800 quadrilateral
        approx = rect.rectify(target)
        pts2 = np.float32([[0, 0], [800, 0], [800, 800], [0, 800]])
        M = cv2.getPerspectiveTransform(approx, pts2)
        dst = cv2.warpPerspective(orig, M, (800, 800))
        cv2.drawContours(image, [target], -1, (0, 255, 0), 2)
        dst = cv2.cvtColor(dst, cv2.COLOR_BGR2GRAY)
        # using thresholding on warped image to get scanned effect (If Required)
        ret, th1 = cv2.threshold(dst, 127, 255, cv2.THRESH_BINARY)
        th2 = cv2.adaptiveThreshold(dst, 255, cv2.ADAPTIVE_THRESH_MEAN_C,
                                    cv2.THRESH_BINARY, 11, 2)
        th3 = cv2.adaptiveThreshold(dst, 255, cv2.ADAPTIVE_THRESH_GAUSSIAN_C,
                                    cv2.THRESH_BINARY, 11, 2)
        ret2, th4 = cv2.threshold(dst, 0, 255,
                                  cv2.THRESH_BINARY + cv2.THRESH_OTSU)

        self.all = [
            orig, gray, blurred, orig_edged, image, th1, th2, th3, th4, dst
        ]
        self.scanned = th1
Example #6
    def compute_L1_loss(self, predict_points, label_points, threshold = 0.):
        # label = rect.rectify(label_points)
        label = label_points
        predicted_label = rect.rectify(predict_points)

        diff = label - predicted_label

        abs_diff = np.abs(diff)
        dists = abs_diff.sum(axis=1, dtype=np.int32)
        avg_loss = np.sum(dists) / 4

        score = 1 if avg_loss > threshold else 0
        return avg_loss, score
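
For reference, a tiny self-contained sketch of the same arithmetic as `compute_L1_loss`: the per-corner L1 distance between two (4, 2) corner arrays, averaged over the four corners. The corner values below are made up, and the `rect.rectify` re-ordering step and the int32 cast are skipped.

import numpy as np

# Hypothetical ground-truth and predicted corners (x, y), already ordered
# top-left, top-right, bottom-right, bottom-left.
label = np.float32([[10, 10], [790, 12], [795, 805], [8, 798]])
pred = np.float32([[14, 9], [786, 15], [799, 801], [11, 802]])

dists = np.abs(label - pred).sum(axis=1)   # L1 distance per corner
avg_loss = dists.sum() / 4                 # 6.75 for the values above
print(avg_loss)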
Example #7
    def extract_page(self):
        # We only need the largest contour
        contour = self.find_contours()[0]
        p = cv2.arcLength(contour, True)
        target = cv2.approxPolyDP(contour, 0.1 * p, True)
        approx = rect.rectify(target)

        pts2 = np.float32(
            [[0, 0], [self.WIDTH, 0], [self.WIDTH, self.HEIGHT], [0, self.HEIGHT]])
        M = cv2.getPerspectiveTransform(approx, pts2)
        self.im_page = cv2.warpPerspective(self.im_orig, M, (self.WIDTH, self.HEIGHT))

        # crop the page
        self.im_page = self.im_page[int(60/2):int(2735/2),
                                    int(65/2):int(1935/2)]

        # Black and white
        self.im_page_bw = cv2.cvtColor(self.im_page, cv2.COLOR_BGR2GRAY)
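
Example #7 also depends on `self.find_contours()`, whose body is not listed; judging by the other snippets it presumably runs the same grayscale/blur/Canny pipeline and returns contours sorted by area, largest first. A standalone sketch under that assumption (the Canny thresholds and kernel size are taken from the other examples, not from this class):

import cv2

def find_contours(image):
    # Grayscale, smooth, and edge-detect, then return contours sorted by
    # area so the page outline comes first (assumed behaviour).
    gray = cv2.cvtColor(image, cv2.COLOR_BGR2GRAY)
    blurred = cv2.GaussianBlur(gray, (5, 5), 0)
    edged = cv2.Canny(blurred, 0, 50)
    contours, _ = cv2.findContours(edged, cv2.RETR_LIST,
                                   cv2.CHAIN_APPROX_NONE)[-2:]
    return sorted(contours, key=cv2.contourArea, reverse=True)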
Example #8
contours = sorted(contours, key=cv2.contourArea, reverse=True)

#x,y,w,h = cv2.boundingRect(contours[0])
#cv2.rectangle(image,(x,y),(x+w,y+h),(0,0,255),0)

# get approximate contour
for c in contours:
    p = cv2.arcLength(c, True)
    approx = cv2.approxPolyDP(c, 0.02 * p, True)

    if len(approx) == 4:
        target = approx
        break

# mapping target points to 800x800 quadrilateral
approx = rect.rectify(target)
pts2 = np.float32([[0, 0], [800, 0], [800, 800], [0, 800]])

M = cv2.getPerspectiveTransform(approx, pts2)
dst = cv2.warpPerspective(orig, M, (800, 800))

cv2.drawContours(image, [target], -1, (0, 255, 0), 2)
dst = cv2.cvtColor(dst, cv2.COLOR_BGR2GRAY)

# using thresholding on warped image to get scanned effect (If Required)
ret, th1 = cv2.threshold(dst, 127, 255, cv2.THRESH_BINARY)
th2 = cv2.adaptiveThreshold(dst, 255, cv2.ADAPTIVE_THRESH_MEAN_C,
                            cv2.THRESH_BINARY, 11, 2)
th3 = cv2.adaptiveThreshold(dst, 255, cv2.ADAPTIVE_THRESH_GAUSSIAN_C,
                            cv2.THRESH_BINARY, 11, 2)
ret2, th4 = cv2.threshold(dst, 0, 255, cv2.THRESH_BINARY + cv2.THRESH_OTSU)
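
A practical difference between the four variants above: th1 uses a fixed threshold of 127, which breaks down under uneven lighting, while th4 lets Otsu's method pick the global threshold from the histogram (and th2/th3 threshold locally). A quick self-contained check of the value Otsu selects (the file path is a placeholder for any warped grayscale page):

import cv2

gray = cv2.imread("warped_page.png", cv2.IMREAD_GRAYSCALE)  # placeholder path
otsu_value, binary = cv2.threshold(gray, 0, 255,
                                   cv2.THRESH_BINARY + cv2.THRESH_OTSU)
print("Otsu picked threshold:", otsu_value)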
Example #9
cnts = cnts[0] if imutils.is_cv2() else cnts[1]
cnts = sorted(cnts, key=cv2.contourArea, reverse=True)[:5]

# loop over the contours
for c in cnts:
    # approximate the contour
    peri = cv2.arcLength(c, True)
    approx = cv2.approxPolyDP(c, 0.02 * peri, True)

    # if our approximated contour has four points, then we
    # can assume that we have found our screen
    if len(approx) == 4:
        screenCnt = approx
        break

# mapping target points to 800x800 quadrilateral
approx = rect.rectify(screenCnt)
pts2 = np.float32([[0, 0], [800, 0], [800, 800], [0, 800]])

M = cv2.getPerspectiveTransform(approx, pts2)
dst = cv2.warpPerspective(image, M, (800, 800))

cv2.drawContours(image, [screenCnt], -1, (0, 255, 0), 2)
dst = cv2.cvtColor(dst, cv2.COLOR_BGR2GRAY)
cv2.imwrite('transformed.png', dst)
# show the contour (outline) of the piece of paper
cv2.drawContours(dst, [screenCnt], -1, (0, 255, 0), 2)
plt.imshow(dst, cmap='gray', interpolation='bicubic')
plt.xticks([]), plt.yticks([])  # to hide tick values on X and Y axis
plt.show()
Example #10
#x,y,w,h = cv2.boundingRect(contours[0])
#cv2.rectangle(image,(x,y),(x+w,y+h),(0,0,255),0)

# get approximate contour
for c in contours:
    p = cv2.arcLength(c, True)
    approx = cv2.approxPolyDP(c, 0.02 * p, True)

    if len(approx) == 4:
        target = approx
        break


# mapping target points to 800x800 quadrilateral
approx = rect.rectify(target)
pts2 = np.float32([[0, 0], [800, 0], [800, 800], [0, 800]])

M = cv2.getPerspectiveTransform(approx, pts2)
dst = cv2.warpPerspective(orig, M, (800, 800))

cv2.drawContours(image, [target], -1, (0, 255, 0), 2)
dst = cv2.cvtColor(dst, cv2.COLOR_BGR2GRAY)


# using thresholding on warped image to get scanned effect (If Required)
ret, th1 = cv2.threshold(dst, 127, 255, cv2.THRESH_BINARY)
th2 = cv2.adaptiveThreshold(dst, 255, cv2.ADAPTIVE_THRESH_MEAN_C,
                            cv2.THRESH_BINARY, 11, 2)
th3 = cv2.adaptiveThreshold(dst, 255, cv2.ADAPTIVE_THRESH_GAUSSIAN_C,
                            cv2.THRESH_BINARY, 11, 2)
Example #11
    path_fn = os.path.join(train_path, fn)
    image = cv2.imread(path_fn)

    height, width, _ = image.shape

    name_hash_angle = re.findall(r'(.+)-coordinate_1', fn)[0]

    info_lst = os.path.splitext(fn)[0]
    kw = re.compile(
        r'coordinate_1__([0-9E/.-]+)_([0-9E/.-]+)-coordinate_2__([0-9E/.-]+)_([0-9E/.-]+)-coordinate_3__([0-9E/.-]+)_([0-9E/.-]+)-coordinate_4__([0-9E/.-]+)_([0-9E/.-]+)'
    )
    points = kw.findall(info_lst)
    points = [int(float(p)) for p in points[0]]
    points = np.array(points).astype('float32').reshape(-1, 2)
    points = rect.rectify(points)
    points = np.hstack((points, np.ones(4).reshape(4, 1))).transpose(1, 0)

    # front
    angle = 0
    for j, op in enumerate(prefix[:4]):
        M = cv2.getRotationMatrix2D((width / 2, height / 2), angle, 1)
        if 0 == (j % 2):
            dst = cv2.warpAffine(image, M, (width, height), flags=cv2.INTER_CUBIC)
        else:
            M[0, 2] += (height - width) / 2
            M[1, 2] += (width - height) / 2
            dst = cv2.warpAffine(image, M, (height, width), flags=cv2.INTER_CUBIC)

        angle += 90
        new_points = np.dot(M, points).transpose(1, 0)
Example #12
#x,y,w,h = cv2.boundingRect(contours[0])
#cv2.rectangle(image,(x,y),(x+w,y+h),(0,0,255),0)

# get approximate contour
for c in contours:
    p = cv2.arcLength(c, True)
    approx_cont = cv2.approxPolyDP(c, 0.02 * p, True)

    if len(approx_cont) == 4:
        target = approx_cont
        break


# mapping target points to 800x800 quadrilateral
approx_cont = rect.rectify(target)
pts2 = np.float32([[0, 0], [800, 0], [800, 800], [0, 800]])

M = cv2.getPerspectiveTransform(approx_cont, pts2)
dst = cv2.warpPerspective(orig, M, (800, 800))

cv2.drawContours(image, [target], -1, (0, 255, 0), 2)
dst = cv2.cvtColor(dst, cv2.COLOR_BGR2GRAY)


# using thresholding on warped image to get scanned effect (If Required)
ret, th1 = cv2.threshold(dst, 127, 255, cv2.THRESH_BINARY)
th2 = cv2.adaptiveThreshold(dst, 255, cv2.ADAPTIVE_THRESH_MEAN_C,
                            cv2.THRESH_BINARY, 11, 2)
th3 = cv2.adaptiveThreshold(dst, 255, cv2.ADAPTIVE_THRESH_GAUSSIAN_C,
                            cv2.THRESH_BINARY, 11, 2)