import time
import cv2
import google_ocr
import dynamodb

camera_port = 0


def image_capture():

    camera = cv2.VideoCapture(camera_port)
    time.sleep(5)  # give the camera a few seconds to warm up before the first read
    return_value, image = camera.read()
    cv2.imwrite("C:/Users/Rupali Singh/PycharmProjects/Drishti/opencv.png",
                image)
    camera.release()
    return image


if __name__ == '__main__':
    image_capture()
    i = 1
    while (i == 1):
        google_ocr.detect_text(
            path="C:/Users/Rupali Singh/PycharmProjects/Drishti/opencv.png")
        dynamodb.main("TextImage")
        i = i - 1
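
The google_ocr.detect_text helper used throughout these examples is a project-specific module that is not shown here. Purely as a sketch, a wrapper with that name could be built on the Google Cloud Vision client roughly as follows (the return value and the assumption that credentials are already configured are mine, not the original project's):

import io

from google.cloud import vision


def detect_text(path):
    # Read the image and ask Cloud Vision for text annotations.
    client = vision.ImageAnnotatorClient()
    with io.open(path, "rb") as f:
        content = f.read()
    response = client.text_detection(image=vision.Image(content=content))
    annotations = response.text_annotations
    # The first annotation, when present, carries the full detected text.
    return annotations[0].description if annotations else ""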
Example #2
import os

import cv2
import mojimoji
import numpy as np
import pandas as pd
from tqdm import tqdm

import extract_text
import google_ocr

# whitehist() and isChange() are helper functions defined elsewhere in the same module.


def find_devide_point(dirId, n):
    dirpath = "images{0:02d}".format(dirId)
    df = pd.DataFrame(index=[], columns=['id', 'time', 'text', 'state'])
    imId = 1
    state = 0  # text: exist = 1, none = 0
    y = np.zeros(150)
    pbar = tqdm(total=120)
    cnt = 0
    hists = np.array([])
    before_text = ""
    while (os.path.isfile(dirpath + "/image{}.jpg".format(imId))):
        pbar.update(1)
        path = dirpath + "/image{}.jpg".format(imId)
        img = cv2.imread(path)
        mask = extract_text.extract_white(img)
        rects = extract_text.get_rects(mask)
        height, width = img.shape[:2]
        rects = [
            rect for rect in rects if rect[2] * rect[3] > height * width / n
        ]

        # case: no text in this frame
        if not rects:
            if state:
                state = 0
                y = np.zeros(150)
                series = pd.Series(
                    [imId - 1, (imId - 1) * 0.5, before_text, -1],
                    index=df.columns)
                df = df.append(series, ignore_index=True)
            imId += 1
            continue
        x = whitehist(rects, mask, n)
        min_x = min(rects, key=(lambda x: x[0]))
        min_y = min(rects, key=(lambda x: x[1]))
        max_w = max(rects, key=(lambda x: x[0] + x[2]))
        max_h = max(rects, key=(lambda x: x[1] + x[3]))
        max_rect = np.array([
            min_x[0], min_y[1], max_w[0] - min_x[0] + max_w[2],
            max_h[1] - min_y[1] + max_h[3]
        ])

        # case: the screen has whited out
        if max_rect[2] * max_rect[3] >= height * width:
            if state:
                state = 0
                y = x
                series = pd.Series(
                    [imId - 1, (imId - 1) * 0.5, before_text, -1],
                    index=df.columns)
                df = df.append(series, ignore_index=True)
            imId += 1
            continue

        if isChange(x, y):
            cnt += 1
            text = google_ocr.detect_text(dirId, imId)
            text = text.replace(" ",
                                "").replace("\n",
                                            "").replace(u' ',
                                                        "").replace("\t", "")
            if mojimoji.zen_to_han(text) == mojimoji.zen_to_han(before_text):
                imId += 1
                y = x
                continue
            if state == 0:
                if text == "":
                    imId += 1
                    y = x
                    before_text = text
                    continue
                state = 1
                y = x
                series = pd.Series([imId, imId * 0.5, text, 1],
                                   index=df.columns)
                df = df.append(series, ignore_index=True)
                before_text = text
            else:
                state = 1
                series = pd.Series(
                    [imId - 1, (imId - 1) * 0.5, before_text, -1],
                    index=df.columns)
                df = df.append(series, ignore_index=True)
                y = x
                before_text = text
                if text:
                    series = pd.Series([imId, imId * 0.5, text, 1],
                                       index=df.columns)
                    df = df.append(series, ignore_index=True)
        y = x
        imId += 1
    datadir = "data"
    if not os.path.isdir(datadir):
        os.makedirs(datadir)
    df.to_csv(datadir + "/" + dirpath + ".csv")
    pbar.close()
    print(cnt)
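
A possible way to drive find_devide_point over several capture directories, sketched here only as an illustration (the directory count and the area-threshold divisor n=500 are assumptions, not values from the original project):

if __name__ == '__main__':
    # Process images01/ ... images05/, keeping text rectangles larger than
    # 1/500 of the frame area (both values are illustrative assumptions).
    for dir_id in range(1, 6):
        find_devide_point(dir_id, 500)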
Example #3
import cv2
import face_recognition
import google_ocr

video_capture = cv2.VideoCapture(0)

# Face-capture loop: detect a face, show the feed, and keep the last frame when 'q' is pressed
while True:
    # Grab a single frame of video
    ret, frame = video_capture.read()

    # Find all the faces in the current frame of video
    face_locations = face_recognition.face_locations(frame)

    for top, right, bottom, left in face_locations:
        # Extract the region of the image that contains the face
        frame = frame[top:bottom, left:right]
        #face_image = cv2.rectangle( face_image, (left,top), (right, bottom), (255,0,0))
        # Put the blurred face region back into the frame image
        #frame[top:bottom, left:right] = face_image

    # Display the resulting image
    cv2.imshow('Face detect', frame)

    # Hit 'q' on the keyboard to quit!
    if cv2.waitKey(1) & 0xFF == ord('q'):
        break

cv2.imwrite("face.jpg", frame)
# Initialize some variables

while True:
    # Grab a single frame of video
    ret, frame = video_capture.read()

    cv2.rectangle(frame, (100, 100), (600, 400), (0, 0, 255), 3)

    # Display the resulting image
    cv2.imshow('document', frame)

    # Hit 'q' on the keyboard to quit!
    if cv2.waitKey(1) & 0xFF == ord('q'):
        break
cv2.imwrite("document.jpg", frame[100:400, 100:600])
google_ocr.detect_text("document.jpg")
# Release handle to the webcam
video_capture.release()
cv2.destroyAllWindows()
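
One detail worth noting in the snippet above: OpenCV rectangle corners are given as (x, y), while the NumPy crop uses [rows, cols], so frame[100:400, 100:600] is exactly the interior of the rectangle drawn from (100, 100) to (600, 400). A small hypothetical helper makes the correspondence explicit:

def crop_rect(frame, top_left, bottom_right):
    # OpenCV points are (x, y); NumPy slicing is frame[y1:y2, x1:x2].
    (x1, y1), (x2, y2) = top_left, bottom_right
    return frame[y1:y2, x1:x2]


# crop_rect(frame, (100, 100), (600, 400)) is the same crop as frame[100:400, 100:600]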
Example #4
import time
import cv2
import google_ocr
import dynamodb

camera_port = 0


def image_capture():

    camera = cv2.VideoCapture(camera_port)
    time.sleep(5)  # give the camera a few seconds to warm up before the first read
    return_value, image = camera.read()
    cv2.imwrite("D:/Drishti-ocr/opencv.png", image)
    camera.release()
    return image


if __name__ == '__main__':
    image_capture()
    i = 1
    while (i == 1):
        google_ocr.detect_text(path="D:/Drishti-ocr/opencv.png")
        dynamodb.main("TextImage")
        i = i - 1
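
dynamodb.main("TextImage") refers to another module from the same project. Purely as an assumption of what such a helper might do, a minimal boto3 sketch that writes recognized text into the named table could look like this (the item layout and the extra text parameter are illustrative, not taken from the original code):

import time

import boto3


def main(table_name, text=""):
    # Write the recognized text into a DynamoDB table, keyed by a timestamp.
    table = boto3.resource("dynamodb").Table(table_name)
    table.put_item(Item={"id": str(int(time.time())), "text": text})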
Example #5
import cv2
import face_recognition

import config
import google_ocr

# T (the Tkinter Text widget written to below) and END are defined by the surrounding GUI module.


def captureimage():
    video_capture = cv2.VideoCapture(0)

    # Initialize some variables
    face_locations = []
    count = 30
    while count > 0:
        count -= 1
        # Grab a single frame of video
        ret, frame = video_capture.read()

        # Resize frame of video to 1/4 size for faster face detection processing
        small_frame = cv2.resize(frame, (0, 0), fx=0.25, fy=0.25)

        # Find all the faces and face encodings in the current frame of video
        face_locations = face_recognition.face_locations(small_frame,
                                                         model="cnn")

        # Display the results
        for top, right, bottom, left in face_locations:
            # Scale back up face locations since the frame we detected in was scaled to 1/4 size
            top *= 4
            right *= 4
            bottom *= 4
            left *= 4

            # Extract the region of the image that contains the face
            frame = frame[top:bottom, left:right]

            # Blur the face image
            #face_image = cv2.GaussianBlur(face_image, (99, 99), 30)
            #face_image = cv2.rectangle( face_image, (left,top), (right, bottom), (255,0,0))
            # Put the blurred face region back into the frame image
            #frame[top:bottom, left:right] = face_image
        font = cv2.FONT_HERSHEY_SIMPLEX
        bottomLeftCornerOfText = (0, 30)
        fontScale = 1
        fontColor = (255, 255, 255)
        lineType = 2
        # Display the resulting image
        cv2.putText(frame, str(count), bottomLeftCornerOfText, font, fontScale,
                    fontColor, lineType)
        cv2.imshow('Video', frame)
        # Display the resulting image
        #cv2.imshow('Face detect', frame)

        # Hit 'q' on the keyboard to quit!
        if cv2.waitKey(1) & 0xFF == ord('q'):
            break
    face_frame = frame
    video_capture.release()
    cv2.destroyAllWindows()
    video_capture = cv2.VideoCapture(0)
    # Initialize some variables
    count = 100
    while count > 0:
        count -= 1
        # Grab a single frame of video
        ret, frame = video_capture.read()

        cv2.rectangle(frame, (100, 100), (600, 400), (0, 0, 255), 3)
        font = cv2.FONT_HERSHEY_SIMPLEX
        bottomLeftCornerOfText = (80, 80)
        fontScale = 1
        fontColor = (255, 255, 255)
        lineType = 2
        # Display the resulting image
        cv2.putText(frame, str(count), bottomLeftCornerOfText, font, fontScale,
                    fontColor, lineType)
        # Display the resulting image
        cv2.imshow('document', frame)

        # Hit 'q' on the keyboard to quit!
        if cv2.waitKey(1) & 0xFF == ord('q'):
            break

    cv2.imwrite("document.jpg", frame[100:400, 100:600])
    result = google_ocr.detect_text("document.jpg")
    # Release handle to the webcam
    video_capture.release()
    cv2.destroyAllWindows()

    T.insert(END, "Is this your name?: {}\n".format(result["PERSON"][0]))
    print(result["PERSON"][0])
    print(face_frame)
    if result["PERSON"][0]:
        new_face_image = config.IMAGE_PATH + result["PERSON"][0] + ".jpg"
        cv2.imwrite(new_face_image, face_frame)
        image = face_recognition.load_image_file(new_face_image)
        face_encoding = face_recognition.face_encodings(image)[0]
        face_encoding.tofile(config.ENCODED_IMAGE + result["PERSON"][0] +
                             ".enc")
    T.insert(END, "We have recorded you!\n")