Example No. 1
    def get_frame(self):
        success, frame_orig = self.video.read()
        if not success:
            return None
        # OpenCV captures raw BGR frames; each frame is JPEG-encoded at the
        # end of this method so it can be served as a Motion JPEG stream.
        # Resize the frame to half its size for faster face detection
        frame = cv2.resize(src=frame_orig, dsize=(0, 0), fx=0.5, fy=0.5)
        embedding_dict = load_embeddings()

        # Convert the image from BGR color (which OpenCV uses) to RGB color
        frame = frame[:, :, ::-1]

        if frame.size > 0:
            faces, rects = get_faces_live(img=frame,
                                          pnet=pnet,
                                          rnet=rnet,
                                          onet=onet,
                                          image_size=image_size)

            # If there are human faces detected
            if faces:
                for i in range(len(faces)):
                    face_img = faces[i]
                    rect = rects[i]

                    # Scale coordinates of face locations by the resize ratio
                    rect = [coordinate * 2 for coordinate in rect]

                    face_embedding = forward_pass(
                        img=face_img,
                        session=facenet_persistent_session,
                        images_placeholder=images_placeholder,
                        embeddings=embeddings,
                        phase_train_placeholder=phase_train_placeholder,
                        image_size=image_size)

                    # Compare euclidean distance between this embedding and the embeddings in 'embeddings/'
                    identity = identify_face(embedding=face_embedding,
                                             embedding_dict=embedding_dict)

                    cv2.rectangle(img=frame_orig,
                                  pt1=(rect[0], rect[1]),
                                  pt2=(rect[2], rect[3]),
                                  color=(255, 215, 0),
                                  thickness=2)

                    W = int(rect[2] - rect[0]) // 2

                    cv2.putText(img=frame_orig,
                                text=identity,
                                org=(rect[0] + W - (W // 2), rect[1] - 7),
                                fontFace=cv2.FONT_HERSHEY_SIMPLEX,
                                fontScale=0.5,
                                color=(255, 215, 0),
                                thickness=1,
                                lineType=cv2.LINE_AA)

        ret, jpeg = cv2.imencode('.jpg', frame_orig)
        return jpeg.tobytes()
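
A note on usage: since get_frame() returns a single JPEG-encoded frame, it is typically wrapped in a generator that adds the multipart boundaries and served through a streaming route. A minimal sketch, assuming a Flask app and a VideoCamera class exposing the get_frame() method above (both names are illustrative, not confirmed by this snippet):

from flask import Flask, Response

app = Flask(__name__)

def gen(camera):
    # Yield one multipart chunk per JPEG frame so the browser renders
    # the response as a Motion JPEG stream
    while True:
        frame = camera.get_frame()
        if frame is None:  # skip failed camera reads
            continue
        yield (b'--frame\r\n'
               b'Content-Type: image/jpeg\r\n\r\n' + frame + b'\r\n\r\n')

@app.route('/video_feed')
def video_feed():
    # multipart/x-mixed-replace tells the browser to replace the previous
    # frame with each newly received chunk
    return Response(gen(VideoCamera()),
                    mimetype='multipart/x-mixed-replace; boundary=frame')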
Example No. 2
def predict_image(file):
    # Read the image file as a numpy array in RGB order
    img = imread(name=file, mode='RGB')
    # Detect and crop 160 x 160 images containing the human faces found in the file
    faces, rects = get_faces_live(img=img, pnet=pnet, rnet=rnet,
                                  onet=onet, image_size=image_size)
    # If there are human faces detected
    if faces:
        embedding_dict = load_embeddings()
        if embedding_dict:
            people_found = []
            for i in range(len(faces)):
                face_img = faces[i]
                rect = rects[i]

                face_embedding = forward_pass(
                    img=face_img, session=facenet_persistent_session,
                    images_placeholder=images_placeholder, embeddings=embeddings,
                    phase_train_placeholder=phase_train_placeholder,
                    image_size=image_size
                )

                # Compare euclidean distance between this embedding and the embeddings in 'embeddings/'
                identity = identify_face(
                    embedding=face_embedding, embedding_dict=embedding_dict)
                people_found.append(identity)

                cv2.rectangle(img, (rect[0], rect[1]), (rect[2], rect[3]), (0, 255, 0), 3)

                W = int(rect[2] - rect[0]) // 2

                cv2.putText(img, identity, (rect[0] + W - (W // 2), rect[1] - 7),
                            cv2.FONT_HERSHEY_SIMPLEX, 0.5, (255, 0, 255), 1, cv2.LINE_AA)

            return people_found

        else:
            # No embedding files to compare against
            return None
    else:
        # No faces were detected in the image
        return None
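
A hedged usage sketch: predict_image() takes a path and returns either a list of identities or None, so a minimal Flask upload endpoint could call it as follows (the route, form field name, and upload directory are illustrative assumptions):

import os
from flask import Flask, request, jsonify

app = Flask(__name__)
UPLOAD_DIR = 'uploads'  # assumed upload directory

@app.route('/predict', methods=['POST'])
def predict():
    file = request.files['file']  # 'file' is the assumed form field name
    path = os.path.join(UPLOAD_DIR, file.filename)
    file.save(path)
    people = predict_image(path)
    if people is None:
        # Either no faces were found or no embeddings are loaded
        return jsonify(error='No face detected or no embeddings available'), 400
    return jsonify(identities=people)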
Example No. 3
def face_present(image_path):
    img = cv2.imread(image_path, -1)
    save_loc = 'saved_image/new.jpg'
    face_present = False

    faces, rects = get_faces_live(
        img=img,
        pnet=pnet,
        rnet=rnet,
        onet=onet,
        image_size=image_size
    )

    # Save the cropped face; each detected face overwrites the previous file
    for face_img in faces:
        cv2.imwrite(save_loc, face_img)
        face_present = True
        cv2.imwrite('static/saved_images/bounded.jpg', face_img)
    return face_present
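
A short usage example (the image path is illustrative):

if face_present('uploads/test.jpg'):
    print('A face was detected; the cropped face was saved.')
else:
    print('No face detected in the image.')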
Example No. 4
def face_detect_live():
    """Detects faces in real-time via Web Camera."""

    embedding_dict = load_embeddings()
    if embedding_dict:
        try:
            # Start non-blocking multi-threaded OpenCV video stream
            cap = WebcamVideoStream(src=0).start()

            while True:
                frame_orig = cap.read()  # Read frame

                # Resize frame to half its size for faster computation
                frame = cv2.resize(src=frame_orig,
                                   dsize=(0, 0),
                                   fx=0.5,
                                   fy=0.5)

                # Convert the image from BGR color (which OpenCV uses) to RGB color
                frame = frame[:, :, ::-1]

                if cv2.waitKey(1) & 0xFF == ord('q'):
                    break

                if frame.size > 0:
                    faces, rects = get_faces_live(img=frame,
                                                  pnet=pnet,
                                                  rnet=rnet,
                                                  onet=onet,
                                                  image_size=image_size)

                    # If there are human faces detected
                    if faces:
                        for i in range(len(faces)):
                            face_img = faces[i]
                            rect = rects[i]

                            # Scale coordinates of face locations by the resize ratio
                            rect = [coordinate * 2 for coordinate in rect]

                            face_embedding = forward_pass(
                                img=face_img,
                                session=facenet_persistent_session,
                                images_placeholder=images_placeholder,
                                embeddings=embeddings,
                                phase_train_placeholder=phase_train_placeholder,
                                image_size=image_size)

                            # Compare euclidean distance between this embedding and the embeddings in 'embeddings/'
                            identity = identify_face(
                                embedding=face_embedding,
                                embedding_dict=embedding_dict)

                            cv2.rectangle(img=frame_orig,
                                          pt1=(rect[0], rect[1]),
                                          pt2=(rect[2], rect[3]),
                                          color=(255, 215, 0),
                                          thickness=2)

                            W = int(rect[2] - rect[0]) // 2

                            cv2.putText(img=frame_orig,
                                        text=identity,
                                        org=(rect[0] + W - (W // 2),
                                             rect[1] - 7),
                                        fontFace=cv2.FONT_HERSHEY_SIMPLEX,
                                        fontScale=0.5,
                                        color=(255, 215, 0),
                                        thickness=1,
                                        lineType=cv2.LINE_AA)

                    # Keep showing the camera stream even if no human faces are detected
                    cv2.imshow(winname='Video', mat=frame_orig)
                else:
                    continue

            cap.stop()  # Stop multi-threaded Video Stream
            cv2.destroyAllWindows()

            return render_template(template_name_or_list='index.html')

        except Exception as e:
            print(e)

    else:
        return render_template(
            template_name_or_list="warning.html",
            status="No embedding files detected! Please upload image files for embedding!"
        )
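
WebcamVideoStream here is presumably the threaded reader from imutils.video, which grabs frames on a background thread so read() never blocks on camera I/O. A minimal sketch of that pattern in case imutils is not available (the class below is a stand-in, not the library's exact implementation):

import threading

import cv2

class WebcamVideoStream:
    """Minimal threaded frame grabber in the style of imutils.video."""

    def __init__(self, src=0):
        self.stream = cv2.VideoCapture(src)
        self.grabbed, self.frame = self.stream.read()
        self.stopped = False

    def start(self):
        # Read frames on a daemon thread so the main loop never blocks
        threading.Thread(target=self._update, daemon=True).start()
        return self

    def _update(self):
        while not self.stopped:
            self.grabbed, self.frame = self.stream.read()

    def read(self):
        # Return the most recent frame (may repeat if the camera is slow)
        return self.frame

    def stop(self):
        self.stopped = True
        self.stream.release()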
Example No. 5
def face_detect_live():
    """Detects faces in real-time via Web Camera."""

    embedding_dict = load_embeddings()
    if embedding_dict:
        try:
            cap = cv2.VideoCapture(0)

            while True:
                cap.grab()  # For use in multi-camera environments when the cameras do not have hardware synchronization
                return_code, frame_orig = cap.read()  # Read frame

                # Resize frame to half its size for faster computation
                frame = cv2.resize(frame_orig, (0, 0), fx=0.5, fy=0.5)

                # Convert the image from BGR color (which OpenCV uses) to RGB color
                frame = frame[:, :, ::-1]

                if cv2.waitKey(1) & 0xFF == ord('q'):
                    break

                if frame.size > 0:
                    faces, rects = get_faces_live(img=frame, pnet=pnet, rnet=rnet, onet=onet, image_size=image_size)

                    # If there are human faces detected
                    if faces:
                        for i in range(len(faces)):
                            face_img = faces[i]
                            rect = rects[i]

                            # Scale coordinates of face locations by the resize ratio
                            rect = [coordinate * 2 for coordinate in rect]

                            face_embedding = forward_pass(
                                img=face_img,
                                session=facenet_persistent_session,
                                images_placeholder=images_placeholder,
                                embeddings=embeddings,
                                phase_train_placeholder=phase_train_placeholder,
                                image_size=image_size
                            )

                            # Compare euclidean distance between this embedding and the embeddings in 'embeddings/'
                            identity = identify_face(embedding=face_embedding, embedding_dict=embedding_dict)

                            cv2.rectangle(frame_orig, (rect[0], rect[1]), (rect[2], rect[3]), (255, 215, 0), 2)

                            W = int(rect[2] - rect[0]) // 2

                            cv2.putText(frame_orig, identity, (rect[0]+W-(W//2), rect[1]-7),
                                        cv2.FONT_HERSHEY_SIMPLEX, 0.5, (255, 215, 0), 1, cv2.LINE_AA)

                    # Keep showing the camera stream even if no human faces are detected
                    cv2.imshow('Video', frame_orig)
                else:
                    continue

            cap.release()
            cv2.destroyAllWindows()

            return render_template('index.html')

        except Exception as e:
            print(e)

    else:
        return render_template(
            "warning.html",
            status="No embedding files detected! Please upload image files for embedding!"
        )
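
One caveat worth noting: frame[:, :, ::-1] produces a negative-stride view rather than a copy, and some OpenCV functions require contiguous arrays. Two safer equivalents, shown as a self-contained sketch:

import cv2
import numpy as np

frame = np.zeros((480, 640, 3), dtype=np.uint8)  # stand-in for a BGR frame

rgb_view = frame[:, :, ::-1]                         # view, no copy, negative strides
rgb_copy = cv2.cvtColor(frame, cv2.COLOR_BGR2RGB)    # contiguous copy
rgb_copy2 = np.ascontiguousarray(frame[:, :, ::-1])  # also a contiguous copy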
Example No. 6
def get_frame():

    embedding_dict = load_embeddings()
    if embedding_dict:
        try:
            cap = cv2.VideoCapture(0)
            cap.set(cv2.CAP_PROP_FRAME_WIDTH, 500)
            cap.set(cv2.CAP_PROP_FRAME_HEIGHT, 500)

            while True:
                # Get camera frame
                ret, frame = cap.read()
                if not ret:
                    continue

                # Resize the frame (to 80%) for faster computation
                frame = cv2.resize(src=frame, dsize=(0, 0), fx=0.8, fy=0.8)

                # Note: unlike the other examples, detection here runs on the
                # BGR frame directly; the BGR-to-RGB conversion was omitted

                if frame.size > 0:
                    faces, rects = get_faces_live(img=frame,
                                                  pnet=pnet,
                                                  rnet=rnet,
                                                  onet=onet,
                                                  image_size=image_size)

                    if faces:
                        for i in range(len(faces)):
                            face_img = faces[i]
                            rect = rects[i]

                            # Boxes are drawn on the resized frame itself,
                            # so the coordinates need no rescaling

                            face_embedding = forward_pass(
                                img=face_img,
                                session=facenet_persistent_session,
                                images_placeholder=images_placeholder,
                                embeddings=embeddings,
                                phase_train_placeholder=phase_train_placeholder,
                                image_size=image_size)

                            # Compare euclidean distance between this embedding and the embeddings in 'embeddings/'
                            identity = identify_face(
                                embedding=face_embedding,
                                embedding_dict=embedding_dict)

                            cv2.rectangle(img=frame,
                                          pt1=(rect[0], rect[1]),
                                          pt2=(rect[2], rect[3]),
                                          color=(0, 0, 255),
                                          thickness=2)

                            W = int(rect[2] - rect[0]) // 2

                            cv2.putText(img=frame,
                                        text=identity,
                                        org=(rect[0] + W - (W // 2),
                                             rect[1] - 7),
                                        fontFace=cv2.FONT_HERSHEY_SIMPLEX,
                                        fontScale=0.5,
                                        color=(0, 0, 255),
                                        thickness=1,
                                        lineType=cv2.LINE_AA)

                    # Encode the (possibly annotated) frame as JPEG
                    ret, jpeg = cv2.imencode('.jpg', frame)
                    yield (b'--frame\r\n'
                           b'Content-Type: image/jpeg\r\n\r\n' +
                           jpeg.tobytes() + b'\r\n\r\n')

                else:
                    continue

        except Exception as e:
            print(e)
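
Because this get_frame() already yields multipart JPEG chunks, it is usually plugged directly into a streaming response. A minimal sketch, assuming a Flask app (the route name is illustrative):

from flask import Flask, Response

app = Flask(__name__)

@app.route('/video_feed')
def video_feed():
    # The boundary string must match the b'--frame' marker in get_frame()
    return Response(get_frame(),
                    mimetype='multipart/x-mixed-replace; boundary=frame')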
Example No. 7
def face_detect_live():
    """Detects faces in real-time via Web Camera."""

    embedding_dict = load_embeddings()
    if embedding_dict:
        try:
            cap = cv2.VideoCapture(0)

            while True:
                return_code, frame = cap.read()  # BGR frame

                if cv2.waitKey(1) & 0xFF == ord('q'):
                    break

                if frame.size > 0:
                    faces, rects = get_faces_live(img=frame,
                                                  pnet=pnet,
                                                  rnet=rnet,
                                                  onet=onet,
                                                  image_size=image_size)
                    # If there are human faces detected
                    if faces:
                        for i in range(len(faces)):
                            face_img = faces[i]
                            rect = rects[i]

                            face_embedding = forward_pass(
                                img=face_img,
                                session=facenet_persistent_session,
                                images_placeholder=images_placeholder,
                                embeddings=embeddings,
                                phase_train_placeholder=phase_train_placeholder,
                                image_size=image_size)

                            # Compare euclidean distance between this embedding and the embeddings in 'embeddings/'
                            identity = identify_face(
                                embedding=face_embedding,
                                embedding_dict=embedding_dict)

                            cv2.rectangle(frame, (rect[0], rect[1]),
                                          (rect[2], rect[3]), (255, 215, 0), 2)

                            W = int(rect[2] - rect[0]) // 2

                            cv2.putText(frame, identity,
                                        (rect[0] + W - (W // 2), rect[1] - 7),
                                        cv2.FONT_HERSHEY_SIMPLEX, 0.5,
                                        (255, 215, 0), 1, cv2.LINE_AA)

                    # Keep showing the camera stream even if no human faces are detected
                    cv2.imshow('Video', frame)
                else:
                    continue

            cap.release()
            cv2.destroyAllWindows()
            return render_template('index.html')
        except Exception as e:
            print(e)
    else:
        return "No embedding files detected! Please upload image files for embedding!"
Example No. 8
def face_detect_live():
    # Load text reading engine
    #engine = pyttsx3.init()
    spoken_face_names = []
    greetings = [
        'How do you do', 'Hello', 'Hi', 'Hai', 'Hey', 'How have you been',
        'How are you', 'How is it going', 'Salam alikom', 'Esh loonak ya',
        'Ahlaaaan'
    ]

    embedding_dict = load_embeddings()
    if embedding_dict:
        try:
            cap = cv2.VideoCapture(0)

            while True:
                return_code, frame = cap.read()  # BGR frame

                if cv2.waitKey(1) & 0xFF == ord('q'):
                    break

                faces, rects = get_faces_live(img=frame,
                                              pnet=pnet,
                                              rnet=rnet,
                                              onet=onet,
                                              image_size=image_size)
                # If there are human faces detected
                if faces:
                    for i in range(len(faces)):
                        face_img = faces[i]
                        rect = rects[i]

                        face_embedding = forward_pass(
                            img=face_img,
                            session=facenet_persistent_session,
                            images_placeholder=images_placeholder,
                            embeddings=embeddings,
                            phase_train_placeholder=phase_train_placeholder,
                            image_size=image_size)

                        # Compare euclidean distance between this embedding and the embeddings in 'embeddings/'
                        identity = identify_face(embedding=face_embedding,
                                                 embedding_dict=embedding_dict)

                        cv2.rectangle(frame, (rect[0], rect[1]),
                                      (rect[2], rect[3]), (255, 215, 0), 2)

                        W = int(rect[2] - rect[0]) // 2

                        cv2.putText(frame, identity,
                                    (rect[0] + W - (W // 2), rect[1] - 7),
                                    cv2.FONT_HERSHEY_SIMPLEX, 0.5,
                                    (255, 215, 0), 1, cv2.LINE_AA)

                        if identity == "Unknown":
                            continue
                        elif identity in spoken_face_names:
                            continue
                        else:
                            print(random.choice(greetings) + " " + identity)
                            #engine.say(random.choice(greetings) + name)
                            #engine.runAndWait()
                            spoken_face_names.append(identity)
                            continue

                    cv2.imshow('Video', frame)
                else:
                    continue

            cap.release()
            cv2.destroyAllWindows()
            return render_template('index.html')
        except Exception as e:
            print(e)
    else:
        return "No loaded faces detected! Please upload image files for embedding!"