import time

import cv2
import edgeiq
import facial_landmarks
# FACIAL_LANDMARKS_IDXS maps facial-part names to their landmark index
# ranges; it is assumed here to come from imutils.face_utils (adjust this
# import if your helper module provides it instead).
from imutils.face_utils import FACIAL_LANDMARKS_IDXS


def main():
    text = "Facial Landmarks with Dlib"
    try:
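        # dlib's pre-trained 68-point facial landmark predictor model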
        shape_predictor = "shape_predictor_68_face_landmarks.dat"
        dlib_flm = facial_landmarks.Dlib_FLM(shape_predictor)

        image_paths = sorted(list(edgeiq.list_images("images/")))
        print("Images:\n{}\n".format(image_paths))

        with edgeiq.Streamer(queue_depth=len(image_paths),
                             inter_msg_time=3) as streamer:
            for image_path in image_paths:
                image = cv2.imread(image_path)
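                # Resize the image and build a grayscale copy for detection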
                image, gray_image = dlib_flm.image_preprocessor(image)
                facial_coordinates, rectangles = dlib_flm.detect_faces_shapes(
                    gray_image)

                # Loop to markup image
                for (i, rectangle) in enumerate(rectangles):
                    (x, y, w, h) = dlib_flm.dlib_rectangle_to_cv_bondingbox(
                        rectangle)
                    cv2.rectangle(image, (x, y), (x + w, y + h),
                                  (0, 255, 0), 2)
                    cv2.putText(image, "Face #{}".format(i + 1),
                                (x - 10, y - 10), cv2.FONT_HERSHEY_SIMPLEX,
                                0.5, (0, 255, 0), 2)

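                # Draw a dot at each detected landmark point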
                for facial_coordinate in facial_coordinates:
                    for (x, y) in facial_coordinate:
                        cv2.circle(image, (x, y), 3, (255, 0, 0), -1)

                streamer.send_data(image, text)
            streamer.wait()
    finally:
        print("Program Ending")
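

# Facial part detection example: each named facial region in
# FACIAL_LANDMARKS_IDXS (jaw, eyebrows, eyes, nose, mouth) is highlighted on
# its own copy of the image and streamed one part at a time.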
def main():
    text = "Facial Part Detection with Dlib"
    try:
        shape_predictor = "shape_predictor_68_face_landmarks.dat"
        dlib_flm = facial_landmarks.Dlib_FLM(shape_predictor)

        image_paths = sorted(list(edgeiq.list_images("images/")))
        print("Images:\n{}\n".format(image_paths))

        with edgeiq.Streamer(queue_depth=len(image_paths), inter_msg_time=3) as streamer:
            for image_path in image_paths:
                image = cv2.imread(image_path)
                resized_image, gray_image = dlib_flm.image_preprocessor(image)
                facial_coordinates, rectangles = dlib_flm.detect_faces_shapes(
                    gray_image)

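                # FACIAL_LANDMARKS_IDXS maps each facial-part name to the
                # (start, end) slice of its 68 landmark indices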
                for facial_coordinate in facial_coordinates:
                    for (name, (i, j)) in FACIAL_LANDMARKS_IDXS.items():
                        print(name)
                        clone = resized_image.copy()
                        cv2.putText(clone, name, (10, 30),
                                    cv2.FONT_HERSHEY_SIMPLEX, 1.0,
                                    (255, 0, 0), 2)
                        for (x, y) in facial_coordinate[i:j]:
                            cv2.circle(clone, (x, y), 3, (255, 0, 0), -1)
                        # Send one image per facial part once all of its
                        # points have been drawn
                        streamer.send_data(clone, text)
            streamer.wait()
    finally:
        print("Program Ending")
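

# Facial overlays example: landmarks from a live webcam feed drive the overlay
# helpers (eye_size, place_mustache, place_eye_patch, place_glasses), which
# this snippet assumes are defined elsewhere in the app.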
def main():

    text = "Facial Overlays with Dlib"

    fps = edgeiq.FPS()

    shape_predictor = "shape_predictor_68_face_landmarks.dat"
    dlib_flm = facial_landmarks.Dlib_FLM(shape_predictor)


    try:
        with edgeiq.WebcamVideoStream(cam=0) as webcam, \
                edgeiq.Streamer() as streamer:
            # Allow webcam to warm up
            time.sleep(2.0)
            fps.start()

            # loop detection
            while True:
                frame = webcam.read()

                resized_frame, gray_resized_frame = dlib_flm.image_preprocessor(frame)
                facial_coordinates, rectangles = dlib_flm.detect_faces_shapes(gray_resized_frame)

                left_eye = 0
                right_eye = 0

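                # For each detected face, collect the left-eye landmarks and
                # apply the overlays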
                for facial_coordinate in facial_coordinates:
                    for (name, (i, j)) in FACIAL_LANDMARKS_IDXS.items():
                        if name == 'left_eye':
                            left_eye = facial_coordinate[i:j]
                        # Uncomment if you want the patch on the right eye
                        # as well.
                        # elif name == 'right_eye':
                        #     right_eye = facial_coordinate[i:j]
                    leftEyeSize, leftEyeCenter = eye_size(left_eye)
                    # Uncomment if you want the patch on the right eye as well.
                    # rightEyeSize, rightEyeCenter = eye_size(right_eye)
                    place_mustache(resized_frame, facial_coordinate)
                    # Uncomment if you want to place spectacles on the face.
                    # place_glasses(resized_frame, facial_coordinate)
                    place_eye_patch(resized_frame, leftEyeCenter, leftEyeSize)
                    # place_eye_patch(resized_frame, rightEyeCenter,
                    #                 rightEyeSize)

                streamer.send_data(resized_frame, text)

                fps.update()

                if streamer.check_exit():
                    break

    finally:
        # stop fps counter and display information
        fps.stop()
        print("[INFO] elapsed time: {:.2f}".format(fps.get_elapsed_seconds()))
        print("[INFO] approx. FPS: {:.2f}".format(fps.compute_fps()))

        print("Program Ending")
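

# Real-time facial landmarks example: the same face-box and landmark markup as
# the still-image app, applied to each webcam frame with an FPS counter.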
def main():

    text = "Facial Landmarks with Dlib"

    fps = edgeiq.FPS()

    shape_predictor = "shape_predictor_68_face_landmarks.dat"
    dlib_flm = facial_landmarks.Dlib_FLM(shape_predictor)

    try:
        with edgeiq.WebcamVideoStream(cam=0) as webcam, \
                edgeiq.Streamer() as streamer:
            # Allow webcam to warm up
            time.sleep(2.0)
            fps.start()

            # loop detection
            while True:
                frame = webcam.read()

                resized_frame, gray_resized_frame = dlib_flm.image_preprocessor(
                    frame)
                facial_coordinates, rectangles = dlib_flm.detect_faces_shapes(
                    gray_resized_frame)

                # Loop to markup resized_frame
                for (i, rectangle) in enumerate(rectangles):
                    (x, y, w, h) = dlib_flm.dlib_rectangle_to_cv_bondingbox(
                        rectangle)
                    cv2.rectangle(resized_frame, (x, y), (x + w, y + h),
                                  (0, 255, 0), 2)
                    cv2.putText(resized_frame, "Face #{}".format(i + 1),
                                (x - 10, y - 10), cv2.FONT_HERSHEY_SIMPLEX,
                                0.5, (0, 255, 0), 2)

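                # Mark each landmark point on the frame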
                for facial_coordinate in facial_coordinates:
                    for (x, y) in facial_coordinate:
                        cv2.circle(resized_frame, (x, y), 1, (255, 0, 0), -1)

                streamer.send_data(resized_frame, text)

                fps.update()

                if streamer.check_exit():
                    break

    finally:
        # stop fps counter and display information
        fps.stop()
        print("[INFO] elapsed time: {:.2f}".format(fps.get_elapsed_seconds()))
        print("[INFO] approx. FPS: {:.2f}".format(fps.compute_fps()))

        print("Program Ending")