Example #1
def main(filename):

    import cv2
    import numpy as np

    from SolvePnPHeadPoseEstimation import HeadPoseEstimator
    from TFLiteFaceDetector import UltraLightFaceDetecion
    from TFLiteFaceAlignment import CoordinateAlignmentModel

    cap = cv2.VideoCapture(filename)

    # Face detector, 106-point landmark model, and PnP head-pose solver.
    fd = UltraLightFaceDetecion("weights/RFB-320.tflite", conf_threshold=0.95)
    fa = CoordinateAlignmentModel("weights/coor_2d106.tflite")
    hp = HeadPoseEstimator("weights/head_pose_object_points.npy",
                           cap.get(cv2.CAP_PROP_FRAME_WIDTH),
                           cap.get(cv2.CAP_PROP_FRAME_HEIGHT))

    color = (125, 255, 125)

    while True:
        ret, frame = cap.read()

        if not ret:
            break

        bboxes, _ = fd.inference(frame)

        for pred in fa.get_landmarks(frame, bboxes):
            # np.int was removed in NumPy 1.24; use the builtin int instead.
            for p in np.round(pred).astype(int):
                cv2.circle(frame, tuple(p), 1, color, 1, cv2.LINE_AA)
            face_center = np.mean(pred, axis=0)
            euler_angle = hp.get_head_pose(pred).flatten()
            print(*euler_angle)
            hp.draw_axis(frame, euler_angle, face_center)

        cv2.imshow("result", frame)
        # waitKey(0) would pause on every frame; 1 ms keeps the video playing.
        if cv2.waitKey(1) == ord('q'):
            break

    cap.release()
    cv2.destroyAllWindows()
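
For reference, the function is never invoked in the snippet itself; a minimal entry point, assuming the script is called with a video path as its first argument, might look like:

if __name__ == "__main__":
    import sys

    main(sys.argv[1])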
Example #2
if __name__ == "__main__":
    import sys

    import cv2
    from SolvePnPHeadPoseEstimation import HeadPoseEstimator
    from TFLiteFaceAlignment import CoordinateAlignmentModel
    from TFLiteFaceDetector import UltraLightFaceDetecion
    from TFLiteIrisLocalization import IrisLocalizationModel

    gpu_ctx = -1
    video = sys.argv[1]
    YAW_THD = 45

    cap = cv2.VideoCapture(video)

    fd = UltraLightFaceDetecion(
        "pretrained/version-RFB-320_without_postprocessing.tflite",
        conf_threshold=0.9)
    fa = CoordinateAlignmentModel(
        "pretrained/coor_2d106_face_alignment.tflite")
    hp = HeadPoseEstimator("pretrained/head_pose_object_points.npy",
                           cap.get(3), cap.get(4))
    gs = IrisLocalizationModel("pretrained/iris_localization.tflite")

    counter = 0

    while True:
        ret, frame = cap.read()

        if not ret:
            break

        # frame = frame[:480, 380:920, :]  # dress
        # frame = cv2.resize(frame, (960, 1080))
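
The snippet is cut off inside the read loop. Based on the pattern in Example #1, a plausible continuation could use the YAW_THD constant defined above to filter out strongly turned faces; the [pitch, yaw, roll] ordering of the flattened angles is an assumption, not confirmed by the snippet:

        bboxes, _ = fd.inference(frame)

        for landmarks in fa.get_landmarks(frame, bboxes):
            euler_angle = hp.get_head_pose(landmarks).flatten()
            # Assumed ordering of the flattened angles: pitch, yaw, roll.
            pitch, yaw, roll = euler_angle

            # Skip faces turned further sideways than the yaw threshold.
            if abs(yaw) > YAW_THD:
                continue

            # Iris localization with `gs` would presumably follow here;
            # its API is not shown in any of these snippets.

        cv2.imshow("result", frame)
        if cv2.waitKey(1) == ord('q'):
            break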
Example #3
from TFLiteFaceDetector import UltraLightFaceDetecion
from TFLiteFaceAlignment import CoordinateAlignmentModel
from TFLiteIrisLocalization import IrisLocalizationModel
from SolvePnPHeadPoseEstimation import HeadPoseEstimator
from threading import Thread
import cv2
import sys
import numpy as np
from queue import Queue
import socketio

cap = cv2.VideoCapture(sys.argv[1])

fd = UltraLightFaceDetecion(
    "pretrained/version-RFB-320_without_postprocessing.tflite",
    conf_threshold=0.98)
fa = CoordinateAlignmentModel("pretrained/coor_2d106_face_alignment.tflite")
hp = HeadPoseEstimator("pretrained/head_pose_object_points.npy", cap.get(3),
                       cap.get(4))
gs = IrisLocalizationModel("pretrained/iris_localization.tflite")

QUEUE_BUFFER_SIZE = 18

# Bounded queues hand data between the pipeline stages.
box_queue = Queue(maxsize=QUEUE_BUFFER_SIZE)
landmark_queue = Queue(maxsize=QUEUE_BUFFER_SIZE)
iris_queue = Queue(maxsize=QUEUE_BUFFER_SIZE)
upstream_queue = Queue(maxsize=QUEUE_BUFFER_SIZE)

# ======================================================

sio = socketio.Client()
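
The snippet ends before the worker threads that drain these queues are defined. As a sketch of how the first two stages of such a pipeline might be wired (the stage function names are assumptions, not part of the original):

def detection_stage():
    # Read frames and feed detector output to the alignment stage.
    while True:
        ret, frame = cap.read()
        if not ret:
            break
        bboxes, _ = fd.inference(frame)
        box_queue.put((frame, bboxes))  # blocks while the buffer is full


def alignment_stage():
    # Turn detector boxes into dense landmarks for the iris stage.
    while True:
        frame, bboxes = box_queue.get()
        for landmarks in fa.get_landmarks(frame, bboxes):
            landmark_queue.put((frame, landmarks))


Thread(target=detection_stage, daemon=True).start()
Thread(target=alignment_stage, daemon=True).start()

The bounded maxsize gives the pipeline back-pressure: when a downstream stage is slow, put() blocks instead of letting frames pile up. Results destined for the server would presumably flow through upstream_queue into sio.emit calls.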
Example #4

if __name__ == "__main__":
    import sys

    import cv2
    from SolvePnPHeadPoseEstimation import HeadPoseEstimator
    from TFLiteFaceAlignment import CoordinateAlignmentModel
    from TFLiteFaceDetector import UltraLightFaceDetecion
    from TFLiteIrisLocalization import IrisLocalizationModel

    gpu_ctx = -1
    video = sys.argv[1]
    YAW_THD = 45

    cap = cv2.VideoCapture(video)

    fd = UltraLightFaceDetecion("weights/RFB-320.tflite", conf_threshold=0.9)
    fa = CoordinateAlignmentModel("weights/coor_2d106.tflite")
    hp = HeadPoseEstimator("weights/head_pose_object_points.npy", cap.get(3),
                           cap.get(4))
    gs = IrisLocalizationModel("weights/iris_localization.tflite")

    counter = 0

    while True:
        ret, frame = cap.read()

        if not ret:
            break

        # frame = frame[:480, 380:920, :]  # dress
        # frame = cv2.resize(frame, (960, 1080))