Example #1
import time

import cv2

from TFLiteFaceDetector import UltraLightFaceDetecion


def video_inference(video, model_path, color=(125, 255, 0)):

    fd = UltraLightFaceDetecion(model_path, conf_threshold=0.88)

    cap = cv2.VideoCapture(video)

    while True:
        ret, frame = cap.read()

        if not ret:
            break

        # Time a single detector pass and print the latency in seconds.
        start_time = time.perf_counter()
        boxes, scores = fd.inference(frame)
        print(time.perf_counter() - start_time)

        # Draw a rectangle around each detected face.
        for result in boxes.astype(int):
            cv2.rectangle(frame, (result[0], result[1]),
                          (result[2], result[3]), color, 2)

        cv2.imshow('res', frame)
        if cv2.waitKey(1) == ord('q'):  # press 'q' to quit
            break

    cap.release()
    cv2.destroyAllWindows()
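The function above ships without a driver in this excerpt. A minimal call sketch; the video path is a placeholder, and the weights path mirrors the file used in Example #2:

if __name__ == '__main__':
    # Placeholder paths; point these at your own video and weights.
    video_inference("input.mp4", "weights/RFB-320.tflite")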
Example #2
def main(filename):

    import cv2
    import numpy as np

    from TFLiteFaceDetector import UltraLightFaceDetecion
    from TFLiteFaceAlignment import CoordinateAlignmentModel
    from SolvePnPHeadPoseEstimation import HeadPoseEstimator

    cap = cv2.VideoCapture(filename)

    fd = UltraLightFaceDetecion("weights/RFB-320.tflite", conf_threshold=0.95)
    fa = CoordinateAlignmentModel("weights/coor_2d106.tflite")
    hp = HeadPoseEstimator("weights/head_pose_object_points.npy",
                           cap.get(cv2.CAP_PROP_FRAME_WIDTH),
                           cap.get(cv2.CAP_PROP_FRAME_HEIGHT))

    color = (125, 255, 125)

    while True:
        ret, frame = cap.read()

        if not ret:
            break

        bboxes, _ = fd.inference(frame)

    for pred in fa.get_landmarks(frame, bboxes):
            # Draw the landmark points (np.int was removed in NumPy 1.24;
            # the plain built-in int behaves identically here).
            for p in np.round(pred).astype(int):
                cv2.circle(frame, tuple(p), 1, color, 1, cv2.LINE_AA)
            face_center = np.mean(pred, axis=0)
            euler_angle = hp.get_head_pose(pred).flatten()
            print(*euler_angle)
            hp.draw_axis(frame, euler_angle, face_center)

        cv2.imshow("result", frame)
        if cv2.waitKey(0) == ord('q'):
            break
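The entry point that calls main is not shown in this excerpt. A minimal sketch, assuming the video path arrives as the first command-line argument (the pattern used in Examples #4 and #6):

if __name__ == '__main__':
    import sys

    main(sys.argv[1])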
Example #3
import cv2

from TFLiteFaceDetector import UltraLightFaceDetecion


def image_inference(image_path, model_path, color=(125, 255, 0)):

    fd = UltraLightFaceDetecion(model_path, conf_threshold=0.6)

    img = cv2.imread(image_path)

    boxes, scores = fd.inference(img)

    for result in boxes.astype(int):
        cv2.rectangle(img, (result[0], result[1]), (result[2], result[3]),
                      color, 2)

    cv2.imshow('res', img)
    cv2.waitKey(0)
    cv2.destroyAllWindows()
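A minimal call sketch for the single-image variant; the image path is a placeholder, and the weights path again mirrors Example #2:

if __name__ == '__main__':
    # Placeholder paths; point these at your own image and weights.
    image_inference("face.jpg", "weights/RFB-320.tflite")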
Example #4
            out = self._inference(inp)
            pred = self._postprocessing(out, M)

            # A calibration/smoothing pass, left disabled in the original:
            # self._calibrate(pred, 1, skip=6)
            # yield self.pre_landmarks

            # Yield the landmark predictions one face at a time.
            yield pred


if __name__ == '__main__':

    import sys
    import time

    import cv2

    from TFLiteFaceDetector import UltraLightFaceDetecion
    # CoordinateAlignmentModel is defined earlier in this same module.

    fd = UltraLightFaceDetecion(
        "pretrained/version-RFB-320_without_postprocessing.tflite",
        conf_threshold=0.88)
    fa = CoordinateAlignmentModel(
        "pretrained/coor_2d106_face_alignment.tflite")

    cap = cv2.VideoCapture(sys.argv[1])
    color = (125, 255, 125)

    while True:
        ret, frame = cap.read()

        if not ret:
            break

        start_time = time.perf_counter()
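        # --- The excerpt ends above, just after the timer starts. The
        # --- continuation below is a guess modeled on Examples #1 and #2,
        # --- not the original file.
        bboxes, _ = fd.inference(frame)
        for pred in fa.get_landmarks(frame, bboxes):
            for p in pred.round().astype(int):
                cv2.circle(frame, tuple(p), 1, color, 1, cv2.LINE_AA)
        print(time.perf_counter() - start_time)  # per-frame latency

        cv2.imshow("result", frame)
        if cv2.waitKey(1) == ord('q'):
            break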
Example #5
# coding: utf-8
import sys
from queue import Queue
from threading import Thread

import cv2
import numpy as np
import socketio

from TFLiteFaceDetector import UltraLightFaceDetecion
from TFLiteFaceAlignment import CoordinateAlignmentModel
from TFLiteIrisLocalization import IrisLocalizationModel
from SolvePnPHeadPoseEstimation import HeadPoseEstimator

cap = cv2.VideoCapture(sys.argv[1])

fd = UltraLightFaceDetecion(
    "pretrained/version-RFB-320_without_postprocessing.tflite",
    conf_threshold=0.98)
fa = CoordinateAlignmentModel("pretrained/coor_2d106_face_alignment.tflite")
hp = HeadPoseEstimator("pretrained/head_pose_object_points.npy",
                       cap.get(cv2.CAP_PROP_FRAME_WIDTH),
                       cap.get(cv2.CAP_PROP_FRAME_HEIGHT))
gs = IrisLocalizationModel("pretrained/iris_localization.tflite")

QUEUE_BUFFER_SIZE = 18

# Bounded queues that pass intermediate results between the threaded
# pipeline stages; put() blocks once a stage falls 18 items behind.
box_queue = Queue(maxsize=QUEUE_BUFFER_SIZE)
landmark_queue = Queue(maxsize=QUEUE_BUFFER_SIZE)
iris_queue = Queue(maxsize=QUEUE_BUFFER_SIZE)
upstream_queue = Queue(maxsize=QUEUE_BUFFER_SIZE)

# ======================================================
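The snippet is truncated at the separator, but the bounded queues and the Thread import point to a producer/consumer pipeline. A minimal sketch of what the first stage could look like under that assumption; detection_worker and the (frame, bboxes) hand-off format are my own names, not taken from the repo:

# Hypothetical first pipeline stage (assumption, not the original source):
# read frames, run the detector, and hand (frame, bboxes) downstream.
def detection_worker():
    while True:
        ret, frame = cap.read()
        if not ret:
            box_queue.put(None)  # sentinel telling later stages to stop
            break
        bboxes, _ = fd.inference(frame)
        box_queue.put((frame, bboxes))  # blocks when the queue is full


Thread(target=detection_worker, daemon=True).start()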
Example #6
        return theta, pha, delta.T


if __name__ == "__main__":
    import sys
    from SolvePnPHeadPoseEstimation import HeadPoseEstimator
    from TFLiteFaceAlignment import CoordinateAlignmentModel
    from TFLiteFaceDetector import UltraLightFaceDetecion

    gpu_ctx = -1
    video = sys.argv[1]
    YAW_THD = 45

    cap = cv2.VideoCapture(video)

    fd = UltraLightFaceDetecion("weights/RFB-320.tflite", conf_threshold=0.9)
    fa = CoordinateAlignmentModel("weights/coor_2d106.tflite")
    hp = HeadPoseEstimator("weights/head_pose_object_points.npy",
                           cap.get(cv2.CAP_PROP_FRAME_WIDTH),
                           cap.get(cv2.CAP_PROP_FRAME_HEIGHT))
    gs = IrisLocalizationModel("weights/iris_localization.tflite")

    counter = 0

    while True:
        ret, frame = cap.read()

        if not ret:
            break

        # frame = frame[:480, 380:920, :]  # dress
        # frame = cv2.resize(frame, (960, 1080))
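        # --- The excerpt ends above, mid-loop. The continuation below is a
        # --- hypothetical sketch (not the original source) of how YAW_THD
        # --- might gate iris processing, following Example #2's pattern.
        bboxes, _ = fd.inference(frame)
        for pred in fa.get_landmarks(frame, bboxes):
            euler_angle = hp.get_head_pose(pred).flatten()
            yaw = euler_angle[1]  # assumed (pitch, yaw, roll) ordering
            if abs(yaw) < YAW_THD:
                # Face is frontal enough; the gs iris model would run here.
                counter += 1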