Example No. 1
    def run(self, camera_index=0):
        cap = cv2.VideoCapture(camera_index)
        cap.set(cv2.CAP_PROP_FRAME_WIDTH, 1280)
        cap.set(cv2.CAP_PROP_FRAME_HEIGHT, 720)
        print('press q to exit')
        while cap.isOpened():
            ret, frame = cap.read()
            if not ret:
                raise Exception(
                    'camera not recognized: try changing the camera_index param to '
                    + str(0 if camera_index == 1 else 1))
            faces, boxes, scores, landmarks = self.face_detector.detect_align(
                frame)
            if len(faces.shape) > 1:
                emotions, emo_probs = self.emotion_detector.detect_emotion(
                    faces)
                for i, b in enumerate(boxes):
                    special_draw(frame,
                                 b,
                                 landmarks[i],
                                 name=emotions[i],
                                 score=emo_probs[i])

            cv2.imshow('frame', frame)
            if cv2.waitKey(1) == ord('q'):
                break

        cap.release()
        cv2.destroyAllWindows()
Example No. 2
    def run(self, camera_index=0):
        if len(self.targets) < 1:
            raise Exception(
                "you don't have any person in facebank: add new person with 'add_from_webcam' or 'add_from_folder' function"
            )

        cap = cv2.VideoCapture(camera_index)
        cap.set(cv2.CAP_PROP_FRAME_WIDTH, 1280)
        cap.set(cv2.CAP_PROP_FRAME_HEIGHT, 720)
        # frame rate is only about 6 FPS because my laptop is quite slow...
        print('press q to exit')
        while cap.isOpened():
            ret, frame = cap.read()
            if not ret:
                raise Exception(
                    'camera not recognized: try changing the camera_index param to '
                    + str(0 if camera_index == 1 else 1))
            faces, boxes, scores, landmarks = self.detector.detect_align(frame)
            if len(faces.shape) > 1:
                results, score = self.recognizer.infer(self.conf,
                                                       faces,
                                                       self.targets,
                                                       tta=self.tta)
                for idx, bbox in enumerate(boxes):
                    special_draw(frame, bbox, landmarks[idx],
                                 self.names[results[idx] + 1], score[idx])
            cv2.imshow('face Capture', frame)
            if cv2.waitKey(1) & 0xFF == ord('q'):
                break

        cap.release()
        cv2.destroyAllWindows()
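
Before Example No. 2's run() can recognize anyone, the facebank has to contain at least one person, as the exception above points out. A minimal enrollment sketch follows; the class name and argument names are placeholders chosen for illustration, and only the method names add_from_webcam and add_from_folder come from the exception message, so check them against the facelib API.

# Hypothetical enrollment sketch: 'FaceRecognitionWebcam' and the arguments are
# placeholders; only add_from_webcam / add_from_folder are named in the example above.
recognizer = FaceRecognitionWebcam()             # the class that owns run() above (assumed name)
recognizer.add_from_webcam('alice')              # enroll a new person from the webcam (assumed signature)
# recognizer.add_from_folder('facebank/alice')   # or enroll from a folder of images (assumed signature)
recognizer.run(camera_index=0)                   # then recognition runs as in Example No. 2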
Example No. 3
    face_rec = FaceRecognizer(conf, inference=True)
    face_rec.threshold = args.threshold
    face_rec.model.eval()

    if args.update:
        targets, names = update_facebank(conf,
                                         face_rec.model,
                                         detector,
                                         tta=args.tta)
    else:
        targets, names = load_facebank(conf)

    # init camera
    cap = cv2.VideoCapture(1)
    cap.set(cv2.CAP_PROP_FRAME_WIDTH, 1280)
    cap.set(cv2.CAP_PROP_FRAME_HEIGHT, 720)
    # frame rate is only about 6 FPS because my laptop is quite slow...
    while cap.isOpened():
        ret, frame = cap.read()
        if not ret:
            break
        faces, boxes, scores, landmarks = detector.detect_align(frame)
        if len(faces.shape) > 1:
            results, score = face_rec.infer(conf, faces, targets, args.tta)
            for idx, bbox in enumerate(boxes):
                special_draw(frame, bbox, landmarks[idx],
                             names[results[idx] + 1], score[idx])
        cv2.imshow('face Capture', frame)
        if cv2.waitKey(1) & 0xFF == ord('q'):
            break

    cap.release()
    cv2.destroyAllWindows()
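
Example No. 3 relies on an args object whose parser is not shown. The flag names below are inferred from args.update, args.threshold and args.tta in the snippet; the defaults and help texts are assumptions.

import argparse

# Assumed command-line flags for Example No. 3 (names inferred from the snippet
# above, defaults are guesses).
parser = argparse.ArgumentParser(description='facelib webcam face recognition')
parser.add_argument('--update', action='store_true',
                    help='rebuild the facebank instead of loading the saved one')
parser.add_argument('--threshold', type=float, default=1.0,
                    help='recognition distance threshold')
parser.add_argument('--tta', action='store_true',
                    help='use test-time augmentation during inference')
args = parser.parse_args()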
Example No. 4
from facelib import FaceDetector, EmotionDetector, special_draw
import cv2

face_detector = FaceDetector(name='mobilenet',
                             weight_path='../Retinaface/weights/mobilenet.pth',
                             device='cuda',
                             face_size=(224, 224))
emotion_detector = EmotionDetector(name='densnet121',
                                   weight_path='weights/densnet121.pth',
                                   device='cuda')

vid = cv2.VideoCapture(1)
vid.set(cv2.CAP_PROP_FRAME_WIDTH, 1280)
vid.set(cv2.CAP_PROP_FRAME_HEIGHT, 720)
while True:
    ret, frame = vid.read()
    if not ret:
        break
    faces, boxes, scores, landmarks = face_detector.detect_align(frame)
    if len(faces.shape) > 1:
        emotions, emo_probs = emotion_detector.detect_emotion(faces)
        for i, b in enumerate(boxes):
            special_draw(frame,
                         b,
                         landmarks[i],
                         name=emotions[i],
                         score=emo_probs[i])

    cv2.imshow('frame', frame)
    if cv2.waitKey(1) == ord('q'):
        break

vid.release()
cv2.destroyAllWindows()
Example No. 5
from facelib import FaceDetector, AgeGenderEstimator, special_draw
import cv2
from time import time

face_detector = FaceDetector(name='mobilenet',
                             weight_path='../Retinaface/weights/mobilenet.pth',
                             device='cuda')
age_gender_detector = AgeGenderEstimator(
    name='full', weight_path='weigths/ShufflenetFull.pth', device='cuda')

vid = cv2.VideoCapture(1)
vid.set(cv2.CAP_PROP_FRAME_WIDTH, 1280)
vid.set(cv2.CAP_PROP_FRAME_HEIGHT, 720)
while True:
    ret, frame = vid.read()
    if not ret:
        break
    faces, boxes, scores, landmarks = face_detector.detect_align(frame)
    if len(faces.shape) > 1:
        tic = time()
        genders, ages = age_gender_detector.detect(faces)
        print(time() - tic)  # age/gender inference time in seconds
        for i, b in enumerate(boxes):
            special_draw(frame,
                         b,
                         landmarks[i],
                         name=genders[i] + ' ' + str(ages[i]))

    cv2.imshow('frame', frame)
    if cv2.waitKey(1) == ord('q'):
        break

vid.release()
cv2.destroyAllWindows()
Example No. 6
import cv2
from time import time

from facelib import FaceDetector, special_draw

detector = FaceDetector(name='mobilenet',
                        weight_path='weights/mobilenet.pth',
                        device='cuda',
                        face_size=(224, 224))

cap = cv2.VideoCapture(1)
cap.set(cv2.CAP_PROP_FRAME_WIDTH, 1280)
cap.set(cv2.CAP_PROP_FRAME_HEIGHT, 720)
while True:
    ret, frame = cap.read()
    if not ret:
        break
    tic = time()
    # boxes, scores, landmarks = detector.detect_faces(frame)
    faces, boxes, scores, landmarks = detector.detect_align(frame)
    print('forward time: ', time() - tic)
    if len(faces.shape) > 1:
        for idx, bbox in enumerate(boxes):
            special_draw(frame,
                         bbox,
                         landmarks[idx],
                         name='face',
                         score=scores[idx])

    # show the frame and poll for 'q' on every iteration, not only when a face is found
    cv2.imshow('frame', frame)
    if cv2.waitKey(1) & 0xFF == ord('q'):
        break

cap.release()
cv2.destroyAllWindows()
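
The same detector also works on a single image instead of a webcam stream. Below is a minimal sketch, assuming the same weights as Example No. 6 and a placeholder image path; it only uses calls that already appear in the examples above.

import cv2
from facelib import FaceDetector, special_draw

detector = FaceDetector(name='mobilenet',
                        weight_path='weights/mobilenet.pth',
                        device='cuda')

frame = cv2.imread('test.jpg')  # placeholder path to any test image
faces, boxes, scores, landmarks = detector.detect_align(frame)
if len(faces.shape) > 1:
    for idx, bbox in enumerate(boxes):
        special_draw(frame, bbox, landmarks[idx], name='face', score=scores[idx])
cv2.imwrite('result.jpg', frame)  # save the annotated image instead of opening a window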