# Example No. 1 (scraped code-sample header; original: "Ejemplo n.º 1", vote count 0)
    def get_biggest_face(self, gray_image, emotion_offsets):
        """Return the crop of the largest detected face and its coordinates.

        Runs the detector over *gray_image*, applies *emotion_offsets* to
        each detection, and keeps the bounding box with the greatest area.
        Returns ``(crop, [x1, x2, y1, y2])``.  When no face is detected the
        coordinates stay ``[0, 0, 0, 0]`` and the crop is empty.
        """
        faces, _score, _idx = self.detect_faces(gray_image)
        best_area = 0
        best = [0, 0, 0, 0]
        # Select the biggest face among all detections.
        for face in faces:
            box = make_face_coordinates(face)
            x1, x2, y1, y2 = apply_offsets(box, emotion_offsets)
            area = abs((x1 - x2) * (y1 - y2))
            if area > best_area:
                best_area = area
                best = [x1, x2, y1, y2]

        return gray_image[best[2]:best[3], best[0]:best[1]], best
# Rolling histories of recent predictions — presumably used to smooth
# per-frame gender/emotion outputs over time; confirm against later code.
gender_window = []
emotion_window = []

# starting video streaming
cv2.namedWindow('window_frame')
# Capture from the default camera (device index 0).
video_capture = cv2.VideoCapture(0)
while True:

    # NOTE(review): read() returns (ok, frame); ok is discarded, so a failed
    # grab leaves bgr_image as None and the cvtColor below will raise —
    # confirm whether the stream is guaranteed to stay open.
    bgr_image = video_capture.read()[1]
    gray_image = cv2.cvtColor(bgr_image, cv2.COLOR_BGR2GRAY)
    rgb_image = cv2.cvtColor(bgr_image, cv2.COLOR_BGR2RGB)
    detected_faces, score, idx = detect_faces(face_detection, gray_image)

    for detected_face in detected_faces:

        face_coordinates = make_face_coordinates(detected_face)

        # Crop the RGB face region (offset-padded) for the gender model.
        x1, x2, y1, y2 = apply_offsets(face_coordinates, gender_offsets)
        rgb_face = rgb_image[y1:y2, x1:x2]

        # Crop the grayscale face region (offset-padded) for the emotion model.
        x1, x2, y1, y2 = apply_offsets(face_coordinates, emotion_offsets)
        gray_face = gray_image[y1:y2, x1:x2]
        try:
            rgb_face = cv2.resize(rgb_face, (gender_target_size))
            gray_face = cv2.resize(gray_face, (emotion_target_size))
        # NOTE(review): bare except silently skips faces whose crop is empty
        # (cv2.resize raises cv2.error on zero-sized input); prefer
        # `except cv2.error` so unrelated bugs are not masked.
        except:
            continue
        # preprocess_input presumably normalizes pixel values for the
        # network — confirm against its definition.  Then add batch and
        # channel axes: (h, w) -> (1, h, w, 1).
        gray_face = preprocess_input(gray_face, False)
        gray_face = np.expand_dims(gray_face, 0)
        gray_face = np.expand_dims(gray_face, -1)
        emotion_label_arg = np.argmax(emotion_classifier.predict(gray_face))