Example #1
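Both update_buffer variants below rely on OpenCV, NumPy and a handful of helper functions in the style of the face_classification utilities. A minimal sketch of the module-level imports they appear to assume; the utils.* module paths are an assumption and may differ in the actual project:

import cv2
import numpy as np
from statistics import StatisticsError, mode

# assumed helper modules (face_classification-style layout)
from utils.datasets import get_labels
from utils.inference import (apply_offsets, detect_faces, draw_bounding_box,
                             draw_text, load_detection_model)
from utils.preprocessor import preprocess_input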
    def update_buffer(self):

        detection_model_path = './Engine/trained_models/detection_models/haarcascade_frontalface_default.xml'
        face_detection = load_detection_model(detection_model_path)
        # hyper-parameters for bounding boxes shape
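        # frame_window: number of recent predictions that feed the rolling mode below
        # emotion_offsets: extra (x, y) margin added around each detected face crop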
        frame_window = 10
        emotion_offsets = (20, 40)
        emotion_labels = get_labels('fer2013')

        # starting lists for calculating modes
        emotion_window = []

        while True:

            if self.VideoStreamer.more():

                bgr_image = self.VideoStreamer.read()

                # sleep(1/30)

                # bgr_image = trim_frame(bgr_image)
                gray_image = cv2.cvtColor(bgr_image, cv2.COLOR_BGR2GRAY)
                rgb_image = cv2.cvtColor(bgr_image, cv2.COLOR_BGR2RGB)

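                # run the Haar cascade on the grayscale frame; yields (x, y, w, h) boxes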
                faces = detect_faces(face_detection, gray_image)
                # print(str(faces))

                for face_coordinates in faces:

                    x1, x2, y1, y2 = apply_offsets(face_coordinates,
                                                   emotion_offsets)
                    gray_face = gray_image[y1:y2, x1:x2]
                    try:
                        gray_face = cv2.resize(gray_face,
                                               self.emotion_target_size)
                    except cv2.error:
                        # skip crops that cannot be resized (e.g. empty slices)
                        continue

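                    # normalise the face crop and add batch/channel axes -> shape (1, H, W, 1)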
                    gray_face = preprocess_input(gray_face, True)
                    gray_face = np.expand_dims(gray_face, 0)
                    gray_face = np.expand_dims(gray_face, -1)

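                    # predictions can run on a different thread than the one that loaded
                    # the Keras model, so the stored TensorFlow graph is made current first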
                    with self.graph.as_default():
                        emotion_prediction = self.emotion_classifier.predict(
                            gray_face)
                        # print(str(emotion_prediction))

                    # angry = emotion_prediction[0][0]
                    # disgust = emotion_prediction[0][1]
                    # fear = emotion_prediction[0][2]
                    # happy = emotion_prediction[0][3]
                    # sad = emotion_prediction[0][4]
                    # surprise = emotion_prediction[0][5]
                    # neutral = emotion_prediction[0][6]

                    # with open('../emotion.txt', 'a') as f:
                    #     f.write('{},{},{},{},{},{},{}\n'.format(angry, disgust, fear, happy, sad, surprise, neutral))

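                    # most probable emotion for this face and its confidence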
                    emotion_probability = np.max(emotion_prediction)
                    emotion_label_arg = np.argmax(emotion_prediction)
                    emotion_text = emotion_labels[emotion_label_arg]
                    emotion_window.append(emotion_text)

                    if len(emotion_window) > frame_window:
                        emotion_window.pop(0)  # drop the oldest prediction
                    try:
                        emotion_mode = mode(emotion_window)
                    except StatisticsError:
                        # no unique mode yet; wait for more frames
                        continue

                    if emotion_text == 'angry':
                        color = emotion_probability * np.asarray((255, 0, 0))
                    elif emotion_text == 'sad':
                        color = emotion_probability * np.asarray((0, 0, 255))
                    elif emotion_text == 'happy':
                        color = emotion_probability * np.asarray((255, 255, 0))
                    elif emotion_text == 'surprise':
                        color = emotion_probability * np.asarray((0, 255, 255))
                    else:
                        color = emotion_probability * np.asarray((0, 255, 0))

                    color = color.astype(int)
                    color = color.tolist()

                    draw_bounding_box(face_coordinates, rgb_image, color)
                    draw_text(face_coordinates, rgb_image, emotion_mode, color,
                              0, -45, 1, 1)

                # hand the annotated frame to the output queue once per frame;
                # drop it if the consumer has not caught up and the queue is full
                bgr_image = cv2.cvtColor(rgb_image, cv2.COLOR_RGB2BGR)

                if not self.Q.full():
                    self.Q.put(bgr_image)

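    # variant: shows each annotated frame in an OpenCV window and queues
    # (emotion label, probability) pairs instead of whole frames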
    def update_buffer(self):

        detection_model_path = './Engine/trained_models/detection_models/haarcascade_frontalface_default.xml'
        face_detection = load_detection_model(detection_model_path)
        # hyper-parameters for bounding boxes shape
        frame_window = 10
        emotion_offsets = (20, 40)
        emotion_labels = get_labels('fer2013')

        # starting lists for calculating modes
        emotion_window = []

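        # resizable preview window for the annotated frames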
        cv2.namedWindow('image', cv2.WINDOW_NORMAL)

        while True:

            if self.VStreamer.more():

                bgr_image = self.VStreamer.read()

                # sleep(1/30)

                # bgr_image = trim_frame(bgr_image)
                gray_image = cv2.cvtColor(bgr_image, cv2.COLOR_BGR2GRAY)

                rgb_image = cv2.cvtColor(bgr_image, cv2.COLOR_BGR2RGB)

                faces = detect_faces(face_detection, gray_image)
                # print(str(faces))

                for face_coordinates in faces:

                    x1, x2, y1, y2 = apply_offsets(face_coordinates,
                                                   emotion_offsets)
                    gray_face = gray_image[y1:y2, x1:x2]
                    try:
                        gray_face = cv2.resize(gray_face,
                                               self.emotion_target_size)
                    except cv2.error:
                        # skip crops that cannot be resized
                        continue

                    gray_face = preprocess_input(gray_face, True)
                    gray_face = np.expand_dims(gray_face, 0)
                    gray_face = np.expand_dims(gray_face, -1)

                    with self.graph.as_default():
                        emotion_prediction = self.emotion_classifier.predict(
                            gray_face)

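                    # top-1 emotion label and its predicted probability for this face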
                    percentage = np.amax(emotion_prediction)
                    emotion_label_arg = np.argmax(emotion_prediction)
                    emotion_text = emotion_labels[emotion_label_arg]

                    # convert to a plain int list; cv2 drawing calls may reject
                    # a NumPy float array as a color value
                    color = (percentage * np.asarray((83, 221, 108))).astype(int)
                    draw_bounding_box(face_coordinates, rgb_image, color.tolist())

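                    # note: cv2.imshow expects BGR ordering, so red and blue appear
                    # swapped when displaying the RGB array directly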
                    cv2.imshow('image', rgb_image)
                    cv2.waitKey(1)

                    if not self.Q.full():
                        self.Q.put([emotion_text, percentage])
