def process_image(image):

    try:
        # parameters for loading data and images
        detection_model_path = './trained_models/detection_models/haarcascade_frontalface_default.xml'
        emotion_model_path = './trained_models/emotion_models/fer2013_mini_XCEPTION.102-0.66.hdf5'
        emotion_labels = get_labels('fer2013')

        # hyper-parameters for bounding boxes shape
        emotion_offsets = (0, 0)

        # loading models
        face_detection = load_detection_model(detection_model_path)
        emotion_classifier = load_model(emotion_model_path, compile=False)

        # getting input model shapes for inference
        emotion_target_size = emotion_classifier.input_shape[1:3]

        # loading images
        image_array = np.frombuffer(image, np.uint8)  # np.fromstring is deprecated for binary data
        unchanged_image = cv2.imdecode(image_array, cv2.IMREAD_UNCHANGED)
        gray_image = cv2.cvtColor(unchanged_image, cv2.COLOR_BGR2GRAY)

        faces = detect_faces(face_detection, gray_image)
        detected_emotions = []
        for face_coordinates in faces:
            x1, x2, y1, y2 = apply_offsets(face_coordinates, emotion_offsets)
            gray_face = gray_image[y1:y2, x1:x2]

            try:
                gray_face = cv2.resize(gray_face, emotion_target_size)
            except:
                continue

            gray_face = preprocess_input(gray_face, True)
            gray_face = np.expand_dims(gray_face, 0)
            gray_face = np.expand_dims(gray_face, -1)
            emotion_label_arg = np.argmax(emotion_classifier.predict(gray_face))
            detected_emotions.append({
                "coordinates": [str(y1), str(x2), str(y2), str(x1)],
                "emotion": emotion_labels[emotion_label_arg]})

        return detected_emotions

    except Exception as err:
        logging.error('Error in emotion gender processor: "{0}"'.format(err))
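
A minimal usage sketch for process_image above, assuming it is called with the raw bytes of an encoded image file (the function itself decodes them via cv2.imdecode); the file name is a placeholder:

# Hypothetical caller: pass raw encoded image bytes to process_image.
with open('test.jpg', 'rb') as f:      # placeholder path
    raw_bytes = f.read()

results = process_image(raw_bytes)     # list of {"coordinates": ..., "emotion": ...}, or None on error
for face in results or []:
    print(face["emotion"], face["coordinates"])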
Code example #2
def index():

    # loading models
    face_detection = load_detection_model(detection_model_path)
    emotion_classifier = load_model(emotion_model_path, compile=False)
    # gender_classifier = load_model(gender_model_path, compile=False)

    # getting input model shapes for inference
    emotion_target_size = emotion_classifier.input_shape[1:3]
    # gender_target_size = gender_classifier.input_shape[1:3]

    print('Request-form', list(request.form.keys()), file=sys.stderr)

    # load and prepare the photograph

    image_string = request.form['image']

    image = Image.open(BytesIO(base64.b64decode(image_string)))

    # Save the input file
    image.save("Input.jpg", format="JPEG")

    rgb_image = np.array(image)

    # loading images
    # rgb_image = load_image(image_path, grayscale=False)
    gray_image = cv2.cvtColor(rgb_image, cv2.COLOR_RGB2GRAY)
    # gray_image = load_image(image_path, grayscale=True)
    gray_image = np.squeeze(gray_image)
    # gray_image = gray_image.astype('uint8')

    faces = detect_faces(face_detection, gray_image)

    print("Num of faces detected =", len(faces), file=sys.stderr)

    # Default response
    response = "Error: Found nothing! Try Again"

    if len(faces) == 0:
        response = "No Face Detected! Try Again\nTip: Keep face aligned with Camera"

    # Single face detected
    elif len(faces) == 1:
        face_coordinates = faces[0]

        # x1, x2, y1, y2 = apply_offsets(face_coordinates, gender_offsets)
        # rgb_face = rgb_image[y1:y2, x1:x2]

        x1, x2, y1, y2 = apply_offsets(face_coordinates, emotion_offsets)
        gray_face = gray_image[y1:y2, x1:x2]

        # try:
        # rgb_face = cv2.resize(rgb_face, (gender_target_size))

        gray_face = cv2.resize(gray_face, (emotion_target_size))

        # except:
        #     continue

        # rgb_face = preprocess_input(rgb_face, False)
        # rgb_face = np.expand_dims(rgb_face, 0)

        # gender_prediction = gender_classifier.predict(rgb_face)
        # gender_label_arg = np.argmax(gender_prediction)
        # gender_text = gender_labels[gender_label_arg]

        gray_face = preprocess_input(gray_face, True)
        gray_face = np.expand_dims(gray_face, 0)
        gray_face = np.expand_dims(gray_face, -1)
        emotion_label_arg = np.argmax(emotion_classifier.predict(gray_face))
        emotion_text = emotion_labels[emotion_label_arg]

        face = FaceCV(depth=16, width=8)

        result = face.detect_face(rgb_image, gray_image)

        if result:
            gender, age = result
            response = "It's an unknown {0} {1} year old {2}.".format(
                emotion_text, age, gender)

        else:
            response = "No Face Detected! Try Again\nTip: Keep face aligned with Camera"
        # if gender_text == gender_labels[0]:
        #     color = (0, 0, 255)
        # else:
        #     color = (255, 0, 0)

        # draw_bounding_box(face_coordinates, rgb_image, color)
        # draw_text(face_coordinates, rgb_image, gender_text, color, 0, -20, 1, 2)
        # draw_text(face_coordinates, rgb_image, emotion_text, color, 0, -50, 1, 2)

    # More than one face detected
    else:
        response = "More than one face detected!\nTry one face at a time for best results."

    # for face_coordinates in faces:
    #     x1, x2, y1, y2 = apply_offsets(face_coordinates, gender_offsets)
    #     rgb_face = rgb_image[y1:y2, x1:x2]

    #     x1, x2, y1, y2 = apply_offsets(face_coordinates, emotion_offsets)
    #     gray_face = gray_image[y1:y2, x1:x2]

    #     try:
    #         rgb_face = cv2.resize(rgb_face, (gender_target_size))
    #         gray_face = cv2.resize(gray_face, (emotion_target_size))
    #     except:
    #         continue

    #     rgb_face = preprocess_input(rgb_face, False)
    #     rgb_face = np.expand_dims(rgb_face, 0)
    #     gender_prediction = gender_classifier.predict(rgb_face)
    #     gender_label_arg = np.argmax(gender_prediction)
    #     gender_text = gender_labels[gender_label_arg]

    #     gray_face = preprocess_input(gray_face, True)
    #     gray_face = np.expand_dims(gray_face, 0)
    #     gray_face = np.expand_dims(gray_face, -1)
    #     emotion_label_arg = np.argmax(emotion_classifier.predict(gray_face))
    #     emotion_text = emotion_labels[emotion_label_arg]

    #     if gender_text == gender_labels[0]:
    #         color = (0, 0, 255)
    #     else:
    #         color = (255, 0, 0)

    #     draw_bounding_box(face_coordinates, rgb_image, color)
    #     draw_text(face_coordinates, rgb_image, gender_text, color, 0, -20, 1, 2)
    #     draw_text(face_coordinates, rgb_image, emotion_text, color, 0, -50, 1, 2)

    # # bgr_image = cv2.cvtColor(rgb_image, cv2.COLOR_RGB2BGR)

    # result_image = Image.fromarray(rgb_image)

    # #print('rotated_image.shape = ',input_array.shape)

    # #result_image.save('output.jpg',format='JPEG')

    # #convert image back to string..
    # buffered = BytesIO()
    # result_image.save(buffered, format="JPEG")
    # final_img_str = base64.b64encode(buffered.getvalue())

    # # cv2.imwrite('../images/predicted_test_image.png', bgr_image)

    # return final_img_str

    print(response, file=sys.stderr)

    K.clear_session()

    return response
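
A hedged client-side sketch for the index() handler above, assuming it is exposed as a Flask POST route (the route itself is not shown) and expects a base64-encoded JPEG in the 'image' form field; the URL and file name are placeholders:

import base64
import requests  # assumed to be installed

with open('selfie.jpg', 'rb') as f:                           # placeholder file name
    payload = {'image': base64.b64encode(f.read())}

resp = requests.post('http://localhost:5000/', data=payload)  # placeholder URL/route
print(resp.text)                                              # the human-readable response string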
Code example #3
emotion_offsets = (0, 0)

# loading models
face_detection = load_detection_model()
emotion_classifier = load_model(emotion_model_path, compile=False)

# getting input model shapes for inference
emotion_target_size = emotion_classifier.input_shape[1:3]

# loading images
rgb_image = load_image(image_path, grayscale=False)
gray_image = load_image(image_path, grayscale=True)
gray_image = np.squeeze(gray_image)
gray_image = gray_image.astype('uint8')

detected_faces, score, idx = detect_faces(face_detection, gray_image)

for detected_face in detected_faces:

    face_coordinates = make_face_coordinates(detected_face)

    x1, x2, y1, y2 = apply_offsets(face_coordinates, emotion_offsets)
    gray_face = gray_image[y1:y2, x1:x2]

    try:
        gray_face = cv2.resize(gray_face, (emotion_target_size))
    except:
        continue

    gray_face = preprocess_input(gray_face, True)
    gray_face = np.expand_dims(gray_face, 0)
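
The excerpt above stops after the first expand_dims call; judging from the other examples on this page, the remaining inference steps would presumably be:

    gray_face = np.expand_dims(gray_face, -1)
    emotion_label_arg = np.argmax(emotion_classifier.predict(gray_face))
    emotion_text = emotion_labels[emotion_label_arg]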
Code example #4
rgb_image = load_image(image_path, color_mode='rgb')
gray_image = load_image(image_path, color_mode='grayscale')
if isgray:
    # gray_image = rgb2gray(rgb_image)
    gray_image = np.squeeze(gray_image)
    gray_image = gray_image.astype('uint8')
    face_image = gray_image
else:
    rgb_image = np.squeeze(rgb_image).astype('uint8')
    face_image = rgb_image

emotion_offsets = (0, 0)

# face = gray_image

faces = detect_faces(face_detection, face_image)
face_crop = face_image
if len(faces) != 0:
    face = faces[0]
    x1, x2, y1, y2 = apply_offsets(face, emotion_offsets)
    face_crop = face_image[y1:y2, x1:x2]

face_image = cv2.resize(face_crop, emotion_target_size)
face_image = process_img(face_image)
face_image = np.expand_dims(face_image, 0)
if isgray:
    face_image = np.expand_dims(face_image, -1)

emotion_values = emotion_classifier.predict(face_image)
emotion_label_arg = np.argmax(emotion_values)
emotion_text = emotion_labels[int(emotion_label_arg)]
Code example #5
def test():
    if request.method == 'POST':
        # convert the raw image bytes to a uint8 array (np.fromstring is deprecated)
        nparr = np.frombuffer(request.data, np.uint8)
        # decode image
        img = cv2.imdecode(nparr, cv2.IMREAD_COLOR)
        # write the image in the folder
        # cv2.imwrite('image.jpeg', img)

        face_detection = load_detection_model(detection_model_path)
        emotion_classifier = load_model(emotion_model_path, compile=False)
        gender_classifier = load_model(gender_model_path, compile=False)

        emotion_target_size = emotion_classifier.input_shape[1:3]
        gender_target_size = gender_classifier.input_shape[1:3]

        gender_window = []
        emotion_window = []
        faces = []
        # bgr_image = cv2.imread('image.jpeg')
        bgr_image = img
        gray_image = cv2.cvtColor(bgr_image, cv2.COLOR_BGR2GRAY)
        rgb_image = cv2.cvtColor(bgr_image, cv2.COLOR_BGR2RGB)

        detectFaces = detect_faces(face_detection, gray_image)

        # print('Total faces detected:', len(detectFaces))
        for face_coordinates in detectFaces:
            x1, x2, y1, y2 = apply_offsets(face_coordinates, gender_offsets)
            # print(face_coordinates)
            rgb_face = rgb_image[y1:y2, x1:x2]
            x1, x2, y1, y2 = apply_offsets(face_coordinates, emotion_offsets)
            gray_face = gray_image[y1:y2, x1:x2]
            rgb_face = cv2.resize(rgb_face, (gender_target_size))
            gray_face = cv2.resize(gray_face, (emotion_target_size))

            gray_face = preprocess_input(gray_face, False)
            gray_face = np.expand_dims(gray_face, 0)
            gray_face = np.expand_dims(gray_face, -1)
            try:
                emotion_label_arg = np.argmax(
                    emotion_classifier.predict(gray_face))
                emotion_text = emotion_labels[emotion_label_arg]
            except ValueError:
                emotion_text = "undefined"

            rgb_face = np.expand_dims(rgb_face, 0)
            rgb_face = preprocess_input(rgb_face, False)
            try:
                gender_prediction = gender_classifier.predict(rgb_face)
                gender_label_arg = np.argmax(gender_prediction)
                gender_text = gender_labels[gender_label_arg]
            except ValueError:
                gender_text = "undefined"

            face = dict()
            face['faceRectangle'] = {
                "top": str(face_coordinates[0]),
                "left": str(face_coordinates[1]),
                "width": str(face_coordinates[2]),
                "height": str(face_coordinates[3])
            }
            face['emotion'] = emotion_text
            face['gender'] = gender_text
            faces.append(face)

            faces_pickled = json.dumps(faces)
            coordinates = json.dumps(face['faceRectangle'])

        # encode response using jsonpickle
        if len(faces) > 0:
            response_pickled = jsonpickle.encode(faces)
            # return faces
            return Response(response=faces_pickled, status=200, mimetype="application/json")
        else:
            message = {'message': 'Please use another image'}
            message_pickled = jsonpickle.encode(message)
            return Response(response=message_pickled, status=200, mimetype="application/json")
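
A sketch of a matching client for the test() endpoint above, assuming it is registered as a Flask POST route and receives the raw encoded image in the request body (request.data); the URL and file name are placeholders:

import requests  # assumed to be installed

with open('group_photo.jpg', 'rb') as f:                # placeholder file name
    img_bytes = f.read()

resp = requests.post('http://localhost:5000/test',      # placeholder URL/route
                     data=img_bytes,
                     headers={'Content-Type': 'image/jpeg'})
print(resp.json())  # list of {"faceRectangle": ..., "emotion": ..., "gender": ...}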
Code example #6
# hyper-parameters for bounding boxes shape
emotion_offsets = (0, 0)

# loading models
emotion_classifier = load_model(emotion_model_path, compile=False)

# getting input model shapes for inference
emotion_target_size = emotion_classifier.input_shape[1:3]

# loading images
rgb_image = load_image(image_path, grayscale=False)
gray_image = load_image(image_path, grayscale=True)
gray_image = np.squeeze(gray_image)
gray_image = gray_image.astype('uint8')

faces = detect_faces(gray_image)

for face_coordinates in faces:

    x1, x2, y1, y2 = apply_offsets(face_coordinates, emotion_offsets)
    gray_face = gray_image[y1:y2, x1:x2]
    try:
        gray_face = cv2.resize(gray_face, (emotion_target_size))
    except:
        continue

    gray_face = preprocess_input(gray_face, True)
    gray_face = np.expand_dims(gray_face, 0)
    gray_face = np.expand_dims(gray_face, -1)
    emotion_label_arg = np.argmax(emotion_classifier.predict(gray_face))
    emotion_text = emotion_labels[emotion_label_arg]
Code example #7
def generateResults(rgb_image, runNumber):

    # parameters for loading data and images
    #image_path = path
    detection_model_path = '../trained_models/detection_models/haarcascade_frontalface_default.xml'
    emotion_model_path = '../trained_models/emotion_models/fer2013_mini_XCEPTION.102-0.66.hdf5'
    gender_model_path = '../trained_models/gender_models/simple_CNN.81-0.96.hdf5'
    emotion_labels = get_labels('fer2013')
    gender_labels = get_labels('imdb')
    font = cv2.FONT_HERSHEY_SIMPLEX

    # hyper-parameters for bounding boxes shape
    gender_offsets = (30, 60)
    gender_offsets = (10, 10)
    emotion_offsets = (20, 40)
    emotion_offsets = (0, 0)

    # loading models
    face_detection = load_detection_model(detection_model_path)
    emotion_classifier = load_model(emotion_model_path, compile=False)
    gender_classifier = load_model(gender_model_path, compile=False)

    # getting input model shapes for inference
    emotion_target_size = emotion_classifier.input_shape[1:3]
    gender_target_size = gender_classifier.input_shape[1:3]

    # loading images
    #rgb_image = load_image(image_path, grayscale=False)
    #gray_image = load_image(image_path, grayscale=True)
    gray_image = cv2.cvtColor(rgb_image, cv2.COLOR_BGR2GRAY)
    gray_image = np.squeeze(gray_image)
    gray_image = gray_image.astype('uint8')

    result = []

    faces = detect_faces(face_detection, gray_image)
    for face_coordinates in faces:
        x1, x2, y1, y2 = apply_offsets(face_coordinates, gender_offsets)
        rgb_face = rgb_image[y1:y2, x1:x2]

        x1, x2, y1, y2 = apply_offsets(face_coordinates, emotion_offsets)
        gray_face = gray_image[y1:y2, x1:x2]

        try:
            rgb_face = cv2.resize(rgb_face, (gender_target_size))
            gray_face = cv2.resize(gray_face, (emotion_target_size))
        except:
            continue

        rgb_face = preprocess_input(rgb_face, False)
        rgb_face = np.expand_dims(rgb_face, 0)

        gender_prediction = gender_classifier.predict(rgb_face)
        gender_label_arg = np.argmax(gender_prediction)
        gender_text = gender_labels[gender_label_arg]

        print(gender_labels)
        print(gender_prediction)
        print(gender_label_arg)
        print(emotion_labels)


        gray_face = preprocess_input(gray_face, True)
        gray_face = np.expand_dims(gray_face, 0)
        gray_face = np.expand_dims(gray_face, -1)
        emotion_label_arg = np.argmax(emotion_classifier.predict(gray_face))
        emotion_text = emotion_labels[emotion_label_arg]

        print(emotion_label_arg)
        result.append((gender_label_arg, emotion_label_arg))

        if gender_text == gender_labels[0]:
            color = (0, 0, 255)
        else:
            color = (255, 0, 0)

        draw_bounding_box(face_coordinates, rgb_image, color)
        draw_text(face_coordinates, rgb_image, gender_text, color, 0, -20, 1, 2)
        draw_text(face_coordinates, rgb_image, emotion_text, color, 0, -50, 1, 2)

    bgr_image = cv2.cvtColor(rgb_image, cv2.COLOR_RGB2BGR)
    cv2.imwrite('../images/predicted_test_image' + str(runNumber) + '.png' , bgr_image)
    cv2.imwrite('../images/source_test_image' + str(runNumber) + '.png' , rgb_image)
    
    return result


#generateResults(sys.argv[1])
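
A minimal driver sketch based on the commented-out call above; it assumes an image path is passed on the command line and that generateResults works on an RGB array:

# Hypothetical driver: load one image and run the pipeline on it.
import sys
import cv2

if __name__ == '__main__':
    bgr = cv2.imread(sys.argv[1])               # OpenCV loads images as BGR
    rgb = cv2.cvtColor(bgr, cv2.COLOR_BGR2RGB)  # the function expects an RGB array
    print(generateResults(rgb, runNumber=0))    # runNumber only tags the output file names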
Code example #8
def main():
    cpt = 0
    RESEND_LIMIT = 15  # roughly half a second of frames at ~30 fps

    context = zmq.Context()
    socket = context.socket(zmq.REQ)
    socket.connect('tcp://127.0.0.1:5555')

    # parameters for loading data and images
    detection_model_path = '../trained_models/detection_models/haarcascade_frontalface_default.xml'
    emotion_model_path = '../trained_models/emotion_models/fer2013_mini_XCEPTION.102-0.66.hdf5'
    emotion_labels = get_labels('fer2013')

    # hyper-parameters for bounding boxes shape
    frame_window = 10
    emotion_offsets = (20, 40)

    # loading models
    face_detection = load_detection_model(detection_model_path)
    emotion_classifier = load_model(emotion_model_path, compile=False)

    # getting input model shapes for inference
    emotion_target_size = emotion_classifier.input_shape[1:3]

    # starting lists for calculating modes
    emotion_window = []

    # starting video streaming
    cv2.namedWindow('window_frame')
    video_capture = cv2.VideoCapture(0)
    while True:
        bgr_image = video_capture.read()[1]
        gray_image = cv2.cvtColor(bgr_image, cv2.COLOR_BGR2GRAY)
        rgb_image = cv2.cvtColor(bgr_image, cv2.COLOR_BGR2RGB)
        faces = detect_faces(face_detection, gray_image)

        for face_coordinates in faces:
            x1, x2, y1, y2 = apply_offsets(face_coordinates, emotion_offsets)
            gray_face = gray_image[y1:y2, x1:x2]
            try:
                gray_face = cv2.resize(gray_face, (emotion_target_size))
            except:
                continue

            gray_face = preprocess_input(gray_face, True)
            gray_face = np.expand_dims(gray_face, 0)
            gray_face = np.expand_dims(gray_face, -1)
            emotion_prediction = emotion_classifier.predict(gray_face)
            emotion_probability = np.max(emotion_prediction)
            emotion_label_arg = np.argmax(emotion_prediction)
            emotion_text = emotion_labels[emotion_label_arg]
            emotion_window.append(emotion_text)

            if len(emotion_window) > frame_window:
                emotion_window.pop(0)
            try:
                emotion_mode = mode(emotion_window)
            except:
                continue

            cpt += 1
            if (cpt > RESEND_LIMIT):
                socket.send_string(emotion_text)
                msg = socket.recv()
                cpt = 0

            if emotion_text == 'angry':
                color = emotion_probability * np.asarray((255, 0, 0))
            elif emotion_text == 'sad':
                color = emotion_probability * np.asarray((0, 0, 255))
            elif emotion_text == 'happy':
                color = emotion_probability * np.asarray((255, 255, 0))
            elif emotion_text == 'surprise':
                color = emotion_probability * np.asarray((0, 255, 255))
            else:
                color = emotion_probability * np.asarray((0, 255, 0))

            color = color.astype(int)
            color = color.tolist()

            draw_bounding_box(face_coordinates, rgb_image, color)
            draw_text(face_coordinates, rgb_image, emotion_mode, color, 0, -45,
                      1, 1)

        bgr_image = cv2.cvtColor(rgb_image, cv2.COLOR_RGB2BGR)
        cv2.imshow('window_frame', bgr_image)
        if cv2.waitKey(1) & 0xFF == ord('q'):
            break
    video_capture.release()
    cv2.destroyAllWindows()
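
The loop above pushes the current emotion label over a ZeroMQ REQ socket every RESEND_LIMIT frames; a minimal REP-side counterpart that would pair with it (the reply string is arbitrary) could look like this:

import zmq  # same dependency as the client above

def serve_emotions():
    context = zmq.Context()
    socket = context.socket(zmq.REP)
    socket.bind('tcp://127.0.0.1:5555')  # matches the address the client connects to
    while True:
        emotion = socket.recv_string()   # e.g. 'happy', 'sad', ...
        print('received:', emotion)
        socket.send_string('ok')         # REQ/REP requires a reply before the next request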
Code example #9
def main(argv):
    # parameters for loading data and images
    detection_model_path = '../trained_models/detection_models/haarcascade_frontalface_default.xml'
    emotion_model_path = '../trained_models/emotion_models/fer2013_mini_XCEPTION.102-0.66.hdf5'
    gender_model_path = '../trained_models/gender_models/simple_CNN.81-0.96.hdf5'
    emotion_labels = get_labels('fer2013')
    gender_labels = get_labels('imdb')
    font = cv2.FONT_HERSHEY_SIMPLEX

    # hyper-parameters for bounding boxes shape
    frame_window = 10
    gender_offsets = (30, 60)
    emotion_offsets = (20, 40)

    # loading models
    face_detection = load_detection_model(detection_model_path)
    emotion_classifier = load_model(emotion_model_path, compile=False)
    gender_classifier = load_model(gender_model_path, compile=False)

    # getting input model shapes for inference
    emotion_target_size = emotion_classifier.input_shape[1:3]
    gender_target_size = gender_classifier.input_shape[1:3]

    # starting lists for calculating modes
    gender_window = []
    emotion_window = []

    # starting video streaming
    cv2.namedWindow('window_frame')
    video_capture = cv2.VideoCapture(0)

    try:
        opts, args = getopt.getopt(argv, "hs:", ["subject="])
    except getopt.GetoptError:
        print("emotion.py -s <subject>")
        sys.exit(2)

    subject = Subject()
    for opt, arg in opts:
        if opt == '-h':
            print("emotion.py -s <subject>")
            sys.exit()
        elif opt in ("-s", "--subject"):
            subject.name = arg

    print(subject.name)

    subject.start()

    while True:

        bgr_image = video_capture.read()[1]
        gray_image = cv2.cvtColor(bgr_image, cv2.COLOR_BGR2GRAY)
        rgb_image = cv2.cvtColor(bgr_image, cv2.COLOR_BGR2RGB)
        faces = detect_faces(face_detection, gray_image)

        for face_coordinates in faces:

            x1, x2, y1, y2 = apply_offsets(face_coordinates, gender_offsets)
            rgb_face = rgb_image[y1:y2, x1:x2]

            x1, x2, y1, y2 = apply_offsets(face_coordinates, emotion_offsets)
            gray_face = gray_image[y1:y2, x1:x2]
            try:
                rgb_face = cv2.resize(rgb_face, (gender_target_size))
                gray_face = cv2.resize(gray_face, (emotion_target_size))
            except:
                continue
            gray_face = preprocess_input(gray_face, False)
            gray_face = np.expand_dims(gray_face, 0)
            gray_face = np.expand_dims(gray_face, -1)
            emotion_label_arg = np.argmax(
                emotion_classifier.predict(gray_face))
            emotion_text = emotion_labels[emotion_label_arg]
            emotion_window.append(emotion_text)

            rgb_face = np.expand_dims(rgb_face, 0)
            rgb_face = preprocess_input(rgb_face, False)
            gender_prediction = gender_classifier.predict(rgb_face)
            gender_label_arg = np.argmax(gender_prediction)
            gender_text = gender_labels[gender_label_arg]
            gender_window.append(gender_text)

            subject.addMood(emotion_text, gender_text)

            if len(gender_window) > frame_window:
                emotion_window.pop(0)
                gender_window.pop(0)
            try:
                emotion_mode = mode(emotion_window)
                gender_mode = mode(gender_window)
            except:
                continue

            if gender_text == gender_labels[0]:
                color = (0, 0, 255)
            else:
                color = (255, 0, 0)

            draw_bounding_box(face_coordinates, rgb_image, color)
            draw_text(face_coordinates, rgb_image, gender_mode, color, 0, -20,
                      1, 1)
            draw_text(face_coordinates, rgb_image, emotion_mode, color, 0, -45,
                      1, 1)

        bgr_image = cv2.cvtColor(rgb_image, cv2.COLOR_RGB2BGR)
        cv2.imshow('window_frame', bgr_image)
        if cv2.waitKey(1) & 0xFF == ord('q'):
            break

    subject.stop()
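
The Subject class used above is not shown; a hypothetical minimal stub that satisfies the calls made here (name, start(), addMood(), stop()) might be:

class Subject:
    """Hypothetical stand-in for the Subject used above: it only records observed moods."""

    def __init__(self, name=''):
        self.name = name
        self.moods = []

    def start(self):
        print('tracking subject:', self.name)

    def addMood(self, emotion_text, gender_text):
        self.moods.append((emotion_text, gender_text))

    def stop(self):
        print('collected', len(self.moods), 'observations for', self.name)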
Code example #10
    def main_thread(self):

        if self.data_bridge.processing_chosen_by_radio_butten == 'img':
            flag=0
            detection_model_path = '../trained_models/detection_models/haarcascade_frontalface_default.xml'
            emotion_model_path = '../trained_models/emotion_models/fer2013_mini_XCEPTION.102-0.66.hdf5'
            gender_model_path = '../trained_models/gender_models/simple_CNN.81-0.96.hdf5'
            emotion_labels = get_labels('fer2013')
            gender_labels = get_labels('imdb')
            face_detection = load_detection_model(detection_model_path)
            emotion_classifier = load_model(emotion_model_path, compile=False)
            gender_classifier = load_model(gender_model_path, compile=False)
            emotion_target_size = emotion_classifier.input_shape[1:3]
            gender_target_size = gender_classifier.input_shape[1:3]

            while self.data_bridge.start_process_manager and flag==0:
                flag=1
                image_path = self.data_bridge.selected_video_file_path
                font = cv2.FONT_HERSHEY_SIMPLEX

                # hyper-parameters for bounding boxes shape
                gender_offsets = (30, 60)
                gender_offsets = (10, 10)
                emotion_offsets = (20, 40)
                emotion_offsets = (0, 0)

                rgb_image = load_image(image_path, grayscale=False)
                gray_image = load_image(image_path, grayscale=True)
                gray_image = np.squeeze(gray_image)
                gray_image = gray_image.astype('uint8')

                faces = detect_faces(face_detection, gray_image)
                for face_coordinates in faces:
                    x1, x2, y1, y2 = apply_offsets(face_coordinates, gender_offsets)
                    rgb_face = rgb_image[y1:y2, x1:x2]
                    x1, x2, y1, y2 = apply_offsets(face_coordinates, emotion_offsets)
                    gray_face = gray_image[y1:y2, x1:x2]
                    try:
                        rgb_face = cv2.resize(rgb_face, (gender_target_size))
                        gray_face = cv2.resize(gray_face, (emotion_target_size))
                    except:
                        continue
                    rgb_face = preprocess_input(rgb_face, False)
                    rgb_face = np.expand_dims(rgb_face, 0)
                    gender_prediction = gender_classifier.predict(rgb_face)
                    gender_label_arg = np.argmax(gender_prediction)
                    gender_text = gender_labels[gender_label_arg]
                    gray_face = preprocess_input(gray_face, True)
                    gray_face = np.expand_dims(gray_face, 0)
                    gray_face = np.expand_dims(gray_face, -1)
                    emotion_label_arg = np.argmax(emotion_classifier.predict(gray_face))
                    emotion_text = emotion_labels[emotion_label_arg]
                    if gender_text == gender_labels[0]:
                        color = (0, 0, 255)
                    else:
                        color = (255, 0, 0)
                    draw_bounding_box(face_coordinates, rgb_image, color)
                    draw_text(face_coordinates, rgb_image, gender_text, color, 0, -20, 1, 2)
                    draw_text(face_coordinates, rgb_image, emotion_text, color, 0, -50, 1, 2)


                bgr_image = cv2.cvtColor(rgb_image, cv2.COLOR_RGB2BGR)
                cv2.imwrite('../images/predicted_test_image.png', bgr_image)

                print("File has been stored in Images folder")
                print("Press stop processing to exit")
                self.gui_root.update()


        if( (self.data_bridge.processing_chosen_by_radio_butten == 'vid') or (self.data_bridge.processing_chosen_by_radio_butten=='web')):
            detection_model_path = '../trained_models/detection_models/haarcascade_frontalface_default.xml'
            emotion_model_path = '../trained_models/emotion_models/fer2013_mini_XCEPTION.102-0.66.hdf5'
            gender_model_path = '../trained_models/gender_models/simple_CNN.81-0.96.hdf5'
            emotion_labels = get_labels('fer2013')
            gender_labels = get_labels('imdb')
            # Models
            face_detection = load_detection_model(detection_model_path)
            emotion_classifier = load_model(emotion_model_path, compile=False)
            gender_classifier = load_model(gender_model_path, compile=False)
            emotion_target_size = emotion_classifier.input_shape[1:3]
            gender_target_size = gender_classifier.input_shape[1:3]

            while self.data_bridge.start_process_manager:
                font = cv2.FONT_HERSHEY_SIMPLEX
                frame_window = 10
                gender_offsets = (30, 60)
                emotion_offsets = (20, 40)
                gender_window = []
                emotion_window = []
                # starting video streaming
                cv2.namedWindow('Window_frame')
                if self.data_bridge.processing_chosen_by_radio_butten=='vid':
                    self.cap=cv2.VideoCapture(self.data_bridge.selected_video_file_path)
                else:
                    self.cap = cv2.VideoCapture(0)
                fourcc = cv2.VideoWriter_fourcc(*'XVID')
                out = cv2.VideoWriter('Save.avi', fourcc, 20.0, (720, 480))
                while self.data_bridge.start_process_manager:
                    ret, bgr_image = self.cap.read()
                    gray_image = cv2.cvtColor(bgr_image, cv2.COLOR_BGR2GRAY)
                    rgb_image = cv2.cvtColor(bgr_image, cv2.COLOR_BGR2RGB)
                    faces = detect_faces(face_detection, gray_image)

                    for face_coordinates in faces:

                        x1, x2, y1, y2 = apply_offsets(face_coordinates, gender_offsets)
                        rgb_face = rgb_image[y1:y2, x1:x2]

                        x1, x2, y1, y2 = apply_offsets(face_coordinates, emotion_offsets)
                        gray_face = gray_image[y1:y2, x1:x2]
                        try:
                            rgb_face = cv2.resize(rgb_face, (gender_target_size))
                            gray_face = cv2.resize(gray_face, (emotion_target_size))
                        except:
                            continue
                        gray_face = preprocess_input(gray_face, False)
                        gray_face = np.expand_dims(gray_face, 0)
                        gray_face = np.expand_dims(gray_face, -1)
                        emotion_label_arg = np.argmax(emotion_classifier.predict(gray_face))
                        emotion_text = emotion_labels[emotion_label_arg]
                        emotion_window.append(emotion_text)

                        rgb_face = np.expand_dims(rgb_face, 0)
                        rgb_face = preprocess_input(rgb_face, False)
                        gender_prediction = gender_classifier.predict(rgb_face)
                        gender_label_arg = np.argmax(gender_prediction)
                        gender_text = gender_labels[gender_label_arg]
                        gender_window.append(gender_text)

                        if len(gender_window) > frame_window:
                            emotion_window.pop(0)
                            gender_window.pop(0)
                        try:
                            emotion_mode = mode(emotion_window)
                            gender_mode = mode(gender_window)
                        except:
                            continue

                        if gender_text == gender_labels[0]:
                            color = (0, 0, 0)
                        else:
                            color = (0, 0, 0)

                        draw_bounding_box(face_coordinates, rgb_image, color)
                        draw_text(face_coordinates, rgb_image, gender_mode,
                                  color, 0, -20, 1, 1)
                        draw_text(face_coordinates, rgb_image, emotion_mode,
                                  color, 0, -45, 1, 1)

                    bgr_image = cv2.cvtColor(rgb_image, cv2.COLOR_RGB2BGR)
                    cv2.imshow('Window_frame', bgr_image)
                    self.gui_root.update()
                self.cap.release()
                cv2.destroyAllWindows()
Code example #11
def func(video_path):
    # file to store metadata
    metaData = open(
        'C:/Users/ASUS/Desktop/Face Recognition/trial1/Face Detection and Emotion Analysis/src/final1.csv',
        'a')
    writer = csv.writer(metaData)

    # parameters for loading data and images
    detection_model_path = '../trained_models/detection_models/haarcascade_frontalface_default.xml'
    emotion_model_path = '../trained_models/emotion_models/fer2013_mini_XCEPTION.102-0.66.hdf5'
    emotion_labels = get_labels('fer2013')

    # hyper-parameters for bounding boxes shape
    frame_window = 10
    emotion_offsets = (20, 40)

    # loading models
    face_detection = load_detection_model(detection_model_path)
    emotion_classifier = load_model(emotion_model_path, compile=False)

    # getting input model shapes for inference
    emotion_target_size = emotion_classifier.input_shape[1:3]

    # starting lists for calculating modes
    emotion_window = []

    toc = time.time()
    # starting video streaming
    cv2.namedWindow('window_frame')
    #video_capture = cv2.VideoCapture(sys.argv[1])
    video_capture = cv2.VideoCapture(video_path)
    #video_capture = cv2.VideoCapture('videoplayback.mp4')

    while True:
        bgr_image = video_capture.read()[1]
        gray_image = cv2.cvtColor(bgr_image, cv2.COLOR_BGR2GRAY)
        rgb_image = cv2.cvtColor(bgr_image, cv2.COLOR_BGR2RGB)
        faces = detect_faces(face_detection, gray_image)

        frame_count = int(video_capture.get(cv2.CAP_PROP_POS_FRAMES))

        tic = time.time()

        for face_coordinates in faces:

            x1, x2, y1, y2 = apply_offsets(face_coordinates, emotion_offsets)
            gray_face = gray_image[y1:y2, x1:x2]
            try:
                gray_face = cv2.resize(gray_face, (emotion_target_size))
            except:
                continue

            actor_face = cv2.resize(gray_face, (128, 128))
            cv2.imwrite(
                "E:/tensorflow-master/tensorflow/examples/image_retraining/face.jpg",
                actor_face)

            video_capture.set(cv2.CAP_PROP_POS_FRAMES, int(frame_count))
            ret, frame = video_capture.read()
            cv2.imwrite(
                "E:/Object Detection/models-master/tutorials/image/imagenet/object.jpg",
                gray_image)

            gray_face = preprocess_input(gray_face, True)
            gray_face = np.expand_dims(gray_face, 0)
            gray_face = np.expand_dims(gray_face, -1)
            emotion_prediction = emotion_classifier.predict(gray_face)
            emotion_probability = np.max(emotion_prediction)
            emotion_label_arg = np.argmax(emotion_prediction)
            emotion_text = emotion_labels[emotion_label_arg]
            emotion_window.append(emotion_text)

            s2_out = subprocess.check_output([
                sys.executable,
                "E:/tensorflow-master/tensorflow/examples/label_image/label_image.py",
                "--graph=E:/tmp/output_graph.pb",
                "--labels=E:/tmp/output_labels.txt", "--input_layer=Mul",
                "--output_layer=final_result", "--input_mean=128",
                "--input_std=128",
                "--image=E:/tensorflow-master/tensorflow/examples/image_retraining/face.jpg"
            ])
            actor_confidence = s2_out.split()[1]
            if (float(actor_confidence) > 0.5):
                actor = s2_out.split()[0]
            else:
                actor = ""

            print(s2_out)

            s3_out = subprocess.check_output([
                sys.executable,
                "E:/Object Detection/models-master/tutorials/image/imagenet/classify_image.py",
                "--image_file=E:/Object Detection/models-master/tutorials/image/imagenet/object.jpg"
            ])
            object1 = s3_out.split()[0]
            print(s3_out)

            writer.writerows([[(tic - toc), frame_count, emotion_text,
                               emotion_probability, actor, actor_confidence,
                               face_coordinates, object1]])

            if len(emotion_window) > frame_window:
                emotion_window.pop(0)
            try:
                emotion_mode = mode(emotion_window)
            except:
                continue

            if emotion_text == 'angry':
                color = emotion_probability * np.asarray((255, 0, 0))
            elif emotion_text == 'sad':
                color = emotion_probability * np.asarray((0, 0, 255))
            elif emotion_text == 'happy':
                color = emotion_probability * np.asarray((255, 255, 0))
            elif emotion_text == 'surprise':
                color = emotion_probability * np.asarray((0, 255, 255))
            else:
                color = emotion_probability * np.asarray((0, 255, 0))

            color = color.astype(int)
            color = color.tolist()

            draw_bounding_box(face_coordinates, rgb_image, color)
            draw_text(face_coordinates, rgb_image, emotion_mode, color, 0, -20,
                      1, 1)
            draw_text(face_coordinates, rgb_image, actor, color, 0, -45, 1, 1)

        bgr_image = cv2.cvtColor(rgb_image, cv2.COLOR_RGB2BGR)
        cv2.imshow('window_frame', bgr_image)
        if cv2.waitKey(1) & 0xFF == ord('q'):
            break
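
The function above never releases its resources after the 'q' break; mirroring example #8, the presumed cleanup would be:

    video_capture.release()
    cv2.destroyAllWindows()
    metaData.close()  # close the CSV metadata file opened at the top of func()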
Code example #12
def main():

    protoFile = "C:\\Users\\asus\\Documents\\major_project\\Project\\openpose-master\\openpose-master\\models\\pose\\coco\\pose_deploy_linevec.prototxt"
    weightsFile = "C:\\Users\\asus\\Documents\\major_project\\Project\\openpose-master\\openpose-master\\models\\pose\\coco\\pose_iter_440000.caffemodel"
    nPoints = 18
    # COCO Output Format
    keypointsMapping = [
        'Nose', 'Neck', 'R-Sho', 'R-Elb', 'R-Wr', 'L-Sho', 'L-Elb', 'L-Wr',
        'R-Hip', 'R-Knee', 'R-Ank', 'L-Hip', 'L-Knee', 'L-Ank', 'R-Eye',
        'L-Eye', 'R-Ear', 'L-Ear'
    ]

    POSE_PAIRS = [[1, 2], [1, 5], [2, 3], [3, 4], [5, 6], [6, 7], [1,
                                                                   8], [8, 9],
                  [9, 10], [1, 11], [11, 12], [12, 13], [1, 0], [0, 14],
                  [14, 16], [0, 15], [15, 17], [2, 17], [5, 16]]

    # indices of the PAFs corresponding to the POSE_PAIRS
    # e.g. for POSE_PAIR (1,2), the PAFs are at output indices (31,32); similarly (1,5) -> (39,40), and so on.
    mapIdx = [[31, 32], [39, 40], [33, 34], [35, 36], [41, 42], [43, 44],
              [19, 20], [21, 22], [23, 24], [25, 26], [27, 28], [29, 30],
              [47, 48], [49, 50], [53, 54], [51, 52], [55, 56], [37, 38],
              [45, 46]]

    colors = [[0, 100, 255], [0, 100, 255], [0, 255, 255], [0, 100, 255],
              [0, 255, 255], [0, 100, 255], [0, 255, 0], [255, 200, 100],
              [255, 0, 255], [0, 255, 0], [255, 200, 100], [255, 0, 255],
              [0, 0, 255], [255, 0, 0], [200, 200, 0], [255, 0, 0],
              [200, 200, 0], [0, 0, 0]]

    detection_model_path = 'C:\\Users\\asus\\Documents\\major_project\\Project\\face_classification-master\\face_classification-master\\trained_models\\detection_models\\haarcascade_frontalface_default.xml'
    emotion_model_path = 'C:\\Users\\asus\\Documents\\major_project\\Project\\face_classification-master\\face_classification-master\\trained_models\\emotion_models\\fer2013_mini_XCEPTION.102-0.66.hdf5'
    emotion_labels = get_labels('fer2013')
    font = cv2.FONT_HERSHEY_SIMPLEX
    # hyper-parameters for bounding boxes shape
    emotion_offsets = (20, 40)
    emotion_offsets = (0, 0)
    # loading models
    face_detection = load_detection_model(detection_model_path)
    emotion_classifier = load_model(emotion_model_path, compile=False)
    # getting input model shapes for inference
    emotion_target_size = emotion_classifier.input_shape[1:3]

    while True:
        poll()
        print(image_queue)
        # parameters for loading data and images
        image_name = image_queue.get()[0]
        print(image_name)
        image_path = get_image(image_name)  #sys.argv[1]

        # loading images
        rgb_image = load_image(image_path, grayscale=False)
        gray_image = load_image(image_path, grayscale=True)
        gray_image = np.squeeze(gray_image)
        gray_image = gray_image.astype('uint8')

        faces = detect_faces(face_detection, gray_image)
        cat_count = {
            'angry': 0,
            'disgust': 0,
            'fear': 0,
            'happy': 0,
            'sad': 0,
            'surprise': 0,
            'neutral': 0
        }
        total_count = 0
        for face_coordinates in faces:
            x1, x2, y1, y2 = apply_offsets(face_coordinates, emotion_offsets)
            gray_face = gray_image[y1:y2, x1:x2]

            try:
                gray_face = cv2.resize(gray_face, (emotion_target_size))
            except:
                continue

            gray_face = preprocess_input(gray_face, True)
            gray_face = np.expand_dims(gray_face, 0)
            gray_face = np.expand_dims(gray_face, -1)
            emotion_label_arg = np.argmax(
                emotion_classifier.predict(gray_face))
            emotion_text = emotion_labels[emotion_label_arg]
            cat_count[emotion_text] = cat_count[emotion_text] + 1
            total_count = total_count + 1
            color = (255, 0, 0)
            draw_bounding_box(face_coordinates, rgb_image, color)
            draw_text(face_coordinates, rgb_image, emotion_text, color, 0, -50,
                      1, 2)
        cv2.imwrite("../pose_images/" + image_name, rgb_image)
        upload_file("../pose_images/" + image_name,
                    'major-project-processed-images', image_name)

        connection = pymysql.connect(host,
                                     user=user,
                                     port=port,
                                     passwd=password,
                                     db=dbname)
        cursor = connection.cursor()
        cursor.execute(
            '''INSERT INTO `expressions`(`name`, `happy`, `angry`, `sad`, `suprised`, `fear`,`neutral`,`disgust`,`total`)
                        VALUES (%s,
                                %s,
                                %s,
                                %s,
                                %s,
                                %s,
                                %s,
                                %s,
                                %s)''',
            (image_name, cat_count['happy'], cat_count['angry'],
             cat_count['sad'], cat_count['surprise'], cat_count['fear'],
             cat_count['neutral'], cat_count['disgust'], total_count))
        cursor.execute("commit")
        #bgr_image = cv2.cvtColor(rgb_image, cv2.COLOR_RGB2BGR)
        #cv2.imwrite('../images/predicted_test_image.png', bgr_image)

        #pose estimation code........
        image1 = cv2.imread(image_path)
        frameWidth = image1.shape[1]
        frameHeight = image1.shape[0]
        #t = time.time()
        net = cv2.dnn.readNetFromCaffe(protoFile, weightsFile)

        # Fix the input Height and get the width according to the Aspect Ratio
        inHeight = 368
        inWidth = int((inHeight / frameHeight) * frameWidth)

        inpBlob = cv2.dnn.blobFromImage(image1,
                                        1.0 / 255, (inWidth, inHeight),
                                        (0, 0, 0),
                                        swapRB=False,
                                        crop=False)

        net.setInput(inpBlob)
        output = net.forward()
        #print("Time Taken in forward pass = {}".format(time.time() - t))

        detected_keypoints = []
        keypoints_list = np.zeros((0, 3))
        keypoint_id = 0
        threshold = 0.1
        keypoint_location = {}
        for part in range(nPoints):
            probMap = output[0, part, :, :]
            probMap = cv2.resize(probMap, (image1.shape[1], image1.shape[0]))
            keypoints = getKeypoints(probMap, threshold)
            print("Keypoints - {} : {}".format(keypointsMapping[part],
                                               keypoints))
            keypoints_with_id = []
            for i in range(len(keypoints)):
                keypoints_with_id.append(keypoints[i] + (keypoint_id, ))
                keypoints_list = np.vstack([keypoints_list, keypoints[i]])
                keypoint_location[keypoint_id] = keypoints[i]
                keypoint_id += 1
            detected_keypoints.append(keypoints_with_id)

        frameClone = image1.copy()
        for i in range(nPoints):
            for j in range(len(detected_keypoints[i])):
                cv2.circle(frameClone, detected_keypoints[i][j][0:2], 5,
                           colors[i], -1, cv2.LINE_AA)
                #cv2.imshow("Keypoints",frameClone)

        valid_pairs, invalid_pairs = getValidPairs(output, frameWidth,
                                                   frameHeight, mapIdx,
                                                   detected_keypoints,
                                                   POSE_PAIRS)
        personwiseKeypoints = getPersonwiseKeypoints(valid_pairs,
                                                     invalid_pairs,
                                                     keypoints_list, mapIdx,
                                                     POSE_PAIRS)
        for i in range(17):
            for n in range(len(personwiseKeypoints)):
                index = personwiseKeypoints[n][np.array(POSE_PAIRS[i])]
                if -1 in index:
                    continue
                B = np.int32(keypoints_list[index.astype(int), 0])
                A = np.int32(keypoints_list[index.astype(int), 1])
                cv2.line(frameClone, (B[0], A[0]), (B[1], A[1]), colors[i], 3,
                         cv2.LINE_AA)
        cv2.imwrite("../pose_images/image0.jpg", frameClone)
        class_status = classStatus(personwiseKeypoints, keypoint_location)
        print(class_status)
        cursor.execute(
            '''INSERT INTO `gestures`(`name`,`left_turned`, `right_turned`, `back_turned`, `raised_hands`, `total`)
                        VALUES (%s,
                                %s,
                                %s,
                                %s,
                                %s,
                                %s
                                )''',
            (image_name, class_status['turnedleft'],
             class_status['turnedright'], class_status['turnedback'],
             class_status['raisedhands'], class_status['classtotal']))
        cursor.execute("commit")
        #end of pose estimation code......................
        cursor.execute(
            ''' UPDATE `images` SET `isprocessed`=1 WHERE `name`=%s''',
            (image_name))
        cursor.execute("commit")

        cursor.close()
        connection.close()
Code example #13
def fun(in_path, out_video_path, out_info_path, in_finished_path, model_path,
        video_resolution):
    """
    >>> fun(/fer_input, /fer_output, /fer_result, /fer_finished, /fer_model, video_resolution)
    .mp4 files in the fer_input folder are moved to the fer_finished folder.
    Processed .mp4 files are saved in the fer_output folder.
    .csv files are saved in the fer_result folder.
    Only videos whose resolution is 720p or above are processed (video_resolution = "720p", adjustable).
    """
    global model, F
    detect_emo = True

    #save config
    save_video = True
    save_info = True

    show_video = False

    #config = tf.ConfigProto()
    # config.gpu_options.per_process_gpu_memory_fraction = 0.7
    # config.gpu_options.allow_growth = True
    # session = InteractiveSession(config=config)
    #%%
    # parameters for loading data and images
    detection_model_path = model_path + '/haarcascade_frontalface_default.xml'
    if detect_emo:
        emotion_model_path = model_path + '/googlenet__googlenetwei__2020Aug29_16.21'
        emotion_labels = get_labels('fer2013')
        emotion_offsets = (20, 40)
        # loading models
        model = getattr(model, 'googlenet')
        model = model(in_channels=3, num_classes=7)
        #print(torch.cuda.is_available())
        #print(torch.cuda.device_count())
        state = torch.load(emotion_model_path, map_location='cpu')
        model.load_state_dict(state['net'])

        #model.cuda()
        model.eval()
        # getting input model shapes for inference
        emotion_target_size = (224, 224)
        # starting lists for calculating modes
        emotion_window = []

    # hyper-parameters for bounding boxes shape
    frame_window = 10
    emotion_offsets = (20, 40)

    # loading models
    face_detection = load_detection_model(detection_model_path)

    info_name = [
        'time', 'frame', 'face_x', 'face_y', 'face_w', 'face_h', 'emotion',
        'angry_prob', 'disgust_prob', 'fear_prob', 'happy_prob', 'sad_prob',
        'surprise_prob', 'neutral_prob'
    ]

    input_video_root = in_path
    output_video_root = out_video_path
    output_info_root = out_info_path
    for video_path in glob.glob(input_video_root + '/**/*.mp4',
                                recursive=True):
        print(video_path)
        no_root_path = video_path[len(input_video_root):].replace(
            video_path.split('/')[-1], '')
        video_capture = cv2.VideoCapture(video_path)
        video_cap_ori = video_capture
        video_name = video_path.split('/')[-1].split('.mp4')[0]
        ori_video_name = video_path.split('/')[-1]

        fps_float = video_capture.get(cv2.CAP_PROP_FPS)
        fps = round(video_capture.get(cv2.CAP_PROP_FPS))
        size = (round(video_capture.get(cv2.CAP_PROP_FRAME_WIDTH)),
                round(video_capture.get(cv2.CAP_PROP_FRAME_HEIGHT)))  # (width, height) in pixels
        ori_size = size
        reduce_resolution = 0
        scaling_factor_x = 1
        scaling_factor_y = 1
        if video_resolution == "720p" and size[0] > 1280 and size[1] > 720:
            #need to reduce resolution to 720p
            reduce_resolution = 1
            out_path = input_video_root + no_root_path + 'resize_to_720p_' + video_path.split(
                '/')[-1]
            fourcc = cv2.VideoWriter_fourcc(*'MP4V')
            out = cv2.VideoWriter(out_path, fourcc, fps, (1280, 720))
            while True:
                ret, frame = video_capture.read()
                if ret == True:
                    b = cv2.resize(frame, (1280, 720),
                                   fx=0,
                                   fy=0,
                                   interpolation=cv2.INTER_CUBIC)
                    out.write(b)
                else:
                    break
            video_capture.release()
            out.release()

            scaling_factor_x = size[0] / 1280
            scaling_factor_y = size[1] / 720

            #original resolution video move to fer_finished dir
            src = video_path
            dst = in_finished_path + no_root_path + video_name + ".mp4"
            os.makedirs(os.path.dirname(in_finished_path + no_root_path),
                        exist_ok=True)
            shutil.move(src, dst)

            #capture ori resolution video to draw bounding box
            video_cap_ori = cv2.VideoCapture(dst)

            #capture reducing resolution video to construct csv file
            video_path = out_path
            video_capture = cv2.VideoCapture(video_path)
            video_name = video_path.split('/')[-1].split('.mp4')[0]
            fps_float = video_capture.get(cv2.CAP_PROP_FPS)
            fps = round(video_capture.get(cv2.CAP_PROP_FPS))
            size = (round(video_capture.get(cv2.CAP_PROP_FRAME_WIDTH)),
                    round(video_capture.get(cv2.CAP_PROP_FRAME_HEIGHT)))  # (width, height) in pixels

        if size[0] == 1280 and size[1] == 720:
            if save_video:
                os.makedirs(os.path.dirname(output_video_root + no_root_path),
                            exist_ok=True)
                out_path = output_video_root + no_root_path + ori_video_name
                fourcc = cv2.VideoWriter_fourcc(*'MP4V')
                out = cv2.VideoWriter(out_path, fourcc, fps, ori_size)
            if save_info:
                os.makedirs(os.path.dirname(output_info_root + no_root_path),
                            exist_ok=True)
                csv_info = codecs.open(output_info_root + no_root_path +
                                       video_name + '_info.csv',
                                       'w',
                                       encoding="utf_8_sig")
                csv_writer = csv.writer(csv_info)
                csv_writer.writerow(info_name)

            frame_idx = 0
            st_time = time.time()
            while (video_cap_ori.isOpened()):
                if frame_idx % 10 == 0:
                    print('Processing frame: ' + str(frame_idx) + ' ......')

                video_flag_ori, bgr_image_ori = video_cap_ori.read()  # original-resolution frame
                video_flag, bgr_image = video_capture.read()  # downscaled frame

                if video_flag_ori:
                    frame_idx += 1
                    gray_image = cv2.cvtColor(bgr_image, cv2.COLOR_BGR2GRAY)
                    #rgb_image = cv2.cvtColor(bgr_image, cv2.COLOR_BGR2RGB)

                    gray_image_ori = cv2.cvtColor(bgr_image_ori,
                                                  cv2.COLOR_BGR2GRAY)
                    rgb_image_ori = cv2.cvtColor(bgr_image_ori,
                                                 cv2.COLOR_BGR2RGB)

                    faces = detect_faces(face_detection, gray_image)
                    if not isinstance(faces, tuple):
                        faces = faces[faces[:, 0].argsort()]
                        faces = faces[faces[:, 1].argsort()]
                        faces = faces[faces[:, 2].argsort()]
                        faces = faces[faces[:, 3].argsort()]

                    for face_coordinates in faces:
                        x_1, x_2, y_1, y_2 = apply_offsets(
                            face_coordinates, emotion_offsets)

                        if detect_emo:
                            gray_face = gray_image[y_1:y_2, x_1:x_2]
                            try:
                                gray_face = cv2.resize(gray_face,
                                                       (emotion_target_size))
                            except:
                                continue

                            gray_face = np.dstack([gray_face] * 3)

                            gray_face = transforms.Compose([
                                transforms.ToPILImage(),
                                transforms.ToTensor(),
                            ])(np.uint8(gray_face))

                            gray_face = torch.stack([gray_face], 0)
                            #gray_face = gray_face.cuda(non_blocking=True)
                            outputs = model(gray_face).cpu()
                            outputs = F.softmax(outputs, 1)

                            emotion_prediction = torch.sum(outputs, 0).cpu(
                            ).detach().numpy()  # outputs.shape [tta_size, 7]
                            emotion_probability = np.max(emotion_prediction)
                            emotion_label_arg = np.argmax(emotion_prediction)
                            emotion_text = emotion_labels[emotion_label_arg]
                            emotion_window.append(emotion_text)

                            if len(emotion_window) > frame_window:
                                emotion_window.pop(0)
                            try:
                                emotion_mode = mode(emotion_window)
                            except:
                                continue
                            x = int(
                                float(face_coordinates[0] * scaling_factor_x))
                            y = int(
                                float(face_coordinates[1] * scaling_factor_y))
                            w = int(
                                float(face_coordinates[2] * scaling_factor_x))
                            h = int(
                                float(face_coordinates[3] * scaling_factor_y))
                            # one colour per emotion; anything not listed is drawn as neutral
                            emotion_colors = {
                                'angry': (255, 0, 0),
                                'sad': (0, 0, 255),
                                'happy': (255, 255, 0),
                                'surprise': (0, 255, 255),
                                'disgust': (0, 0, 0),
                                'fear': (255, 0, 255),
                            }
                            label = (emotion_text if emotion_text in emotion_colors
                                     else 'neutral')
                            box_color = emotion_colors.get(emotion_text, (0, 255, 0))
                            cv2.rectangle(bgr_image_ori, (x, y), (x + w, y + h),
                                          box_color, 4)
                            cv2.putText(bgr_image_ori, label,
                                        (int(float(x + w / 2 - 43)), y - 10),
                                        cv2.FONT_HERSHEY_DUPLEX, 1, box_color, 1,
                                        cv2.LINE_AA)

                        if not detect_emo:
                            color = np.asarray((0, 0, 0))
                            color = color.astype(int)
                            color = color.tolist()
                            draw_bounding_box(face_coordinates, rgb_image,
                                              color)

                        if save_info:
                            op_info_list = [
                                round(frame_idx / fps_float, 3), frame_idx,
                                face_coordinates[0] * scaling_factor_x,
                                face_coordinates[1] * scaling_factor_y,
                                face_coordinates[2] * scaling_factor_x,
                                face_coordinates[3] * scaling_factor_y
                            ]
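                            # row layout: time (s), frame index, x, y, w, h scaled to
                            # the original resolution, then the emotion label and the
                            # seven class scores when emotion detection is enabled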
                            for i in range(len(op_info_list)):
                                op_info_list[i] = str(op_info_list[i])
                            if detect_emo:
                                op_info_list.append(emotion_text)
                                for prob in emotion_prediction:
                                    op_info_list.append(prob)
                            csv_writer.writerow(op_info_list)

                    #bgr_image = cv2.cvtColor(rgb_image, cv2.COLOR_RGB2BGR)

                    if save_video:
                        out.write(bgr_image_ori)
                    if show_video:
                        cv2.imshow('window_frame', bgr_image)
                        if cv2.waitKey(1) & 0xFF == ord('q'):
                            break
                else:
                    break
            if save_video:
                out.release()
            if save_info:
                csv_info.close()
            print(video_path + ' DONE!!\tSpend Time: ' +
                  str(time.time() - st_time) + '(s)')
            video_capture.release()
            video_cap_ori.release()
            if show_video:
                cv2.destroyAllWindows()

        else:
            os.makedirs(os.path.dirname(output_info_root + no_root_path),
                        exist_ok=True)
            csv_info = codecs.open(output_info_root + no_root_path +
                                   video_name + '_info.csv',
                                   'w',
                                   encoding="utf_8_sig")
            csv_writer = csv.writer(csv_info)
            err_msg = "The resolution of " + video_name + ".mp4 is lower than 720p."
            csv_writer.writerow([err_msg])
            csv_info.close()

        src = video_path
        dst = in_finished_path + no_root_path + video_name + ".mp4"
        os.makedirs(os.path.dirname(in_finished_path + no_root_path),
                    exist_ok=True)
        shutil.move(src, dst)
        if reduce_resolution == 1:
            # presumably strips a prefix added when the video was downscaled
            video_ori_name = video_name[15:]
            csv_path_rename = output_info_root + no_root_path + video_name + '_info.csv'
            os.remove(dst)
            os.rename(
                csv_path_rename,
                output_info_root + no_root_path + video_ori_name + '_info.csv')

    shutil.rmtree(input_video_root, ignore_errors=True)
コード例 #14
def emotion_analyse(path):
    print('Emotion Analysing...........................')
    with graph.as_default():
        emotion_analyse_data = {'angry': 0, 'sad': 0, 'happy': 0, 'surprise': 0, 'fear': 0}

        # video_capture = cv2.VideoCapture(path)
        vs = VideoStream(path).start()

        # length = int(vs.get(cv2.CAP_PROP_FRAME_COUNT))

        while True:

            # bgr_image = video_capture.read()[1]
            bgr_image = vs.read()
            if bgr_image is None:
                break

            gray_image = cv2.cvtColor(bgr_image, cv2.COLOR_BGR2GRAY)
            rgb_image = cv2.cvtColor(bgr_image, cv2.COLOR_BGR2RGB)
            faces = detect_faces(face_detection, gray_image)

            for face_coordinates in faces:

                x1, x2, y1, y2 = apply_offsets(face_coordinates, emotion_offsets)
                gray_face = gray_image[y1:y2, x1:x2]
                try:
                    gray_face = cv2.resize(gray_face, (emotion_target_size))
                except:
                    continue

                gray_face = preprocess_input(gray_face, True)
                gray_face = np.expand_dims(gray_face, 0)
                gray_face = np.expand_dims(gray_face, -1)
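                # gray_face is now shaped (1, H, W, 1) and scaled to [-1, 1],
                # the input format the Keras emotion classifier expects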
                emotion_prediction = emotion_classifier.predict(gray_face)
                emotion_probability = np.max(emotion_prediction)
                emotion_label_arg = np.argmax(emotion_prediction)
                emotion_text = emotion_labels[emotion_label_arg]
                emotion_window.append(emotion_text)

                if len(emotion_window) > frame_window:
                    emotion_window.pop(0)
                try:
                    emotion_mode = mode(emotion_window)
                except:
                    continue

                if emotion_text == 'angry':
                    color = emotion_probability * np.asarray((255, 0, 0))
                    emotion_analyse_data['angry'] += 1
                elif emotion_text == 'sad':
                    color = emotion_probability * np.asarray((0, 0, 255))
                    emotion_analyse_data['sad'] += 1
                elif emotion_text == 'happy':
                    color = emotion_probability * np.asarray((255, 255, 0))
                    emotion_analyse_data['happy'] += 1
                elif emotion_text == 'surprise':
                    color = emotion_probability * np.asarray((0, 255, 255))
                    emotion_analyse_data['surprise'] += 1
                else:
                    color = emotion_probability * np.asarray((0, 255, 0))
                    emotion_analyse_data['fear'] += 1  # remaining labels are counted as 'fear'

                color = color.astype(int)
                color = color.tolist()
                # print(emotion_text)

                draw_bounding_box(face_coordinates, rgb_image, color)
                draw_text(face_coordinates, rgb_image, emotion_mode,
                          color, 0, -45, 1, 1)

        vs.stop()
        print('Emotion Analysed...........................')

        return emotion_analyse_data
コード例 #15
def main():
    parser = argparse.ArgumentParser(
        description='Emotion detection with demo video file',
        prog='video_playback_emotion_demo.py')
    parser.add_argument("-f",
                        "--file",
                        type=str,
                        help='video file',
                        default='../w251demo/afew_train_angry.avi')
    args = parser.parse_args()
    video_file = args.file

    cv2.namedWindow('window_frame')
    video_capture = cv2.VideoCapture(video_file)
    frame_count = 0
    while (video_capture.isOpened()):
        ret, bgr_image = video_capture.read()
        if not ret:
            break
        gray_image = cv2.cvtColor(bgr_image, cv2.COLOR_BGR2GRAY)
        rgb_image = cv2.cvtColor(bgr_image, cv2.COLOR_BGR2RGB)
        faces = detect_faces(face_detection, gray_image)

        for face_coordinates in faces:

            x1, x2, y1, y2 = apply_offsets(face_coordinates, emotion_offsets)
            gray_face = gray_image[y1:y2, x1:x2]
            try:
                gray_face = cv2.resize(gray_face, (emotion_target_size))
            except:
                continue

            gray_face = preprocess_input(gray_face, True)
            gray_face = np.expand_dims(gray_face, 0)
            gray_face = np.expand_dims(gray_face, -1)
            emotion_prediction = emotion_classifier.predict(gray_face)
            emotion_probability = np.max(emotion_prediction)
            emotion_label_arg = np.argmax(emotion_prediction)
            emotion_text = emotion_labels[emotion_label_arg]
            emotion_window.append(emotion_text)

            if len(emotion_window) > frame_window:
                emotion_window.pop(0)
            try:
                emotion_mode = mode(emotion_window)
            except:
                continue

            if emotion_text == 'angry':
                color = emotion_probability * np.asarray((255, 0, 0))
            elif emotion_text == 'sad':
                color = emotion_probability * np.asarray((0, 0, 255))
            elif emotion_text == 'happy':
                color = emotion_probability * np.asarray((255, 255, 0))
            elif emotion_text == 'surprise':
                color = emotion_probability * np.asarray((0, 255, 255))
            else:
                color = emotion_probability * np.asarray((0, 255, 0))

            color = color.astype(int)
            color = color.tolist()

            draw_bounding_box(face_coordinates, rgb_image, color)
            draw_text(face_coordinates, rgb_image, emotion_mode, color, 0, -45,
                      1, 1)

            print(
                "*********************** START PUBLISHING ***********************"
            )
            # Build the classification metadata string (note: not strict JSON)
            str_ts = str(datetime.datetime.now().strftime('%Y-%m-%d %H:%M:%S'))
            str_classify = "{video_file: %s, frame_number: %s, emotion_probability: %s, emotion_text: %s, timestamp: %s}" % (
                video_file, frame_count, emotion_probability, emotion_text,
                str_ts)
            print("Classification String: ", str_classify)
            # Publish Image to MQTT
            encode_im = cv2.imencode(".jpg", rgb_image)[1].tostring()
            encode64 = base64.b64encode(encode_im)
            #publish.single(topic="test_ap_mqtt", payload=encode64, hostname="localhost")
            publish.single(topic="fc_img_mqtt",
                           payload=encode64,
                           hostname="50.23.173.22")
            publish.single(topic="fc_json_mqtt",
                           payload=str_classify,
                           hostname="50.23.173.22")
            frame_count += 1
            print(
                "*********************** END PUBLISHING ***********************"
            )

        bgr_image = cv2.cvtColor(rgb_image, cv2.COLOR_RGB2BGR)
        cv2.imshow('window_frame', bgr_image)
        if cv2.waitKey(1) & 0xFF == ord('q'):
            break

    video_capture.release()
    cv2.destroyAllWindows()
コード例 #16
def process_image(image):

    with graph.as_default():

        # Raw data
        #arr = []

        try:
            # loading images
            image_array = np.fromstring(image, np.uint8)
            unchanged_image = cv2.imdecode(image_array, cv2.IMREAD_UNCHANGED)

            #rgb_image = cv2.cvtColor(unchanged_image, cv2.COLOR_BGR2RGB)
            gray_image = cv2.cvtColor(unchanged_image, cv2.COLOR_BGR2GRAY)

            faces = detect_faces(face_detection, gray_image)
            for face_coordinates in faces:
                #x1, x2, y1, y2 = apply_offsets(face_coordinates, gender_offsets)
                #rgb_face = rgb_image[y1:y2, x1:x2]

                x1, x2, y1, y2 = apply_offsets(face_coordinates,
                                               emotion_offsets)
                gray_face = gray_image[y1:y2, x1:x2]

                try:
                    #rgb_face = cv2.resize(rgb_face, (gender_target_size))
                    gray_face = cv2.resize(gray_face, (emotion_target_size))
                except:
                    continue

                #rgb_face = preprocess_input(rgb_face, False)
                #rgb_face = np.expand_dims(rgb_face, 0)
                #gender_prediction = gender_classifier.predict(rgb_face)
                #gender_label_arg = np.argmax(gender_prediction)
                #gender_text = gender_labels[gender_label_arg]

                gray_face = preprocess_input(gray_face, True)
                gray_face = np.expand_dims(gray_face, 0)
                gray_face = np.expand_dims(gray_face, -1)

                # Return raw data
                #arr.append(gender_prediction)
                #return emotion_classifier.predict(gray_face)
                return emotion_labels[np.argmax(
                    emotion_classifier.predict(gray_face))]
        except Exception as err:
            logging.error(
                'Error in emotion gender processor: "{0}"'.format(err))

        # Return array of raw data
        #return arr
        return 'no face'
コード例 #17
def process_image(image):

    try:
        # parameters for loading data and images
        detection_model_path = './trained_models/detection_models/haarcascade_frontalface_default.xml'
        emotion_model_path = './trained_models/emotion_models/fer2013_mini_XCEPTION.102-0.66.hdf5'
        gender_model_path = './trained_models/gender_models/simple_CNN.81-0.96.hdf5'
        emotion_labels = get_labels('fer2013')
        gender_labels = get_labels('imdb')
        font = cv2.FONT_HERSHEY_SIMPLEX

        # hyper-parameters for bounding boxes shape
        gender_offsets = (10, 10)
        emotion_offsets = (0, 0)

        # loading models
        face_detection = load_detection_model(detection_model_path)
        emotion_classifier = load_model(emotion_model_path, compile=False)
        gender_classifier = load_model(gender_model_path, compile=False)

        # getting input model shapes for inference
        emotion_target_size = emotion_classifier.input_shape[1:3]
        gender_target_size = gender_classifier.input_shape[1:3]

        # loading images
        image_array = np.fromstring(image, np.uint8)
        unchanged_image = cv2.imdecode(image_array, cv2.IMREAD_UNCHANGED)

        rgb_image = cv2.cvtColor(unchanged_image, cv2.COLOR_BGR2RGB)
        gray_image = cv2.cvtColor(unchanged_image, cv2.COLOR_BGR2GRAY)

        faces = detect_faces(face_detection, gray_image)
        for face_coordinates in faces:
            x1, x2, y1, y2 = apply_offsets(face_coordinates, gender_offsets)
            rgb_face = rgb_image[y1:y2, x1:x2]

            x1, x2, y1, y2 = apply_offsets(face_coordinates, emotion_offsets)
            gray_face = gray_image[y1:y2, x1:x2]

            try:
                rgb_face = cv2.resize(rgb_face, (gender_target_size))
                gray_face = cv2.resize(gray_face, (emotion_target_size))
            except:
                continue

            rgb_face = preprocess_input(rgb_face, False)
            rgb_face = np.expand_dims(rgb_face, 0)
            gender_prediction = gender_classifier.predict(rgb_face)
            gender_label_arg = np.argmax(gender_prediction)
            gender_text = gender_labels[gender_label_arg]

            gray_face = preprocess_input(gray_face, True)
            gray_face = np.expand_dims(gray_face, 0)
            gray_face = np.expand_dims(gray_face, -1)
            emotion_label_arg = np.argmax(emotion_classifier.predict(gray_face))
            emotion_text = emotion_labels[emotion_label_arg]

            if gender_text == gender_labels[0]:
                color = (0, 0, 255)
            else:
                color = (255, 0, 0)

            draw_bounding_box(face_coordinates, rgb_image, color)
            draw_text(face_coordinates, rgb_image, gender_text, color, 0, -20, 1, 2)
            draw_text(face_coordinates, rgb_image, emotion_text, color, 0, -50, 1, 2)
    except Exception as err:
        logging.error('Error in emotion gender processor: "{0}"'.format(err))

    bgr_image = cv2.cvtColor(rgb_image, cv2.COLOR_RGB2BGR)

    dirname = 'result'
    if not os.path.exists(dirname):
        os.mkdir(dirname)

    cv2.imwrite(os.path.join(dirname, 'predicted_image.png'), bgr_image)
コード例 #18
def emotion_demo():
    # parameters for loading data and images
    detection_model_path = '../trained_models/detection_models/haarcascade_frontalface_default.xml'
    emotion_model_path = '../trained_models/emotion_models/fer2013_mini_XCEPTION.102-0.66.hdf5'
    emotion_labels = get_labels('fer2013')

    # additional cascade files for the eyes and nose
    lefteyecc__path = "../trained_models/detection_models/haarcascade_lefteye_2splits.xml"
    righteyecc_path = "../trained_models/detection_models/haarcascade_righteye_2splits.xml"
    nose_path = "../trained_models/detection_models/data_haarcascades_haarcascade_mcs_nose.xml"
    lefteyecc = cv2.CascadeClassifier(lefteyecc__path)
    righteyecc = cv2.CascadeClassifier(righteyecc_path)
    nose = cv2.CascadeClassifier(nose_path)
    lex = 0
    ley = 0
    lew = 0
    leh = 0
    rex = 0
    rey = 0
    rew = 0
    reh = 0
    nox = 0
    noy = 0
    now = 0
    noh = 0

    # dlib
    dlib_ini()

    # hyper-parameters for bounding boxes shape
    frame_window = 10
    emotion_offsets = (20, 40)

    # loading models
    face_detection = load_detection_model(detection_model_path)
    emotion_classifier = load_model(emotion_model_path, compile=False)

    # getting input model shapes for inference
    emotion_target_size = emotion_classifier.input_shape[1:3]

    # starting lists for calculating modes
    emotion_window = []

    global img, flag, slp_count
    img = cv2.imread('../img/happy.png')
    flag = 0
    slp_count = 0

    # global variables used by dlib
    global gray_image, rgb_image, gray_face, mark_list

    # starting video streaming
    cv2.namedWindow('window_frame', cv2.WINDOW_NORMAL)
    video_capture = cv2.VideoCapture(0)  # 0 is the built-in camera, 1 a USB camera

    while True:
        bgr_image = video_capture.read()[1]
        gray_image = cv2.cvtColor(bgr_image, cv2.COLOR_BGR2GRAY)
        rgb_image = cv2.cvtColor(bgr_image, cv2.COLOR_BGR2RGB)
        faces = detect_faces(face_detection, gray_image)

        try:
            for face_coordinates in faces:
                # for eye and nose detection
                (x, y, w, h) = face_coordinates
                video_face = gray_image[y:y + h, x:x + w]
                x1, x2, y1, y2 = apply_offsets(face_coordinates,
                                               emotion_offsets)
                gray_face = gray_image[y1:y2, x1:x2]
                try:
                    gray_face = cv2.resize(gray_face, (emotion_target_size))
                except:
                    continue

                # landmark detection
                marks_list = marks_list_def(bgr_image, x, y, w, h)
                # print(marks_list)

                gray_face = preprocess_input(gray_face, True)
                gray_face = np.expand_dims(gray_face, 0)
                gray_face = np.expand_dims(gray_face, -1)
                emotion_prediction = emotion_classifier.predict(gray_face)
                emotion_probability = np.max(emotion_prediction)
                emotion_label_arg = np.argmax(emotion_prediction)
                emotion_text = emotion_labels[emotion_label_arg]
                emotion_window.append(emotion_text)

                if len(emotion_window) > frame_window:
                    emotion_window.pop(0)
                try:
                    emotion_mode = mode(emotion_window)
                except:
                    continue

                if flag == 0 or flag == 1:
                    if emotion_text == 'angry':
                        img = cv2.imread('../img/angry.png', -1)
                        color = emotion_probability * np.asarray((255, 0, 0))
                    elif emotion_text == 'sad':
                        img = cv2.imread('../img/sad.png', -1)  # TODO: factor this out into a function
                        color = emotion_probability * np.asarray((0, 0, 255))
                    elif emotion_text == 'happy':
                        img = cv2.imread('../img/happy.png', -1)
                        color = emotion_probability * np.asarray((255, 255, 0))
                    elif emotion_text == 'surprise':
                        img = cv2.imread('../img/odoroki.png', -1)
                        color = emotion_probability * np.asarray((0, 255, 255))
                    else:
                        img = cv2.imread('../img/neutral.png', -1)
                        color = emotion_probability * np.asarray((0, 255, 0))
                else:
                    if emotion_text == 'angry':
                        img = cv2.imread('../img/ikari.png', -1)
                        color = emotion_probability * np.asarray((255, 0, 0))
                    elif emotion_text == 'sad':
                        img = cv2.imread('../img/shock.png', -1)
                        color = emotion_probability * np.asarray((0, 0, 255))
                    elif emotion_text == 'happy':
                        img = cv2.imread('../img/kirakira.png', -1)
                        color = emotion_probability * np.asarray((255, 255, 0))
                    elif emotion_text == 'surprise':
                        img = cv2.imread('../img/bikkuri.png', -1)
                        color = emotion_probability * np.asarray((0, 255, 255))
                    else:
                        img = cv2.imread('../img/toumei.png', -1)
                        color = emotion_probability * np.asarray((0, 255, 0))

                color = color.astype(int)
                color = color.tolist()

            if flag == 0:
                draw_bounding_box(face_coordinates, rgb_image, color)
            elif flag == 1:
                rgb_image = draw_bounding_box2(face_coordinates, rgb_image,
                                               color, img, marks_list)
            elif flag == 2:
                overlay_pic = draw_bounding_box3(face_coordinates, rgb_image,
                                                 color, img, marks_list)
                rgb_image = overlay_pic

            draw_text(face_coordinates, rgb_image, emotion_mode, color, 0, -45,
                      1, 1)
            bgr_image = cv2.cvtColor(rgb_image, cv2.COLOR_RGB2BGR)
        except:
            flag = 0

        if flag == 0:
            img = image_resize(img)
            cv2.imshow('image', img)
            cv2.destroyWindow('window_frame')
        elif flag == 1 or flag == 2:
            cv2.destroyWindow('image')
            cv2.imshow('window_frame', bgr_image)
        cv2.waitKey(10)

        # cv2.imshow('own_window', bgr_image)

        if cv2.waitKey(1) & 0xFF == ord('z'):
            flag = 0
        elif cv2.waitKey(1) & 0xFF == ord('x'):
            flag = 1
        elif cv2.waitKey(1) & 0xFF == ord('c'):
            flag = 2
        elif cv2.waitKey(1) & 0xFF == ord('q'):
            break

    video_capture.release()
    cv2.destroyAllWindows()
コード例 #19
  def callback(self,data):
    try:
      bgr_image = self.bridge.imgmsg_to_cv2(data, "bgr8")
      cv2.imshow("Image window", bgr_image)
    except CvBridgeError as e:
      print(e)



######################### Start the image processing
    gray_image = cv2.cvtColor(bgr_image, cv2.COLOR_BGR2GRAY)
    rgb_image = cv2.cvtColor(bgr_image, cv2.COLOR_BGR2RGB)
    faces = detect_faces(self.face_detection, gray_image)

    for face_coordinates in faces:

        x1, x2, y1, y2 = apply_offsets(face_coordinates, self.emotion_offsets)
        gray_face = gray_image[y1:y2, x1:x2]
	
        try:
            gray_face = cv2.resize(gray_face, (self.emotion_target_size))
        except:
            continue

        gray_face = preprocess_input(gray_face, True)
        gray_face = np.expand_dims(gray_face, 0)
        gray_face = np.expand_dims(gray_face, -1)
        print "start"
        print len(gray_face)
        
        print gray_face
        print "end"

        emotion_prediction = self.emotion_classifier.predict(gray_face)
        emotion_probability = np.max(emotion_prediction)
        emotion_label_arg = np.argmax(emotion_prediction)
        emotion_text = emotion_labels[emotion_label_arg]	
	print emotion_text
	print(emotion_probability)
	print('%')
        self.emotion_window.append(emotion_text)

	


        if len(self.emotion_window) > self.frame_window:
            self.emotion_window.pop(0)
        try:
            emotion_mode = mode(self.emotion_window)
        except:
            continue

        if emotion_text == 'angry':
            color = emotion_probability * np.asarray((255, 0, 0))
        elif emotion_text == 'sad':
            color = emotion_probability * np.asarray((0, 0, 255))
        elif emotion_text == 'happy':
            color = emotion_probability * np.asarray((255, 255, 0))
        elif emotion_text == 'surprise':
            color = emotion_probability * np.asarray((0, 255, 255))
        else:
            color = emotion_probability * np.asarray((0, 255, 0))

        color = color.astype(int)
        color = color.tolist()

        draw_bounding_box(face_coordinates, rgb_image, color)
        draw_text(face_coordinates, rgb_image, emotion_mode,
                  color, 0, -45, 1, 1)

    bgr_image = cv2.cvtColor(rgb_image, cv2.COLOR_RGB2BGR)
    
######################### End the image processing	



    cv2.imshow("Image window", bgr_image)
    cv2.waitKey(3)

    try:
      self.image_pub.publish(self.bridge.cv2_to_imgmsg(bgr_image, "bgr8"))
    except CvBridgeError as e:
      print(e)
コード例 #20
def process_image(image):

    features = []

    try:
        # hyper-parameters for bounding boxes shape
        gender_offsets = (10, 10)
        emotion_offsets = (0, 0)

        emotion_classifier = load_model(emotion_model_path, compile=False)
        gender_classifier = load_model(gender_model_path, compile=False)

        # getting input model shapes for inference
        emotion_target_size = emotion_classifier.input_shape[1:3]
        gender_target_size = gender_classifier.input_shape[1:3]

        # loading images
        image_array = np.fromstring(image, np.uint8)
        unchanged_image = cv2.imdecode(image_array, cv2.IMREAD_UNCHANGED)

        rgb_image = cv2.cvtColor(unchanged_image, cv2.COLOR_BGR2RGB)
        gray_image = cv2.cvtColor(unchanged_image, cv2.COLOR_BGR2GRAY)

        faces = detect_faces(face_detection, gray_image)

        for face_coordinates in faces:
            x1, x2, y1, y2 = apply_offsets(face_coordinates, gender_offsets)
            rgb_face = rgb_image[y1:y2, x1:x2]

            x1, x2, y1, y2 = apply_offsets(face_coordinates, emotion_offsets)
            gray_face = gray_image[y1:y2, x1:x2]

            try:
                rgb_face = cv2.resize(rgb_face, (gender_target_size))
                gray_face = cv2.resize(gray_face, (emotion_target_size))
            except:
                continue

            rgb_face = preprocess_input(rgb_face, False)
            rgb_face = np.expand_dims(rgb_face, 0)
            gender_prediction = gender_classifier.predict(rgb_face)

            gray_face = preprocess_input(gray_face, True)
            gray_face = np.expand_dims(gray_face, 0)
            gray_face = np.expand_dims(gray_face, -1)
            emotion_prediction = emotion_classifier.predict(gray_face)

            features.append({'gender_prediction_female':str(gender_prediction[0][0]),
            'gender_prediction_male':str(gender_prediction[0][1]),
             'emotion_prediction0':str(emotion_prediction[0][0]),
             'emotion_prediction1':str(emotion_prediction[0][1]),
             'emotion_prediction2':str(emotion_prediction[0][2]),
             'emotion_prediction3':str(emotion_prediction[0][3]),
             'emotion_prediction4':str(emotion_prediction[0][4]),
             'emotion_prediction5':str(emotion_prediction[0][5]),
             'emotion_prediction6':str(emotion_prediction[0][6]),
             'x1':str(x1),
             'x2':str(x2),
             'y1':str(y1),
             'y2':str(y2) 
             })
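            # the seven emotion scores follow the fer2013 label order used by
            # get_labels('fer2013'): 0=angry, 1=disgust, 2=fear, 3=happy, 4=sad,
            # 5=surprise, 6=neutral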
    except Exception as err:
        logging.error('Error in emotion gender processor: "{0}"'.format(err))


    K.clear_session()
    return features
コード例 #21
    if value == 'PrivateTest':
        emotion = int(row['emotion'])
        labels_Pritest.append(emotion)

# garbage collection
gc.collect()

# loading images
test_img = np.zeros((len(labels_Pritest), 96, 96))
j = count = 0
show = []
for i in os.listdir(img_fold_path):
    test = np.squeeze(
        load_image(os.path.join(img_fold_path, i),
                   grayscale=True)).astype('uint8')
    faces = detect_faces(face_detection, test)
    for face_coordinates in faces:
        x1, x2, y1, y2 = apply_offsets(face_coordinates, emotion_offsets)
        test2 = test[x1:x2, y1:y2]

    try:
        test2 = transform.resize(test2, emotion_target_size)
    except:
        continue
    test2 = np.expand_dims(test2, 0)
    test2 = np.expand_dims(test2, -1)
    emotion_label_arg = np.argmax(emotion_classifier.predict(test2))
    if labels_Pritest[j] == emotion_label_arg:
        show.append(j)
        test_img[count] = test
        count += 1
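    # NOTE: j is never incremented in this loop, so every prediction is compared
    # against labels_Pritest[0]; advancing j once per image appears to be the intent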
コード例 #22
face_detection = load_detection_model(detection_model_path)
gender_classifier = load_model(gender_model_path, compile=False)

gender_target_size = gender_classifier.input_shape[1:3]

gender_window = []

cv2.namedWindow('window_frame')
video_capture = cv2.VideoCapture('x.MP4')
fourcc = cv2.VideoWriter_fourcc(*'XVID')
out = cv2.VideoWriter('Save.avi', fourcc, 25.0, (1280, 720))
while True:

    bgr_image = video_capture.read()[1]
    if bgr_image is None:
        break
    rgb_image = cv2.cvtColor(bgr_image, cv2.COLOR_BGR2RGB)
    faces = detect_faces(face_detection, bgr_image)  #gray_image)

    for face_coordinates in faces:

        x1, x2, y1, y2 = apply_offsets(face_coordinates, gender_offsets)
        rgb_face = rgb_image[y1:y2, x1:x2]
        try:
            rgb_face = cv2.resize(rgb_face, (gender_target_size))
        except:
            continue

        rgb_face = np.expand_dims(rgb_face, 0)
        rgb_face = preprocess_input(rgb_face, False)
        gender_prediction = gender_classifier.predict(rgb_face)
        gender_label_arg = np.argmax(gender_prediction)
        gender_text = gender_labels[gender_label_arg]
コード例 #23
def face_gender(inf, inf3):  #sleep 5ms

    #cv2.namedWindow('AI3',0)               #new window
    #cv2.resizeWindow('AI3', 800, 400);     #640*480
    cap = cv2.VideoCapture('1.avi')
    currentFrame = 0
    totalFrame = cap.get(7)
    print(totalFrame)

    # parameters for loading data and images
    detection_model_path = './trained_models/detection_models/haarcascade_frontalface_default.xml'
    emotion_model_path = './trained_models/emotion_models/fer2013_mini_XCEPTION.102-0.66.hdf5'
    gender_model_path = './trained_models/gender_models/simple_CNN.81-0.96.hdf5'
    emotion_labels = get_labels('fer2013')
    gender_labels = get_labels('imdb')
    font = cv2.FONT_HERSHEY_SIMPLEX

    # hyper-parameters for bounding boxes shape
    frame_window = 10
    gender_offsets = (30, 60)
    emotion_offsets = (20, 40)

    # loading models
    face_detection = load_detection_model(detection_model_path)
    emotion_classifier = load_model(emotion_model_path, compile=False)
    gender_classifier = load_model(gender_model_path, compile=False)

    # getting input model shapes for inference
    emotion_target_size = emotion_classifier.input_shape[1:3]
    gender_target_size = gender_classifier.input_shape[1:3]

    # starting lists for calculating modes
    gender_window = []
    emotion_window = []

    # starting video streaming
    #cv2.namedWindow('window_frame')
    video_capture = cv2.VideoCapture(0)
    runFlag = 0
    displayTimes = 0
    pictureNum = 0
    menNumber = 0
    womenNumber = 0
    showHairStatus = 0

    while 1:

        time.sleep(0.01)
        #cv2.imshow("AI", frame)  # show fps
        currentFrame += 1
        #print (currentFrame)
        if currentFrame >= (totalFrame - 1):
            currentFrame = 0
            cap.set(cv2.CAP_PROP_POS_FRAMES, 1)
        cv2.waitKey(1)

        bgr_image = video_capture.read()[1]
        gray_image = cv2.cvtColor(bgr_image, cv2.COLOR_BGR2GRAY)
        rgb_image = cv2.cvtColor(bgr_image, cv2.COLOR_BGR2RGB)
        faces = detect_faces(face_detection, gray_image)

        for face_coordinates in faces:

            x1, x2, y1, y2 = apply_offsets(face_coordinates, gender_offsets)
            rgb_face = rgb_image[y1:y2, x1:x2]

            x1, x2, y1, y2 = apply_offsets(face_coordinates, emotion_offsets)
            gray_face = gray_image[y1:y2, x1:x2]
            try:
                rgb_face = cv2.resize(rgb_face, (gender_target_size))
                gray_face = cv2.resize(gray_face, (emotion_target_size))
            except:
                continue
            gray_face = preprocess_input(gray_face, False)
            gray_face = np.expand_dims(gray_face, 0)
            gray_face = np.expand_dims(gray_face, -1)
            emotion_label_arg = np.argmax(
                emotion_classifier.predict(gray_face))
            emotion_text = emotion_labels[emotion_label_arg]
            emotion_window.append(emotion_text)

            rgb_face = np.expand_dims(rgb_face, 0)
            rgb_face = preprocess_input(rgb_face, False)
            gender_prediction = gender_classifier.predict(rgb_face)
            gender_label_arg = np.argmax(gender_prediction)
            gender_text = gender_labels[gender_label_arg]
            gender_window.append(gender_text)

            if len(gender_window) > frame_window:
                emotion_window.pop(0)
                gender_window.pop(0)
            try:
                emotion_mode = mode(emotion_window)
                gender_mode = mode(gender_window)
            except:
                continue
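            # taking the mode over the last frame_window predictions smooths out
            # single-frame flickers in both the gender and emotion labels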

            if gender_text == gender_labels[0]:
                color = (0, 0, 255)
            else:
                color = (255, 0, 0)

            draw_bounding_box(face_coordinates, rgb_image, color)
            draw_text(face_coordinates, rgb_image, gender_mode, color, 0, -20,
                      1, 2)
            draw_text(face_coordinates, rgb_image, emotion_mode, color, 0, -45,
                      1, 2)
            #print (gender_mode)
            if gender_mode == 'man':
                menNumber += 1
            if gender_mode == 'woman':
                womenNumber += 1
            if menNumber > 10000:
                menNumber = 5000
            if womenNumber > 10000:
                womenNumber = 5000
        bgr_image = cv2.cvtColor(rgb_image, cv2.COLOR_RGB2BGR)
        size = (int(400), int(400))
        if inf3.empty() == False:
            pictureNum = inf3.get()
            runFlag = 1
            print(pictureNum)
        if runFlag == 1:
            #			pictureNum += 1
            #			pictureNum < 100:
            # pick the picture set based on which gender was seen more often
            gender_dir = './woman' if womenNumber > menNumber else './man'
            if 1 <= pictureNum <= 6:
                frame = cv2.imread('%s/%d.jpg' % (gender_dir, pictureNum))
            #else:
            runFlag = 0
            pictureNum = 0
            womenNumber = 0
            menNumber = 0
            showHairStatus = 1
        if showHairStatus == 0:
            ret, frame = cap.read()  # get image
        if showHairStatus == 1:
            displayTimes += 1
            if displayTimes >= 150:
                displayTimes = 0
                showHairStatus = 0
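        # showHairStatus keeps the recommendation picture on screen for about 150
        # iterations before switching back to frames from the looping demo video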

        bgr_image = cv2.resize(bgr_image, size, interpolation=cv2.INTER_AREA)

        #if frame.empty() == False:
        frame = cv2.resize(frame, size, interpolation=cv2.INTER_AREA)
        htitch = np.hstack((bgr_image, frame))
        cv2.imshow('AI', htitch)
        #inf.put(htitch)
        if cv2.waitKey(1) & 0xFF == ord('q'):
            break
コード例 #24
def exe_c(path):
    image_path = path  # assign the supplied picture path to image_path

    # parameters for loading data and images
        #sys.argv[1]
    img_i=1
    detection_model_path = '../trained_models/detection_models/haarcascade_frontalface_default.xml'
    emotion_model_path = '../trained_models/emotion_models/fer2013_mini_XCEPTION.102-0.66.hdf5'
    gender_model_path = '../trained_models/gender_models/simple_CNN.81-0.96.hdf5'
    emotion_labels = get_labels('fer2013')
    gender_labels = get_labels('imdb')
    font = cv2.FONT_HERSHEY_SIMPLEX

    # hyper-parameters for bounding boxes shape
    #gender_offsets = (18, 25)
    gender_offsets = (10, 10)
    #emotion_offsets = (18, 25)
    emotion_offsets = (0, 0)

    # loading models: face_detection, emotion_classifier, gender_classifier
    face_detection = load_detection_model(detection_model_path)
    emotion_classifier = load_model(emotion_model_path, compile=False)
    gender_classifier = load_model(gender_model_path, compile=False)

    # getting input model shapes for inference
    emotion_target_size = emotion_classifier.input_shape[1:3]
    gender_target_size = gender_classifier.input_shape[1:3]

    # loading images
    rgb_image = load_image(image_path, grayscale=False)
    gray_image = load_image(image_path, grayscale=True)
    gray_image = np.squeeze(gray_image)
    gray_image = gray_image.astype('uint8')

    faces = detect_faces(face_detection, gray_image)  # detect faces on the grayscale image

    Jsons = []
    for inx, face_coordinates in enumerate(faces):  # enumerate yields the face index inx
        x1, x2, y1, y2 = apply_offsets(face_coordinates, gender_offsets)
        rgb_face = rgb_image[y1:y2, x1:x2]

        x1, x2, y1, y2 = apply_offsets(face_coordinates, emotion_offsets)
        gray_face = gray_image[y1:y2, x1:x2]

        try:
            rgb_face = cv2.resize(rgb_face, (gender_target_size))
            gray_face = cv2.resize(gray_face, (emotion_target_size))
        except:
            continue

        rgb_face = preprocess_input(rgb_face, False)
        rgb_face = np.expand_dims(rgb_face, 0)
        gender_prediction = gender_classifier.predict(rgb_face)
        gender_label_arg = np.argmax(gender_prediction)
        gender_text = gender_labels[gender_label_arg]

        #jsonp['gender_text'] = gender_text    # add the gender label to jsonp

        gray_face = preprocess_input(gray_face, True)
        gray_face = np.expand_dims(gray_face, 0)
        gray_face = np.expand_dims(gray_face, -1)
        emotion_label_arg = np.argmax(emotion_classifier.predict(gray_face))
        emotion_text = emotion_labels[emotion_label_arg]

        # jsonp['emotion_text'] = emotion_text    # add the emotion label to jsonp

        if gender_text == gender_labels[0]:
            color = (0, 0, 255)
        else:
            color = (255, 0, 0)

        draw_bounding_box(face_coordinates, rgb_image, color)
        draw_text(face_coordinates, rgb_image, gender_text, color, 0, -20, 1, 2)
        draw_text(face_coordinates, rgb_image, emotion_text, color, 0, -50, 1, 2)

        # convert the bound list into a dict below
        bound_key=['x','y','w','h']
        c_x,c_y,c_w,c_h=face_coordinates
        bound_xy=[str(c_x),str(c_y),str(c_w),str(c_h)]
        bound_box = dict(zip(bound_key, bound_xy))
        print(gender_text)

        Jsons.append({'person':inx,"gender":gender_text,"emotion":emotion_text,'bound_box':bound_box})  # append each person's dict to the Jsons list
    bgr_image = cv2.cvtColor(rgb_image, cv2.COLOR_RGB2BGR)
    img_i+=1
    cv2.imwrite('../images/upload/result/predict_img'+str(img_i)+'.png', bgr_image)  # write the annotated image to this path

        #{'gender':gender_text,'emotion':emotion_text}
    return Jsons
コード例 #25
def recognition(f_2_s):

    tt = False  # used to escape the nested conditionals (Python has no goto)
    emotion_c = ""
    #recap == False
    # parameters for loading data and images
    #image_path = path_r
    image_path = image_handler(f_2_s)
    detection_model_path = '../trained_models/detection_models/haarcascade_frontalface_default.xml'
    emotion_model_path = '../trained_models/emotion_models/fer2013_mini_XCEPTION.102-0.66.hdf5'
    emotion_labels = get_labels('fer2013')
    font = cv2.FONT_HERSHEY_SIMPLEX

    # hyper-parameters for bounding boxes shape
    emotion_offsets = (0, 0)

    # loading models
    face_detection = load_detection_model(detection_model_path)
    emotion_classifier = load_model(emotion_model_path, compile=False)

    # getting input model shapes for inference
    emotion_target_size = emotion_classifier.input_shape[1:3]

    # loading images
    rgb_image = load_image(image_path, grayscale=False)
    ########################################################
    #rgb_image2 = load_image(image_path, grayscale=False)
    ########################################################
    gray_image = load_image(image_path, grayscale=True)
    gray_image = np.squeeze(gray_image)
    gray_image = gray_image.astype('uint8')

    faces = detect_faces(face_detection, gray_image)
    for face_coordinates in faces:

        x1, x2, y1, y2 = apply_offsets(face_coordinates, emotion_offsets)
        gray_face = gray_image[y1:y2, x1:x2]

        try:
            gray_face = cv2.resize(gray_face, (emotion_target_size))
        except:
            #pyautogui.confirm(text='one more', title='test', buttons=['ok', 'exit'])
            #recap = True
            continue

        gray_face = preprocess_input(gray_face, True)
        gray_face = np.expand_dims(gray_face, 0)
        gray_face = np.expand_dims(gray_face, -1)
        emotion_label_arg = np.argmax(emotion_classifier.predict(gray_face))
        emotion_text = ""
        emotion_text = emotion_labels[emotion_label_arg]

        # if emotion recognition succeeds, ask the user to confirm the emotion, then save or exit

        tof = pyautogui.confirm(text='Are you ' + emotion_text + '?',
                                title=emotion_text,
                                buttons=['yes', 'no'])
        if (tof == 'yes'):
            tt = True

            emotion_c = emotion_text

            color = (255, 0, 0)  # red for the emotion text and the rectangle

            #draw_bounding_box(face_coordinates, rgb_image, color)
            #draw_text(face_coordinates, rgb_image, emotion_text, color, 0, -30, 1.5, 2)

        elif (tof == 'no'):
            tt = False
            break

        #color = (255, 0, 0)  # red for the emotion text and the rectangle

        #draw_bounding_box(face_coordinates, rgb_image, color)
        #draw_text(face_coordinates, rgb_image, emotion_text, color, 0, -30, 1.5, 2)

    if (tt == True):
        bgr_image = cv2.cvtColor(rgb_image, cv2.COLOR_RGB2BGR)
        next_num = next_index(emotion_text)
        cv2.imwrite('../src/' + emotion_text + 'z' + str(next_num) + '.jpg',
                    bgr_image)  # uses the variable (originally bgr_image)
        f = open(emotion_text + 'z' + str(next_num) + ".txt",
                 'w',
                 encoding="UTF8")
        f.close()

        img = cv2.imread(image_path, cv2.IMREAD_COLOR)

        draw_bounding_box(face_coordinates, img, color)
        draw_text(face_coordinates, img, emotion_text, color, 0, -30, 1.5, 2)

        while (True):
            cv2.imshow(image_path, img)

            if cv2.waitKey(1) > 0:
                break

        #check_recoged_img('../src/'+ emotion_text +'z'+ str(next_num) +'.jpg')    # this would save the photo with the rectangle and emotion info drawn on it
    else:
        pyautogui.alert(text='no emotion captured', title='error', button='OK')
コード例 #26
    def show_face_information(self):
        # bgr_image = img
        gray_image = cv2.cvtColor(self.bgr_image, cv2.COLOR_BGR2GRAY)
        rgb_image = cv2.cvtColor(self.bgr_image, cv2.COLOR_BGR2RGB)
        faces = detect_faces(face_detection, gray_image)

        img_h, img_w, _ = np.shape(rgb_image)

        for face_coordinates in faces:
            x1, x2, y1, y2 = apply_offsets(face_coordinates, gender_offsets)
            rgb_face = rgb_image[y1:y2, x1:x2]

            x1, x2, y1, y2 = apply_offsets(face_coordinates, emotion_offsets)
            gray_face = gray_image[y1:y2, x1:x2]
            try:
                rgb_face = cv2.resize(rgb_face, (gender_target_size))
                gray_face = cv2.resize(gray_face, (emotion_target_size))
            except:
                continue
            # run_thread(bgr_image)

            gray_face = preprocess_input(gray_face, False)
            gray_face = np.expand_dims(gray_face, 0)
            gray_face = np.expand_dims(gray_face, -1)
            emotion_label_arg = np.argmax(
                emotion_classifier.predict(gray_face))
            emotion_text = emotion_labels[emotion_label_arg]

            emotion_window.append(emotion_text)
            # emotion_window.append(English_2_chinese_emotion(emotion_text))

            rgb_face = np.expand_dims(rgb_face, 0)
            rgb_face = preprocess_input(rgb_face, False)
            gender_prediction = gender_classifier.predict(rgb_face)
            gender_label_arg = np.argmax(gender_prediction)
            gender_text = gender_labels[gender_label_arg]
            # gender_window.append(English_2_chinese_gender(gender_text))

            set_icon = emotion_text + "_" + gender_text
            print(set_icon)
            icon_img = self.icon_dict[set_icon]
            words_img = self.words_dict[set_icon]

            # if len(gender_window) > frame_window:
            #     emotion_window.pop(0)
            #     gender_window.pop(0)
            # try:
            #     emotion_mode = mode(emotion_window)
            #     gender_mode = mode(gender_window)
            # except:
            #     continue

            if gender_text == gender_labels[0]:
                color = (0, 0, 255)
            else:
                color = (255, 0, 0)

            ###################
            if (self.frq % 60 == 0):

                # detect faces using dlib detector
                detected = self.detector(rgb_image, 1)
                print(detected)
                faces_age = np.empty((len(detected), img_size, img_size, 3))

                if len(detected) > 0:
                    for i, d in enumerate(detected):
                        x1, y1, x2, y2, w, h = d.left(), d.top(), d.right(
                        ) + 1, d.bottom() + 1, d.width(), d.height()
                        xw1 = max(int(x1 - self.margin * w), 0)
                        yw1 = max(int(y1 - self.margin * h), 0)
                        xw2 = min(int(x2 + self.margin * w), img_w - 1)
                        yw2 = min(int(y2 + self.margin * h), img_h - 1)
                        cv2.rectangle(img, (x1, y1), (x2, y2), (255, 0, 0), 2)
                        # cv2.rectangle(img, (xw1, yw1), (xw2, yw2), (255, 0, 0), 2)
                        faces_age[i, :, :, :] = cv2.resize(
                            img[yw1:yw2 + 1, xw1:xw2 + 1, :],
                            (img_size, img_size))

                    # predict ages and genders of the detected faces
                    results = self.model.predict(faces_age)
                    ages = np.arange(0, 101).reshape(101, 1)
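                    # results[1] holds a 101-way age probability distribution per
                    # face, so the dot product with ages 0..100 gives the expected
                    # (mean) age for each detected face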
                    predicted_ages = results[1].dot(ages).flatten()
                    print(predicted_ages)
            ###################

            self.frq += 1

            if ((face_coordinates[0] - face_coordinates[2]) > 50
                    and (face_coordinates[0] - face_coordinates[2]) < 180
                    and (face_coordinates[1] - 80) > 20):
                solid_box = draw_solid_box(face_coordinates, rgb_image)
                draw_bounding_box(face_coordinates, rgb_image, color)
                solid_box = Addemotion(face_coordinates, solid_box, icon_img)
                solid_box = Addemotion_word(face_coordinates, solid_box,
                                            words_img)
                draw_text(face_coordinates, rgb_image,
                          str(int(predicted_ages)), (255, 255, 255), 0, -20, 1,
                          1)

            return rgb_image
コード例 #27
def recognition(f_2_s):
    # used to escape the nested conditionals (Python has no goto)
    tt = False  # variable for escaping the nested conditionals
    emotion_c = ""  # variable for escaping the nested conditionals

    #recap == False
    # parameters for loading data and images
    #image_path = path_r

    image_path = image_handler(f_2_s)  # returns the path of the photo saved once the user confirms saving

    # paths to the trained models and the emotion labels
    detection_model_path = '../trained_models/detection_models/haarcascade_frontalface_default.xml'
    emotion_model_path = '../trained_models/emotion_models/fer2013_mini_XCEPTION.102-0.66.hdf5'
    emotion_labels = get_labels('fer2013')
    font = cv2.FONT_HERSHEY_SIMPLEX  # font used when displaying the emotion info

    # hyper-parameters for bounding boxes shape
    emotion_offsets = (0, 0)

    # loading models
    face_detection = load_detection_model(detection_model_path)
    emotion_classifier = load_model(emotion_model_path, compile=False)

    # getting input model shapes for inference
    emotion_target_size = emotion_classifier.input_shape[1:3]

    # loading images
    rgb_image = load_image(image_path, grayscale=False)
    gray_image = load_image(image_path, grayscale=True)
    gray_image = np.squeeze(gray_image)
    gray_image = gray_image.astype('uint8')

    faces = detect_faces(face_detection, gray_image)
    for face_coordinates in faces:

        x1, x2, y1, y2 = apply_offsets(face_coordinates, emotion_offsets)
        gray_face = gray_image[y1:y2, x1:x2]

        try:
            gray_face = cv2.resize(gray_face, (emotion_target_size))
        except:
            #pyautogui.confirm(text='one more', title='test', buttons=['ok', 'exit'])
            #recap = True
            continue

        gray_face = preprocess_input(gray_face, True)
        gray_face = np.expand_dims(gray_face, 0)
        gray_face = np.expand_dims(gray_face, -1)
        emotion_label_arg = np.argmax(emotion_classifier.predict(gray_face))
        emotion_text = ""
        emotion_text = emotion_labels[emotion_label_arg]

        # if emotion recognition succeeds, ask the user to confirm the emotion, then save or exit

        # ask whether the detected emotion is correct and read the user's answer
        tof = pyautogui.confirm(text='Are you ' + emotion_text + '?',
                                title=emotion_text,
                                buttons=['yes', 'no'])
        if (tof == 'yes'):  # the user pressed "yes" in the dialog
            tt = True  # to escape the nested conditionals

            emotion_c = emotion_text

            color = (255, 0, 0)  # red for the emotion text and the rectangle

            #draw_bounding_box(face_coordinates, rgb_image, color)
            #draw_text(face_coordinates, rgb_image, emotion_text, color, 0, -30, 1.5, 2)

        elif (tof == 'no'):  # the user pressed "no" in the dialog
            tt = False
            break

        #color = (255, 0, 0)  # red for the emotion text and the rectangle

        #draw_bounding_box(face_coordinates, rgb_image, color)
        #draw_text(face_coordinates, rgb_image, emotion_text, color, 0, -30, 1.5, 2)

    if (tt == True):
        # "yes" was pressed: save the photo under a filename that encodes the emotion
        bgr_image = cv2.cvtColor(rgb_image, cv2.COLOR_RGB2BGR)
        next_num = next_index(
            emotion_text)  # count the existing files for this emotion; the next index is used
        cv2.imwrite('../src/' + emotion_text + 'z' + str(next_num) + '.jpg',
                    bgr_image)  # a new emotion-labelled photo is created
        f = open(emotion_text + 'z' + str(next_num) + ".txt",
                 'w',
                 encoding="UTF8")  # create the matching diary file as well
        f.close()

        # then show the detected face region and emotion info to the user
        img = cv2.imread(image_path, cv2.IMREAD_COLOR)

        draw_bounding_box(face_coordinates, img, color)
        draw_text(face_coordinates, img, emotion_text, color, 0, -30, 1.5, 2)

        while (True):  # freeze the window until a key is pressed
            cv2.imshow(image_path, img)

            if cv2.waitKey(1) > 0:
                break
        # exit the function once the check is done

        #check_recoged_img('../src/'+ emotion_text +'z'+ str(next_num) +'.jpg')    # this would save the photo with the rectangle and emotion info drawn on it

    else:  # alert that no emotion was captured (recognition error or mismatch with what the user expected)
        pyautogui.alert(text='no emotion captured', title='error', button='OK')
コード例 #28
def process_image(image):

    try:
        # parameters for loading data and images
        detection_model_path = './trained_models/detection_models/haarcascade_frontalface_default.xml'
        emotion_model_path = './trained_models/emotion_models/fer2013_mini_XCEPTION.102-0.66.hdf5'
        gender_model_path = './trained_models/gender_models/simple_CNN.81-0.96.hdf5'
        emotion_labels = get_labels('fer2013')
        gender_labels = get_labels('imdb')
        font = cv2.FONT_HERSHEY_SIMPLEX

        # hyper-parameters for bounding boxes shape
        # gender_offsets = (30, 60)  # overridden immediately below
        gender_offsets = (10, 10)
        # emotion_offsets = (20, 40)  # overridden immediately below
        emotion_offsets = (0, 0)

        # loading models
        face_detection = load_detection_model(detection_model_path)
        emotion_classifier = load_model(emotion_model_path, compile=False)
        gender_classifier = load_model(gender_model_path, compile=False)

        # getting input model shapes for inference
        emotion_target_size = emotion_classifier.input_shape[1:3]
        gender_target_size = gender_classifier.input_shape[1:3]

        # loading images
        image_array = np.frombuffer(image, np.uint8)  # np.fromstring is deprecated for binary data
        unchanged_image = cv2.imdecode(image_array, cv2.IMREAD_UNCHANGED)

        rgb_image = cv2.cvtColor(unchanged_image, cv2.COLOR_BGR2RGB)
        gray_image = cv2.cvtColor(unchanged_image, cv2.COLOR_BGR2GRAY)

        faces = detect_faces(face_detection, gray_image)
        for face_coordinates in faces:
            x1, x2, y1, y2 = apply_offsets(face_coordinates, gender_offsets)
            rgb_face = rgb_image[y1:y2, x1:x2]

            x1, x2, y1, y2 = apply_offsets(face_coordinates, emotion_offsets)
            gray_face = gray_image[y1:y2, x1:x2]

            try:
                rgb_face = cv2.resize(rgb_face, gender_target_size)
                gray_face = cv2.resize(gray_face, emotion_target_size)
            except:
                continue

            rgb_face = preprocess_input(rgb_face, False)
            rgb_face = np.expand_dims(rgb_face, 0)
            gender_prediction = gender_classifier.predict(rgb_face)
            gender_label_arg = np.argmax(gender_prediction)
            gender_text = gender_labels[gender_label_arg]

            gray_face = preprocess_input(gray_face, True)
            gray_face = np.expand_dims(gray_face, 0)
            gray_face = np.expand_dims(gray_face, -1)
            emotion_label_arg = np.argmax(emotion_classifier.predict(gray_face))
            emotion_text = emotion_labels[emotion_label_arg]

            return emotion_text

#             if gender_text == gender_labels[0]:
#                 color = (0, 0, 255)
#             else:
#                 color = (255, 0, 0)

#             draw_bounding_box(face_coordinates, rgb_image, color)
#             draw_text(face_coordinates, rgb_image, gender_text, color, 0, -20, 1, 2)
#             draw_text(face_coordinates, rgb_image, emotion_text, color, 0, -50, 1, 2)
    except Exception as err:
        logging.error('Error in emotion gender processor: "{0}"'.format(err))
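
A hedged usage sketch for process_image() above: it expects the raw encoded bytes of an image, so a caller might read a file in binary mode and pass the bytes straight through (the file name is illustrative only):

if __name__ == '__main__':
    # Hypothetical caller: 'test.jpg' is a placeholder path, not from the original code.
    with open('test.jpg', 'rb') as image_file:
        print(process_image(image_file.read()))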
コード例 #29
detection_model_path = '../models/haarcascade_files/haarcascade_frontalface_default.xml'
emotion_model_path = '../models/model.85-0.65.hdf5'
source_image_path = sys.argv[1]

# hyper-parameters for bounding boxes shape
# loading models
face_detection = load_detection_model(detection_model_path)
emotion_classifier = load_model(emotion_model_path, compile=False)
EMOTIONS = get_labels()

# reading the frame
frame = cv2.imread(source_image_path)
frame = imutils.resize(frame, width=400)
gray = cv2.cvtColor(frame, cv2.COLOR_BGR2GRAY)
rgb = cv2.cvtColor(frame, cv2.COLOR_BGR2RGB)
faces = detect_faces(face_detection, gray)

if len(faces) > 0:
    # keep only the largest detected face; boxes are (x, y, w, h), so area is w * h
    faces = sorted(faces, reverse=True, key=lambda x: x[2] * x[3])[0]
    (fX, fY, fW, fH) = faces
    # Extract the ROI of the face from the grayscale image, resize it to a fixed 48x48 pixels, and then prepare
    # the ROI for classification via the CNN
    roi = gray[fY:fY + fH, fX:fX + fW]
    roi = cv2.resize(roi, (48, 48))
    roi = roi.astype("float") / 255.0
    roi = img_to_array(roi)
    roi = np.expand_dims(roi, axis=0)

    preds = emotion_classifier.predict(roi)[0]
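    # Plausible continuation (assumption, not part of the original snippet):
    # map the probability vector to a label using EMOTIONS defined above.
    emotion_probability = np.max(preds)
    emotion_text = EMOTIONS[np.argmax(preds)]
    print('detected {} with probability {:.2f}'.format(emotion_text, emotion_probability))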
コード例 #30
def detection_FER():
    # parameters for loading data and images
    detection_model_path = '../FER_CNN/models/haarcascade_frontalface_default.xml'
    emotion_model_path = '../FER_CNN/models/cnn_model.hdf5'
    emotion_labels = {
        0: 'Angry',
        1: 'Disgust',
        2: 'Fear',
        3: 'Happy',
        4: 'Sad',
        5: 'Surprise',
        6: 'Neutral'
    }

    # hyper-parameters for bounding boxes shape
    frame_window = 10
    emotion_offsets = (20, 40)

    # loading models
    face_detection = load_detection_model(detection_model_path)
    emotion_classifier = load_model(emotion_model_path, compile=False)

    # getting input model shapes for inference
    emotion_target_size = emotion_classifier.input_shape[1:3]

    # starting lists for calculating modes
    emotion_window = []

    # starting video streaming
    cv2.namedWindow('Face Expression Recognition')
    video_capture = cv2.VideoCapture(0)
    while True:
        bgr_image = video_capture.read()[1]
        gray_image = cv2.cvtColor(bgr_image, cv2.COLOR_BGR2GRAY)
        rgb_image = cv2.cvtColor(bgr_image, cv2.COLOR_BGR2RGB)
        faces = detect_faces(face_detection, gray_image)

        for face_coordinates in faces:

            x1, x2, y1, y2 = apply_offsets(face_coordinates, emotion_offsets)
            gray_face = gray_image[y1:y2, x1:x2]
            try:
                gray_face = cv2.resize(gray_face, emotion_target_size)
            except:
                continue

            gray_face = preprocess_input(gray_face, True)
            gray_face = np.expand_dims(gray_face, 0)
            gray_face = np.expand_dims(gray_face, -1)
            emotion_prediction = emotion_classifier.predict(gray_face)
            emotion_probability = np.max(emotion_prediction)
            emotion_label_arg = np.argmax(emotion_prediction)
            emotion_text = emotion_labels[emotion_label_arg]
            emotion_window.append(emotion_text)

            if len(emotion_window) > frame_window:
                emotion_window.pop(0)
            try:
                emotion_mode = mode(emotion_window)
            except:
                continue

            if emotion_text == 'Angry':
                color = emotion_probability * np.asarray((255, 0, 0))
            elif emotion_text == 'Sad':
                color = emotion_probability * np.asarray((0, 0, 255))
            elif emotion_text == 'Happy':
                color = emotion_probability * np.asarray((255, 255, 0))
            elif emotion_text == 'Surprise':
                color = emotion_probability * np.asarray((0, 255, 255))
            elif emotion_text == 'Neutral':
                color = emotion_probability * np.asarray((0, 255, 255))
            elif emotion_text == 'Fear':
                color = emotion_probability * np.asarray((0, 255, 255))
            else:
                color = emotion_probability * np.asarray((0, 255, 0))
            color = color.astype(int)
            color = color.tolist()

            draw_bounding_box(face_coordinates, rgb_image, color)
            draw_text(face_coordinates, rgb_image, emotion_mode, color, 30,
                      290, 1, 1)

        bgr_image = cv2.cvtColor(rgb_image, cv2.COLOR_RGB2BGR)
        cv2.imshow('Face Expression Recognition', bgr_image)

        if cv2.waitKey(1) & 0xFF == ord('q'):
            break


# if __name__=="__main__":
#     detection_FER()
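
A hedged entry-point sketch for detection_FER(); the original keeps the guard commented out, and since the VideoCapture is local to the function, releasing it would have to be added inside detection_FER itself:

# Hypothetical entry point (assumption: running this file directly is intended).
if __name__ == "__main__":
    try:
        detection_FER()
    finally:
        cv2.destroyAllWindows()  # close the preview window on exit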
コード例 #31
def inference(topic, args, cam_source, config, publisher):
    from statistics import mode
    import cv2
    from keras.models import load_model
    import numpy as np

    from utils.datasets import get_labels
    from utils.inference import detect_faces
    from utils.inference import draw_text
    from utils.inference import draw_bounding_box
    from utils.inference import apply_offsets
    from utils.inference import load_detection_model
    from utils.preprocessor import preprocess_input

    logger.info("inference")

    if args.intu:
        logger.info("inference(): results will be published to intu")
    else:
        logger.info("inference(): working standalone")

    # parameters for loading data and images
    detection_model_path = config.get("model", "detection")
    emotion_model_path = config.get("model", "emotion")
    emotion_labels = get_labels('fer2013')

    # timeout for video source access
    if args.intu:
        timeout = float(config.get("intu", "video_timeout"))
    else:
        timeout = 0
    end_time = time.time() + timeout

    # hyper-parameters for bounding boxes shape
    frame_window = 10
    emotion_offsets = (20, 40)

    # loading models
    face_detection = load_detection_model(detection_model_path)
    emotion_classifier = load_model(emotion_model_path, compile=False)

    # getting input model shapes for inference
    emotion_target_size = emotion_classifier.input_shape[1:3]

    # starting lists for calculating modes
    emotion_window = []

    # starting video streaming
    cv2.namedWindow('emotion_inference')

    video_capture = cv2.VideoCapture(cam_source)
    while True:
        ret, bgr_image = video_capture.read()
        logger.debug("inference: video_capture.read() ret is %s", ret)
        while not ret:
            logger.info("inference: error capturing video; make sure the camera is accessible to the system and that the correct device index is being used")
            logger.info("end_time is %s", str(end_time))
            logger.info("the current time is %s", str(time.time()))
            if math.isclose(end_time, time.time(), abs_tol=5.0):
                logger.info("video stream access timeout, exiting...")
                exit()
            else:
                logger.info("waiting 10 seconds for the next try, delta %s", end_time - time.time())
                time.sleep(10)
                ret, bgr_image = video_capture.read()

        end_time = time.time() + 20
        gray_image = cv2.cvtColor(bgr_image, cv2.COLOR_BGR2GRAY)
        rgb_image = cv2.cvtColor(bgr_image, cv2.COLOR_BGR2RGB)
        faces = detect_faces(face_detection, gray_image)

        for face_coordinates in faces:

            x1, x2, y1, y2 = apply_offsets(face_coordinates, emotion_offsets)
            gray_face = gray_image[y1:y2, x1:x2]
            try:
                gray_face = cv2.resize(gray_face, emotion_target_size)
            except:
                continue

            gray_face = preprocess_input(gray_face, True)
            gray_face = np.expand_dims(gray_face, 0)
            gray_face = np.expand_dims(gray_face, -1)
            emotion_prediction = emotion_classifier.predict(gray_face)
            emotion_probability = np.max(emotion_prediction)
            emotion_label_arg = np.argmax(emotion_prediction)
            emotion_text = emotion_labels[emotion_label_arg]
            emotion_window.append(emotion_text)

            if len(emotion_window) > frame_window:
                emotion_window.pop(0)
            try:
                emotion_mode = mode(emotion_window)
            except:
                continue
            logger.info("emotion is %s, with probability %s", emotion_text, emotion_probability)
            publisher.write_and_pub({'emotion_text': emotion_text, 'emotion_probability': float(emotion_probability), 'hostname': socket.gethostname()})

            if args.intu:
                FaceEmotionClient.publish_emotion(emotion_label_arg, emotion_text, emotion_probability)

            if emotion_text == 'angry':
                color = emotion_probability * np.asarray((255, 0, 0))
            elif emotion_text == 'sad':
                color = emotion_probability * np.asarray((0, 0, 255))
            elif emotion_text == 'happy':
                color = emotion_probability * np.asarray((255, 255, 0))
            elif emotion_text == 'surprise':
                color = emotion_probability * np.asarray((0, 255, 255))
            else:
                color = emotion_probability * np.asarray((0, 255, 0))

            color = color.astype(int)
            color = color.tolist()

            draw_bounding_box(face_coordinates, rgb_image, color)
            draw_text(face_coordinates, rgb_image, emotion_mode,
                      color, 0, -45, 1, 1)

        bgr_image = cv2.cvtColor(rgb_image, cv2.COLOR_RGB2BGR)
        cv2.imshow('emotion_inference', bgr_image)

        if cv2.waitKey(1) & 0xFF == ord('q'):
            logger.info("canceled with keyboard, exiting...")
            break
        if args.intu and (not topic.is_connected):
            logger.info("disconnected from intu, exiting...")
            break
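
For local testing, the publisher argument to inference() only needs to expose write_and_pub(); a minimal hypothetical stand-in (the class name and behaviour are assumptions, not part of the original code):

class StdoutPublisher:
    """Hypothetical publisher stub: prints each payload instead of publishing it."""

    def write_and_pub(self, payload):
        print(payload)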
コード例 #32
def gender_detection(image_path):
    # parameters for loading data and images
    detection_model_path = 'C:\\Users\\l1f15bscs0049\\Desktop\\haarcascade_frontalface_default.xml'
    gender_model_path = 'C:\\Users\\l1f15bscs0049\\Desktop\\simple_CNN.81-0.96.hdf5'
    gender_labels = get_labels('imdb')
    font = cv2.FONT_HERSHEY_SIMPLEX

    # hyper-parameters for bounding boxes shape
    # gender_offsets = (30, 60)  # overridden immediately below
    gender_offsets = (10, 10)

    # loading models
    face_detection = load_detection_model(detection_model_path)
    gender_classifier = load_model(gender_model_path, compile=False)

    # getting input model shapes for inference
    gender_target_size = gender_classifier.input_shape[1:3]

    # loading images
    rgb_image = load_image(image_path, grayscale=False)
    gray_image = load_image(image_path, grayscale=True)
    gray_image = np.squeeze(gray_image)
    gray_image = gray_image.astype('uint8')

    # create a temporary file used to tally the gender predictions
    save_path = 'C:\\Users\\l1f15bscs0049\\Desktop'
    completeName = os.path.join(save_path, "hellojee.txt")
    file = open(completeName, "a")

    faces = detect_faces(face_detection, gray_image)
    for face_coordinates in faces:
        x1, x2, y1, y2 = apply_offsets(face_coordinates, gender_offsets)
        rgb_face = rgb_image[y1:y2, x1:x2]

        try:
            rgb_face = cv2.resize(rgb_face, gender_target_size)

        except:
            continue

        rgb_face = preprocess_input(rgb_face, False)
        rgb_face = np.expand_dims(rgb_face, 0)
        gender_prediction = gender_classifier.predict(rgb_face)
        gender_label_arg = np.argmax(gender_prediction)
        gender_text = gender_labels[gender_label_arg]
        #print(gender_label_arg)
        file.write(str(gender_label_arg))
        file.write("\n")

        if gender_text == gender_labels[0]:
            color = (0, 0, 255)
        else:
            color = (255, 0, 0)

        draw_bounding_box(face_coordinates, rgb_image, color)
        draw_text(face_coordinates, rgb_image, gender_text, color, 0, -20, 1,
                  2)
        #print(gender_label_arg)

    bgr_image = cv2.cvtColor(rgb_image, cv2.COLOR_RGB2BGR)
    #cv2.imwrite('C:\\Users\\l1f15bscs0049\\Desktop\\a.png', bgr_image)
    print('\n\tGender Detection Done')

    file.close()

    #check men women count
    from collections import Counter
    with open(completeName, "r") as f:
        cd = Counter(int(line.split(None, 1)[0]) for line in f)
    #print(cd)

    women_count = cd[0]
    men_count = cd[1]
    # print(women_count)
    #print(men_count)
    #print(cd[0])
    #print(cd[1])
    os.remove(completeName)
    print("file removed")
    #call a wrapper function
    if (women_count > men_count):
        print("Women detected")
        Wrapper_func(0)

    elif (men_count > women_count):
        print("men detected")
        Wrapper_func(1)

    else:
        print("No detection\nRandom ads playing\n")
        random_ads()

    file.close()
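
The round trip through hellojee.txt exists only to tally per-face gender indices; an in-memory sketch with collections.Counter would avoid the temporary file entirely (assuming the same convention of 0 for woman and 1 for man used above):

from collections import Counter


def tally_genders(gender_label_args):
    # Sketch: count predicted gender indices in memory (0 = woman, 1 = man assumed).
    counts = Counter(gender_label_args)
    return counts[0], counts[1]  # (women_count, men_count)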
コード例 #33
color = (0, 255, 0)

# getting input model shapes for inference
target_size = model.input_shape[1:3]

# starting lists for calculating modes
emotion_window = []

# starting video streaming
cv2.namedWindow('window_frame')
video_capture = cv2.VideoCapture(1)
while True:
    bgr_image = video_capture.read()[1]
    gray_image = cv2.cvtColor(bgr_image, cv2.COLOR_BGR2GRAY)
    rgb_image = cv2.cvtColor(bgr_image, cv2.COLOR_BGR2RGB)
    faces = detect_faces(face_detection, gray_image)

    for face_coordinates in faces:

        x1, x2, y1, y2 = apply_offsets(face_coordinates, offsets)
        gray_face = gray_image[y1:y2, x1:x2]
        try:
            gray_face = cv2.resize(gray_face, target_size)
        except:
            continue

        gray_face = preprocess_input(gray_face, True)
        gray_face = np.expand_dims(gray_face, 0)
        gray_face = np.expand_dims(gray_face, -1)
        guided_gradCAM = calculate_guided_gradient_CAM(gray_face,
                                                       gradient_function,
                                                       saliency_function)
コード例 #34
def callback(self, data):
                    #/////////////////////////////////////////////
                    #cv_image = bridge.imgmsg_to_cv2(image_msg, desired_encoding="passthrough")
                    #cv_image = cv2.resize(cv_image, target_size)  # resize image
                    #np_image = np.asarray(cv_image)               # read as np array
                    #np_image = np.expand_dims(np_image, axis=0)   # Add another dimension for tensorflow
                    #np_image = np_image.astype(float)  # preprocess needs float64 and img is uint8
                    #np_image = preprocess_input(np_image)         # Regularize the data
                    #/////////////////////////////////////////////
                    if not USE_LOCAL_CAMERA:
                        try:
                                frame = self.bridge.imgmsg_to_cv2(data, desired_encoding="passthrough")
                        except CvBridgeError as e:
                                print(e)

                    # Capture frame-by-frame
                    if USE_LOCAL_CAMERA:
                        ret, frame1 = self.video_capture.read()
                    #/////////////////////////////////////////////    
                    
                    
                    
                    #print"--------"
                    #print('input_msg height  : {}'.format(frame.height))
                    #print('input_msg width   : {}'.format(frame.width))
                    #print('input_msg step    : {}'.format(frame.step))
                    #print('input_msg encoding: {}'.format(frame.encoding))
                    #print('output dtype      : {}'.format(frame.dtype))
                    #print('output shape      : {}'.format(frame.shape))
                    #print"--------"
                   
                    
                    gray_image = cv2.cvtColor(frame, cv2.COLOR_BGR2GRAY)
                    rgb_image = cv2.cvtColor(frame, cv2.COLOR_BGR2RGB)
                    faces = detect_faces(self.face_detection, gray_image)

                    for face_coordinates in faces:
                            print("inside the for loop!")
                            print(face_coordinates)

                            x1, x2, y1, y2 = apply_offsets(face_coordinates, self.emotion_offsets)
                            gray_face = gray_image[y1:y2, x1:x2]
                            #print len(frame)
                            #print gray_face.size
                            #print gray_face.shape
                            #print gray_face.dtype

                            try:
                                    gray_face = cv2.resize(gray_face, self.emotion_target_size)
                            except:
                                    continue

                    
                            gray_face = preprocess_input(gray_face, True)
                            gray_face = np.expand_dims(gray_face, 0)
                            gray_face = np.expand_dims(gray_face, -1)
                            
                            #print"************"
                            #print('gray_face dtype      : {}'.format(gray_face.dtype))
                            #print('gray_face shape      : {}'.format(gray_face.shape))
                            #print('gray_face size      : {}'.format(gray_face.size))
                            #print"************"


                            ## This is a workaround for asynchronous execution using TF and ROS
                            # https://github.com/keras-team/keras/issues/2397
                            # http://projectsfromtech.blogspot.com/2017/10/visual-object-recognition-in-ros-using.html
                            #global self.graph                                  
                            with self.graph.as_default():
                                emotion_prediction = self.emotion_classifier.predict(gray_face)
                                emotion_probability = np.max(emotion_prediction)
                                emotion_label_arg = np.argmax(emotion_prediction)
                                emotion_text = self.emotion_labels[emotion_label_arg]	
                                print(emotion_text)
                                print(emotion_probability)
                                print('%')
                                self.emotion_window.append(emotion_text)

                
                                #self.emotion_msg.data = emotion_text
                                #self.emotion_publisher.publish(emotion_msg)
                                #self.speech_msg.data = 'I see that you are ' + emotion_text
                                #self.speech_publisher.publish(speech_msg)

                                if len(self.emotion_window) > self.frame_window:
                                    self.emotion_window.pop(0)
                                try:
                                    emotion_mode = mode(self.emotion_window)
                                except:
                                    continue

                                if emotion_text == 'angry':
                                    color = emotion_probability * np.asarray((255, 0, 0))
                                elif emotion_text == 'sad':
                                    color = emotion_probability * np.asarray((0, 0, 255))
                                elif emotion_text == 'happy':
                                    color = emotion_probability * np.asarray((255, 255, 0))
                                elif emotion_text == 'surprise':
                                    color = emotion_probability * np.asarray((0, 255, 255))
                                else:
                                    color = emotion_probability * np.asarray((0, 255, 0))

                                color = color.astype(int)
                                color = color.tolist()

                                draw_bounding_box(face_coordinates, rgb_image, color)
                                draw_text(face_coordinates, rgb_image, emotion_mode,
                                        color, 0, -45, 1, 1)
                         
                    try:
                        self.image_pub.publish(self.bridge.cv2_to_imgmsg(rgb_image, "bgr8"))
                    except CvBridgeError as e:
                        print(e)
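
The with self.graph.as_default(): block is the usual Keras-under-ROS threading workaround referenced in the comments above; it assumes the node's constructor captured the default graph right after loading the model, roughly like this (a sketch under that assumption, not code from the snippet):

import tensorflow as tf
from keras.models import load_model


class EmotionNodeInitSketch(object):
    # Hypothetical initialisation showing where self.graph would come from.
    def __init__(self, emotion_model_path):
        self.emotion_classifier = load_model(emotion_model_path, compile=False)
        # Capture the graph the model was built in so predict() can run
        # inside the ROS callback thread (see keras issue #2397 above).
        self.graph = tf.get_default_graph()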
コード例 #35
color = (0, 255, 0)

# getting input model shapes for inference
target_size = model.input_shape[1:3]

# starting lists for calculating modes
emotion_window = []

# starting video streaming
cv2.namedWindow('window_frame')
video_capture = cv2.VideoCapture(0)
while True:
    bgr_image = video_capture.read()[1]
    gray_image = cv2.cvtColor(bgr_image, cv2.COLOR_BGR2GRAY)
    rgb_image = cv2.cvtColor(bgr_image, cv2.COLOR_BGR2RGB)
    faces = detect_faces(face_detection, gray_image)

    for face_coordinates in faces:

        x1, x2, y1, y2 = apply_offsets(face_coordinates, offsets)
        gray_face = gray_image[y1:y2, x1:x2]
        try:
            gray_face = cv2.resize(gray_face, target_size)
        except:
            continue

        gray_face = preprocess_input(gray_face, True)
        gray_face = np.expand_dims(gray_face, 0)
        gray_face = np.expand_dims(gray_face, -1)
        guided_gradCAM = calculate_guided_gradient_CAM(gray_face,
                            gradient_function, saliency_function)
コード例 #36
def emotion_identify(img_url):
    # parameters for loading data and images

    detection_model_path = 'C:/Users/Admin/PycharmProjects/Emotion_Detection/trained_models/detection_models/haarcascade_frontalface_default.xml'
    emotion_model_path = 'C:/Users/Admin/PycharmProjects/Emotion_Detection/trained_models/emotion_models/fer2013_mini_XCEPTION.102-0.66.hdf5'
    gender_model_path = 'C:/Users/Admin/PycharmProjects/Emotion_Detection/trained_models/gender_models/simple_CNN.81-0.96.hdf5'
    emotion_labels = get_labels('fer2013')
    gender_labels = get_labels('imdb')
    font = cv2.FONT_HERSHEY_SIMPLEX

    # hyper-parameters for bounding boxes shape
    # gender_offsets = (30, 60)  # overridden immediately below
    gender_offsets = (10, 10)
    # emotion_offsets = (20, 40)  # overridden immediately below
    emotion_offsets = (0, 0)

    # loading models
    face_detection = load_detection_model(detection_model_path)
    emotion_classifier = load_model(emotion_model_path, compile=False)
    gender_classifier = load_model(gender_model_path, compile=False)

    # getting input model shapes for inference
    emotion_target_size = emotion_classifier.input_shape[1:3]
    gender_target_size = gender_classifier.input_shape[1:3]
    # loading images
    image_path = img_url
    rgb_image = load_image(image_path, grayscale=False)
    gray_image = load_image(image_path, grayscale=True)
    gray_image = np.squeeze(gray_image)
    gray_image = gray_image.astype('uint8')

    faces = detect_faces(face_detection, gray_image)
    if len(faces) == 0:
        print("No face")
        K.clear_session()
        return False

    emotions = collections.defaultdict(int)
    for face_coordinates in faces:
        x1, x2, y1, y2 = apply_offsets(face_coordinates, gender_offsets)
        rgb_face = rgb_image[y1:y2, x1:x2]

        x1, x2, y1, y2 = apply_offsets(face_coordinates, emotion_offsets)
        gray_face = gray_image[y1:y2, x1:x2]

        try:
            rgb_face = cv2.resize(rgb_face, gender_target_size)
            gray_face = cv2.resize(gray_face, emotion_target_size)
        except:
            continue

        rgb_face = preprocess_input(rgb_face, False)
        rgb_face = np.expand_dims(rgb_face, 0)
        gender_prediction = gender_classifier.predict(rgb_face)
        gender_label_arg = np.argmax(gender_prediction)
        gender_text = gender_labels[gender_label_arg]

        gray_face = preprocess_input(gray_face, True)
        gray_face = np.expand_dims(gray_face, 0)
        gray_face = np.expand_dims(gray_face, -1)
        emotion_label_arg = np.argmax(emotion_classifier.predict(gray_face))
        emotion_text = emotion_labels[emotion_label_arg]
        emotions[emotion_text] += 1
        if gender_text == gender_labels[0]:
            color = (0, 0, 255)
        else:
            color = (255, 0, 0)

        draw_bounding_box(face_coordinates, rgb_image, color)
        draw_text(face_coordinates, rgb_image, gender_text, color, 0, -20, 1, 2)
        draw_text(face_coordinates, rgb_image, emotion_text, color, 0, -50, 1, 2)
    max_num = 0
    max_emotion = None
    for key, value in emotions.items():
        if value > max_num:
            max_num = value
            max_emotion = key
    print("The emotion of this picture is: ", max_emotion)
    bgr_image = cv2.cvtColor(rgb_image, cv2.COLOR_RGB2BGR)
    cv2.imwrite('./result_images/predicted_test_image.png', bgr_image)
    K.clear_session()
    return max_emotion
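
For reference, the manual max_num / max_emotion loop inside emotion_identify() can be collapsed into a single expression with the same behaviour; a drop-in sketch for that block:

    # Equivalent to the loop above; returns None if no face was classified.
    max_emotion = max(emotions, key=emotions.get) if emotions else None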