def __init__(self):
    rospy.init_node('image_converter', anonymous=True)
    
    rospy.loginfo("recognizer started")
    print "1................................................"
    
    self.bridge = CvBridge()

    self._image_topic_output = "~image_topic_output"
    print(rospy.has_param(self._image_topic_output))
    if rospy.has_param(self._image_topic_output):
      self.image_topic_output = rospy.get_param(self._image_topic_output)
      self.image_pub = rospy.Publisher(self.image_topic_output, Image, queue_size=10)

    self._image_topic_input = "~image_topic_input"
    print(rospy.has_param(self._image_topic_input))
    if rospy.has_param(self._image_topic_input):
      self.image_topic_input = rospy.get_param(self._image_topic_input)
      self.image_sub = rospy.Subscriber(self.image_topic_input, Image, self.callback)


    self._detection_model_path = "~detection_model_path"
    print(rospy.has_param(self._detection_model_path))
    if rospy.has_param(self._detection_model_path):
      self.detection_model_path = rospy.get_param(self._detection_model_path)

    self._emotion_model_path = "~emotion_model_path"
    print(rospy.has_param(self._emotion_model_path))
    if rospy.has_param(self._emotion_model_path):
      self.emotion_model_path = rospy.get_param(self._emotion_model_path)


    self.emotion_labels = get_labels('fer2013')

    # hyper-parameters for bounding boxes shape
    self.frame_window = 10
    self.emotion_offsets = (20, 40)

    # loading models
    self.face_detection = load_detection_model(self.detection_model_path)
    self.emotion_classifier = load_model(self.emotion_model_path, compile=False)

    # getting input model shapes for inference
    self.emotion_target_size = self.emotion_classifier.input_shape[1:3]

    # starting lists for calculating modes
    self.emotion_window = []    
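
The subscriber above is wired to self.callback, which this snippet does not show. A minimal sketch of such a callback, assuming CvBridge and CvBridgeError are imported from cv_bridge and that the node simply converts the incoming frame and republishes it (the actual emotion-processing body is omitted):

def callback(self, data):
    # Convert the incoming sensor_msgs/Image to an OpenCV BGR frame.
    try:
        cv_image = self.bridge.imgmsg_to_cv2(data, "bgr8")
    except CvBridgeError as e:
        rospy.logerr(e)
        return

    # ... face detection / emotion classification on cv_image would go here ...

    # Republish the (possibly annotated) frame on the configured output topic.
    if hasattr(self, "image_pub"):
        self.image_pub.publish(self.bridge.cv2_to_imgmsg(cv_image, "bgr8"))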
def process_image(image):

    try:
        # parameters for loading data and images
        detection_model_path = './trained_models/detection_models/haarcascade_frontalface_default.xml'
        emotion_model_path = './trained_models/emotion_models/fer2013_mini_XCEPTION.102-0.66.hdf5'
        emotion_labels = get_labels('fer2013')

        # hyper-parameters for bounding boxes shape
        emotion_offsets = (0, 0)

        # loading models
        face_detection = load_detection_model(detection_model_path)
        emotion_classifier = load_model(emotion_model_path, compile=False)

        # getting input model shapes for inference
        emotion_target_size = emotion_classifier.input_shape[1:3]

        # loading images
        image_array = np.frombuffer(image, np.uint8)
        unchanged_image = cv2.imdecode(image_array, cv2.IMREAD_UNCHANGED)
        gray_image = cv2.cvtColor(unchanged_image, cv2.COLOR_BGR2GRAY)

        faces = detect_faces(face_detection, gray_image)
        detected_emotions = []
        for face_coordinates in faces:
            x1, x2, y1, y2 = apply_offsets(face_coordinates, emotion_offsets)
            gray_face = gray_image[y1:y2, x1:x2]

            try:
                gray_face = cv2.resize(gray_face, emotion_target_size)
            except:
                continue

            gray_face = preprocess_input(gray_face, True)
            gray_face = np.expand_dims(gray_face, 0)
            gray_face = np.expand_dims(gray_face, -1)
            emotion_label_arg = np.argmax(emotion_classifier.predict(gray_face))
            detected_emotions.append({
                "coordinates": [str(y1), str(x2), str(y2), str(x1)],
                "emotion": emotion_labels[emotion_label_arg]})

        return detected_emotions

    except Exception as err:
        logging.error('Error in emotion gender processor: "{0}"'.format(err))
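
A hedged usage sketch for process_image above, assuming the caller passes the raw encoded bytes of an image file (cv2.imdecode handles the decoding); the file name is illustrative:

with open('test_image.jpg', 'rb') as f:          # hypothetical input file
    results = process_image(f.read())
for face in results or []:                       # results is None if an error was logged
    print(face['emotion'], face['coordinates'])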
Example #3
import numpy as np
from keras.models import load_model
from statistics import mode
from utils.datasets import get_labels
from utils.inference import detect_faces
from utils.inference import draw_text
from utils.inference import draw_bounding_box
from utils.inference import apply_offsets
from utils.inference import load_detection_model
from utils.preprocessor import preprocess_input
import warnings

warnings.filterwarnings("ignore")  #Filter out warnings

emotion_model_path = './models/emotion_model.hdf5'  #Load the neural network using the hdf5 file
emotion_labels = get_labels(
    'fer2013')  #The different emotions- neutral, happy, angry, sad, surprise

frame_window = 10  #emotion needs to persist for 10 frames to be counted as valid
emotion_offsets = (20, 40)

print("[INFO] loading model...")  #Load the DNN for face detection
net = cv2.dnn.readNetFromCaffe("deploy.prototxt.txt",
                               "res10_300x300_ssd_iter_140000.caffemodel")

#face_cascade = cv2.CascadeClassifier('./models/haarcascade_frontalface_default.xml')
emotion_classifier = load_model(emotion_model_path)

emotion_target_size = emotion_classifier.input_shape[
    1:3]  #Gets the input shape for the network which is 48 pixels

# starting lists for calculating modes
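
The snippet above loads the res10 SSD face detector into net but is cut off before the detector is used. A minimal sketch of the standard OpenCV DNN detection loop, assuming cv2 and np are imported in the elided header and using an illustrative frame and threshold:

frame = cv2.imread('example.jpg')              # hypothetical BGR input frame
(h, w) = frame.shape[:2]
blob = cv2.dnn.blobFromImage(cv2.resize(frame, (300, 300)), 1.0,
                             (300, 300), (104.0, 177.0, 123.0))
net.setInput(blob)
detections = net.forward()                     # shape (1, 1, N, 7)
for i in range(detections.shape[2]):
    confidence = detections[0, 0, i, 2]
    if confidence < 0.5:                       # illustrative threshold
        continue
    # Box coordinates come back as fractions of the frame size.
    x1, y1, x2, y2 = (detections[0, 0, i, 3:7] * np.array([w, h, w, h])).astype(int)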
Example #4
def game(points):
    USE_WEBCAM = True  # If false, loads video file source

    # parameters for loading data and images
    emotion_model_path = './models/emotion_model.hdf5'
    emotion_labels = get_labels('fer2013')

    # hyper-parameters for bounding boxes shape
    frame_window = 10
    emotion_offsets = (20, 40)

    # loading models
    face_cascade = cv2.CascadeClassifier(
        './models/haarcascade_frontalface_default.xml')
    emotion_classifier = load_model(emotion_model_path)

    # getting input model shapes for inference
    emotion_target_size = emotion_classifier.input_shape[1:3]

    # starting lists for calculating modes
    emotion_window = []

    # starting video streaming

    cv2.namedWindow('Meme_Matcher')
    video_capture = cv2.VideoCapture(0)

    # Select video or webcam feed
    cap = None
    if (USE_WEBCAM == True):
        cap = cv2.VideoCapture(0)  # Webcam source
    else:
        cap = cv2.VideoCapture('./demo/b08.jpg')  # File source (a still image here)

    folder = r"C:\Users\Matthew\Downloads\Emotion-master\Pictures"
    pick = random.choice(os.listdir(folder))
    #print(a)
    #meme = cv2.imread(folder+'\\'+a)
    #print(pick)
    meme = cv2.imread(folder + '\\' + str(pick))
    meme = cv2.cvtColor(meme, cv2.COLOR_BGR2GRAY)

    memes = face_cascade.detectMultiScale(meme,
                                          scaleFactor=1.1,
                                          minNeighbors=7,
                                          minSize=(30, 30),
                                          flags=cv2.CASCADE_SCALE_IMAGE)
    meme_emotion = ""
    meme_prob = 0
    #image = Image.open(folder+'\\'+str(pick))
    #image.show()

    window = tk.Tk()
    window.configure(background='white')
    img = ImageTk.PhotoImage(Image.open(folder + '\\' + str(pick)))
    panel = tk.Label(window, image=img)
    panel.pack(side="bottom", fill="both", expand="yes")
    window.after(
        3000, lambda: window.destroy())  # Destroy the widget after 3 seconds
    window.mainloop()
    for face_coordinates in memes:
        x1, x2, y1, y2 = apply_offsets(face_coordinates, emotion_offsets)
        gray_face = meme[y1:y2, x1:x2]
        try:
            gray_face = cv2.resize(gray_face, (emotion_target_size))
        except:
            continue
        gray_face = preprocess_input(gray_face, True)
        gray_face = np.expand_dims(gray_face, 0)
        gray_face = np.expand_dims(gray_face, -1)
        emotion_prediction = emotion_classifier.predict(gray_face)
        emotion_probability = np.max(emotion_prediction)
        #print("meme prob" + str(emotion_probability))
        meme_prob = emotion_probability
        emotion_label_arg = np.argmax(emotion_prediction)
        emotion_text = emotion_labels[emotion_label_arg]
        meme_emotion = emotion_text
        #print(emotion_text)

    start_time = time.time()

    while cap.isOpened():  # True:
        ret, bgr_image = cap.read()

        #bgr_image = video_capture.read()[1]

        gray_image = cv2.cvtColor(bgr_image, cv2.COLOR_BGR2GRAY)
        rgb_image = cv2.cvtColor(bgr_image, cv2.COLOR_BGR2RGB)

        faces = face_cascade.detectMultiScale(gray_image,
                                              scaleFactor=1.1,
                                              minNeighbors=7,
                                              minSize=(30, 30),
                                              flags=cv2.CASCADE_SCALE_IMAGE)

        for face_coordinates in faces:

            x1, x2, y1, y2 = apply_offsets(face_coordinates, emotion_offsets)
            gray_face = gray_image[y1:y2, x1:x2]
            try:
                gray_face = cv2.resize(gray_face, (emotion_target_size))
            except:
                continue

            gray_face = preprocess_input(gray_face, True)
            gray_face = np.expand_dims(gray_face, 0)
            gray_face = np.expand_dims(gray_face, -1)
            emotion_prediction = emotion_classifier.predict(gray_face)
            emotion_probability = np.max(emotion_prediction)
            #print("human prob" + str(emotion_probability))
            emotion_label_arg = np.argmax(emotion_prediction)
            emotion_text = emotion_labels[emotion_label_arg]
            emotion_window.append(emotion_text)

            if len(emotion_window) > frame_window:
                emotion_window.pop(0)
            try:
                emotion_mode = mode(emotion_window)
            except:
                continue

            #if emotion_text == 'angry':
            #    color = emotion_probability * np.asarray((255, 0, 0))
            #if emotion_text == 'sad':
            #    color = emotion_probability * np.asarray((0, 0, 255))
            #elif emotion_text == 'happy':
            #    color = emotion_probability * np.asarray((255, 255, 0))
            #elif emotion_text == 'surprise':
            #    color = emotion_probability * np.asarray((0, 255, 255))
            #else:
            #    color = emotion_probability * np.asarray((0, 255, 0))

            if emotion_text == meme_emotion and meme_emotion == 'angry' and abs(
                    emotion_probability -
                    meme_prob) < 0.08 and time.time() - start_time < 10:
                cv2.imwrite("Haha.jpg", rgb_image)
                img = cv2.imread(folder + '\\' + str(pick))
                img2 = cv2.imread("Haha.jpg")
                points = int(points + (10 - (time.time() - start_time)))
                compare_images(img, img2, emotion_probability, meme_prob,
                               "Blah blah", points)
                cap.release()
                cv2.destroyAllWindows()
            elif emotion_text == meme_emotion and meme_emotion == 'sad' and abs(
                    emotion_probability -
                    meme_prob) < 0.08 and time.time() - start_time < 10:
                cv2.imwrite("Haha.jpg", rgb_image)
                img = cv2.imread(folder + '\\' + str(pick))
                img2 = cv2.imread("Haha.jpg")
                points = int(points + (10 - (time.time() - start_time)))
                compare_images(img, img2, emotion_probability, meme_prob,
                               "Blah blah", points)
                cap.release()
                cv2.destroyAllWindows()
            elif emotion_text == meme_emotion and abs(
                    emotion_probability -
                    meme_prob) < 0.02 and time.time() - start_time < 10:
                cv2.imwrite("Haha.jpg", rgb_image)
                img = cv2.imread(folder + '\\' + str(pick))
                img2 = cv2.imread("Haha.jpg")
                points = int(points + (10 - (time.time() - start_time)))
                compare_images(img, img2, emotion_probability, meme_prob,
                               "Blah blah", points)
                cap.release()
                cv2.destroyAllWindows()
            elif time.time() - start_time > 10:
                cap.release()
                cv2.destroyAllWindows()

            #color = color.astype(int)
            #color = color.tolist()

            #draw_bounding_box(face_coordinates, rgb_image, color)
            #draw_text(face_coordinates, rgb_image, emotion_mode,
            #          color, 0, -45, 1, 1)

        bgr_image = cv2.cvtColor(rgb_image, cv2.COLOR_RGB2BGR)
        cv2.imshow('window_frame', bgr_image)
        if cv2.waitKey(1) & 0xFF == ord('q'):
            break

    cap.release()
    cv2.destroyAllWindows()
from utils.datasets import get_labels
from utils.inference import detect_faces
from utils.inference import draw_text
from utils.inference import draw_bounding_box
from utils.inference import apply_offsets
from utils.inference import load_detection_model
from utils.inference import load_image
from utils.preprocessor import preprocess_input

# parameters for loading data and images
image_path = sys.argv[1]
detection_model_path = '../trained_models/detection_models/haarcascade_frontalface_default.xml'
emotion_model_path = '../trained_models/emotion_models/fer2013_mini_XCEPTION.102-0.66.hdf5'
gender_model_path = '../trained_models/gender_models/simple_CNN.81-0.96.hdf5'
emotion_labels = get_labels('fer2013')
gender_labels = get_labels('imdb')
font = cv2.FONT_HERSHEY_SIMPLEX

# hyper-parameters for bounding boxes shape
gender_offsets = (30, 60)
gender_offsets = (10, 10)
emotion_offsets = (20, 40)
emotion_offsets = (0, 0)

# loading models
face_detection = load_detection_model(detection_model_path)
emotion_classifier = load_model(emotion_model_path, compile=False)
gender_classifier = load_model(gender_model_path, compile=False)

# getting input model shapes for inference
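
This example stops right after loading the models. Based on the per-face loop used by the other examples in this listing, the continuation would typically look like the sketch below (cv2 and np are assumed imported in the elided header):

emotion_target_size = emotion_classifier.input_shape[1:3]
gender_target_size = gender_classifier.input_shape[1:3]

# loading images
rgb_image = load_image(image_path, grayscale=False)
gray_image = load_image(image_path, grayscale=True)
gray_image = np.squeeze(gray_image).astype('uint8')

faces = detect_faces(face_detection, gray_image)
for face_coordinates in faces:
    x1, x2, y1, y2 = apply_offsets(face_coordinates, emotion_offsets)
    gray_face = cv2.resize(gray_image[y1:y2, x1:x2], emotion_target_size)
    gray_face = np.expand_dims(np.expand_dims(preprocess_input(gray_face, True), 0), -1)
    emotion_text = emotion_labels[np.argmax(emotion_classifier.predict(gray_face))]
    draw_text(face_coordinates, rgb_image, emotion_text, (0, 255, 0), 0, -50, 1, 2)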
Example #6
def game(cam, window00):
    # Section 1 - option settings and standard layout

    sg.theme('Dark Blue 3')
    pygame.mixer.init()
    players = list(range(1, 11))
    time0 = list(range(0, 61))

    frame1 = sg.Frame(
        "いじらなくてもOK",
        [[
            sg.Text(
                '感度',
                size=(15, 1),
            ),
            sg.Spin(players, size=(7, 1), initial_value=5, key='-sensitive-'),
            sg.Text(
                '低いほど敏感',
                size=(15, 1),
            )
        ],
         [
             sg.Text('ライフ', size=(15, 1)),
             sg.Spin(players, size=(7, 1), initial_value=1, key='-life-'),
             sg.Text('何回笑っていいか', size=(20, 1))
         ],
         [
             sg.Text(
                 '忖度レベル',
                 size=(15, 1),
             ),
             sg.Spin(players, size=(7, 1), initial_value=1, key='-sontaku-'),
             sg.Text(
                 '接待用',
                 size=(15, 1),
             )
         ],
         [
             sg.Text(
                 'FPS',
                 size=(15, 1),
             ),
             sg.Spin(time0, size=(7, 1), initial_value=20, key='-fps-'),
             sg.Text(
                 '重い時は下げよう',
                 size=(15, 1),
             )
         ]])

    layout0 = [[sg.Text('ログイン→チェックの順で')],
               [
                   sg.Text('nicknameを入力'),
                   sg.InputText(default_text='hoge',
                                key='-name-',
                                size=(20, 5))
               ],
               [
                   sg.Text('部屋IDを入力'),
                   sg.Spin(players,
                           initial_value=1,
                           key='-room-',
                           size=(20, 5))
               ],
               [
                   sg.Text('プレイ人数を入力'),
                   sg.Spin(players,
                           size=(7, 7),
                           initial_value=6,
                           key='-player-')
               ],
               [
                   sg.Text('パスワードを入力'),
                   sg.InputText(default_text='hoge',
                                key='-pass-',
                                size=(20, 5))
               ],
               [
                   sg.Spin(players,
                           size=(7, 7),
                           initial_value=3,
                           key='-clear-'),
                   sg.Text('クリア条件を設定')
               ],
               [
                   sg.Radio('ホスト', "RADIO1", key='-host-'),
                   sg.Radio('ゲスト', "RADIO1", default=True),
                   sg.Text('ホスト:笑わせる側')
               ], [sg.Submit('ログイン'), sg.Submit('チェック')]]

    layout = [[sg.Text('ゲーム設定')],
              [
                  sg.Spin(time0, key='-min-', initial_value=2, size=(7, 1)),
                  sg.Text('分'),
                  sg.Spin(time0, key='-sec-', initial_value=30, size=(7, 1)),
                  sg.Text('秒'),
                  sg.Text('プレイ時間を設定')
              ], [sg.Text('詳細設定'), frame1], [sg.Submit('開始')]]

    # Mode selection
    # Free mode: expression detection, clear and log only
    # Game mode: host vs. guest

    # Section 2 - window creation
    window0 = sg.Window('入室', layout0)
    counter = 0
    pygame.mixer.music.load("main.mp3")
    pygame.mixer.music.play(-1)

    video_capture = cv2.VideoCapture(cam)
    # Section 3 - event loop
    while True:
        event, values = window0.read(timeout=10)
        cap = video_capture.read()[1]
        window00['image'](data=cv2.imencode('.png', cap)[1].tobytes())
        window00.finalize()
        #cv2.imshow("camera",cap)
        #cv2.waitKey(1)
        #event, values = window0.read(timeout=10)

        if event is None:
            if counter >= 1:
                try:  #Exit
                    pygame.mixer.music.stop()
                    exit = {"action": "ExitRoom", "data": "1"}
                    ws.send(json.dumps(exit))
                    ws.close()
                    window0.close()
                    iromonea.normal(window00, cam)
                    sys.exit()
                except:
                    ws.close()
            else:
                window0.close()
                video_capture.release()
                pygame.mixer.music.stop()
                #cv2.destroyAllWindows()
                iromonea.normal(window00, cam)
                sys.exit()

        elif event == 'ログイン':
            roomid = str(values['-room-'])
            nickname = values['-name-']
            players = values['-player-']
            password = values['-pass-']
            host_state = values['-host-']
            limit = str(values['-clear-'])
            #EnterRoom roomnum,nickname
            if counter == 0:
                ws = websocket.create_connection(
                    "wss://7dltq43ti1.execute-api.us-east-1.amazonaws.com/alpha"
                )
                data = roomid + "," + nickname
                enter = {"action": "EnterRoom", "data": data}
                ws.send(json.dumps(enter))
                roomid_before = roomid
                sg.popup("ログインしました")
                counter = 1
            else:
                data1 = roomid_before
                check = {"action": "ExitRoom", "data": data1}
                ws.send(json.dumps(check))
                ws = websocket.create_connection(
                    "wss://7dltq43ti1.execute-api.us-east-1.amazonaws.com/alpha"
                )
                data = roomid + "," + nickname
                enter = {"action": "EnterRoom", "data": data}
                ws.send(json.dumps(enter))
                sg.popup("修正しました")
                counter = 1
        elif event == 'チェック':
            if counter == 0:
                sg.popup("先にログインしてください")
                continue
            #check-section
            #roomnum,nickname,plyernum,password,limit
            elif host_state is True:
                pygame.mixer.music.load("warai.mp3")
                pygame.mixer.music.play()
            data = data + "," + str(players) + "," + password + "," + limit
            check = {"action": "check", "data": data}
            ws.send(json.dumps(check))
            result = ws.recv()
            if result == "clear":
                break
            else:
                # popup
                sg.popup("ログインしてない人がいるか設定が違います")


# Section 4 - window teardown and exit
    window0.close()
    window1 = sg.Window('設定', layout)
    intercount = 0
    start_state = 0
    while True:
        event, values = window1.read(timeout=10)
        if event is None:
            #Exit
            data1 = roomid
            check = {"action": "ExitRoom", "data": data1}
            ws.send(json.dumps(check))
            window1.close()
            video_capture.release()
            pygame.mixer.music.stop()
            #cv2.destroyAllWindows()
            iromonea.normal(window00, cam)
            sys.exit()
        elif event == '開始':
            minit = int(values["-min-"])
            sec = int(values["-sec-"])
            sense = values["-sensitive-"]
            life = values["-life-"]
            sontaku = values["-sontaku-"]
            setfps = int(values["-fps-"])
            time1 = float(minit * 60 + sec)
            pygame.mixer.music.load("roulette.mp3")
            pygame.mixer.music.play()
            time.sleep(3)
            #start
            start = {"action": "Start", "data": data}
            ws.send(json.dumps(start))
            while True:
                result = ws.recv()
                intercount += 1
                if result == "start":
                    starttime = time.time()
                    pygame.mixer.music.load("start.mp3")
                    pygame.mixer.music.play()
                    start_state = 1
                    break
                elif intercount >= 100:
                    sg.popup("タイムアウトしました")
                    break
        elif start_state == 1:
            break
    window1.close()
    if host_state is True:
        host(ws, data, time1, setfps, starttime, cam, window00)
        iromonea.normal(window00, cam)
        sys.exit()
    else:
        #guest.py

        # parameters for loading data and images
        detection_model_path = 'trained_models/detection_models/haarcascade_frontalface_default.xml'
        emotion_model_path = 'trained_models/emotion_models/fer2013_mini_XCEPTION.102-0.66.hdf5'
        emotion_labels = get_labels('fer2013')

        # hyper-parameters for bounding boxes shape
        frame_window = 10
        emotion_offsets = (20, 40)

        # loading models
        face_detection = load_detection_model(detection_model_path)
        emotion_classifier = load_model(emotion_model_path, compile=False)

        # getting input model shapes for inference
        emotion_target_size = emotion_classifier.input_shape[1:3]

        # starting lists for calculating modes
        emotion_window = []

        # starting video streaming
        #cv2.namedWindow('window_frame')
        video_capture = cv2.VideoCapture(cam)
        endtime = starttime + time1
        flag = 0
        count = 0
        telop_height = 50
        video_capture.set(cv2.CAP_PROP_FPS, setfps)
        #print("set:",setfps)
        fps = video_capture.get(cv2.CAP_PROP_FPS)
        #print("fps:",fps)
        happy_meter = [
            0, "|----|----|----|----|----|----|----|----|----|----|"
        ]
        happy_level = sense
        Life = life * sontaku
        flag_count = Life
        ccounter = 0
        log = []
        name = ["defalt"]
        state = {"action": "state", "data": data}
        break1 = 0
        while True:
            timer = round(endtime - time.time(), 1)
            if break1 == 1:
                break
            ws.send(json.dumps(state))
            result = ws.recv()
            """if result =="end":
                pygame.mixer.music.load("win.mp3")
                pygame.mixer.music.play()
                break"""
            if result == "" or "message" in result:
                pass
            else:
                result = result.split(",")
                del result[0]
                for x in result:
                    if x == "end":
                        ccounter = "win"
                        pygame.mixer.music.load("win.mp3")
                        pygame.mixer.music.play()
                        time.sleep(4)
                        break1 = 1
                        break
                    elif x not in name:
                        name.append(x)
                        res = x + "さん:" + str(round(time1 - timer, 1)) + "秒"
                        #aa
                        pygame.mixer.music.load("clear.mp3")
                        pygame.mixer.music.play()
                        log.append(res)

            bgr_image = video_capture.read()[1]
            gray_image = cv2.cvtColor(bgr_image, cv2.COLOR_BGR2GRAY)
            rgb_image = cv2.cvtColor(bgr_image, cv2.COLOR_BGR2RGB)
            faces = detect_faces(face_detection, gray_image)
            timer = round(endtime - time.time(), 1)

            for face_coordinates in faces:

                x1, x2, y1, y2 = apply_offsets(face_coordinates,
                                               emotion_offsets)
                gray_face = gray_image[y1:y2, x1:x2]
                try:
                    gray_face = cv2.resize(gray_face, (emotion_target_size))
                except:
                    continue

                gray_face = preprocess_input(gray_face, True)
                gray_face = np.expand_dims(gray_face, 0)
                gray_face = np.expand_dims(gray_face, -1)
                emotion_prediction = emotion_classifier.predict(gray_face)
                emotion_probability = np.max(emotion_prediction)
                emotion_label_arg = np.argmax(emotion_prediction)
                emotion_text = emotion_labels[emotion_label_arg]
                emotion_window.append(emotion_text)

                if len(emotion_window) > frame_window:
                    emotion_window.pop(0)
                try:
                    emotion_mode = mode(emotion_window)
                except:
                    continue

                if emotion_text == 'angry':
                    color = emotion_probability * np.asarray((255, 0, 0))
                elif emotion_text == 'sad':
                    color = emotion_probability * np.asarray((0, 0, 255))
                elif emotion_text == 'happy':
                    color = emotion_probability * np.asarray((255, 255, 0))

                elif emotion_text == 'surprise':
                    color = emotion_probability * np.asarray((0, 255, 255))
                else:
                    color = emotion_probability * np.asarray((0, 255, 0))

                color = color.astype(int)
                color = color.tolist()

                draw_bounding_box(face_coordinates, rgb_image, color)
                draw_text(face_coordinates, rgb_image, emotion_mode, color, 0,
                          -45, 1, 1)

            bgr_image = cv2.cvtColor(rgb_image, cv2.COLOR_RGB2BGR)

            cap_width = bgr_image.shape[1]
            cap_height = bgr_image.shape[0]
            happy_meter[0] = int(
                emotion_window.count('happy') / happy_level * 100)
            happy_meter[
                1] = "|----|----|----|----|----|----|----|----|----|----|"
            happy_meter[1] = happy_meter[1].replace("-", "#",
                                                    int(happy_meter[0] / 2.5))
            Life_meter = "[" + "@" * int(Life / sontaku) + "]"

            font = cv2.FONT_HERSHEY_SIMPLEX
            cv2.putText(bgr_image, "damage{}[%]".format(happy_meter[0]),
                        (10, 50), font, 1, (0, 255, 0), 2, cv2.LINE_AA)
            cv2.putText(bgr_image, "{}".format(happy_meter[1]), (10, 100),
                        font, 1, (0, 255, 0), 2, cv2.LINE_AA)
            cv2.putText(bgr_image, "Life:{}".format(Life_meter), (10, 150),
                        font, 1, (0, 255, 0), 2, cv2.LINE_AA)
            cv2.putText(bgr_image, "{} [OUT]".format(len(name) - 1),
                        (cap_width - 300, cap_height - telop_height - 60),
                        font, 1, (255, 0, 0), 2, cv2.LINE_AA)
            cv2.putText(bgr_image, "{} [fps]".format(round(fps, 1)),
                        (cap_width - 300, cap_height - telop_height - 30),
                        font, 1, (0, 0, 255), 2, cv2.LINE_AA)
            cv2.putText(bgr_image, "{:.2f} [sec]".format(timer),
                        (cap_width - 300, cap_height - telop_height), font, 1,
                        (0, 0, 255), 2, cv2.LINE_AA)
            count += 1
            #cv2.imshow('camera', bgr_image)
            window00['image'](
                data=cv2.imencode('.png', bgr_image)[1].tobytes())
            window00.finalize()

            if emotion_window.count('happy') > happy_level:
                flag += 1
                Life -= 1
                if flag == flag_count:
                    pygame.mixer.music.load("clear.mp3")
                    pygame.mixer.music.play()
                    break

            elif cv2.waitKey(1) & 0xFF == ord('q'):
                break
            elif timer <= 0:
                ccounter += 1
                pygame.mixer.music.load("lose.mp3")
                pygame.mixer.music.play()
                time.sleep(3.5)
                break

        if ccounter == 0:
            mess = {"action": "sendMessage", "data": data}
            ws.send(json.dumps(mess))

        break1 = 0

        while flag >= flag_count:
            timer = round(endtime - time.time(), 1)
            Life_meter = "[" + "@" * int(Life / sontaku) + "]"
            bgr_image = video_capture.read()[1]
            bgr_image = cv2.cvtColor(bgr_image, cv2.COLOR_RGB2BGR)
            imgback = bgr_image
            img = cv2.imread('clear.png')
            height = imgback.shape[0]
            width = imgback.shape[1]
            img = cv2.resize(img, (width, height))
            dst = cv2.addWeighted(imgback, 1, img, 0.8, 0)
            font = cv2.FONT_HERSHEY_SIMPLEX
            cv2.putText(dst, "damage:{}[%]".format(happy_meter[0]), (10, 50),
                        font, 1, (0, 255, 0), 2, cv2.LINE_AA)
            cv2.putText(dst, "{}".format(happy_meter[1]), (10, 100), font, 1,
                        (0, 255, 0), 2, cv2.LINE_AA)
            cv2.putText(dst, "Life:{}".format(Life_meter), (10, 150), font, 1,
                        (0, 255, 0), 2, cv2.LINE_AA)
            cv2.putText(dst, "{} [OUT]".format(len(name) - 1),
                        (width - 300, height - telop_height - 60), font, 1,
                        (255, 0, 0), 2, cv2.LINE_AA)
            cv2.putText(dst, "{} [fps]".format(round(fps, 1)),
                        (width - 300, height - telop_height - 30), font, 1,
                        (0, 0, 255), 2, cv2.LINE_AA)
            cv2.putText(dst, "{:.2f} [sec]".format(timer),
                        (width - 300, height - telop_height), font, 1,
                        (0, 0, 255), 2, cv2.LINE_AA)
            count += 1
            #cv2.imshow('camera',dst)
            window00['image'](data=cv2.imencode('.png', dst)[1].tobytes())
            window00.finalize()
            if break1 == 1:
                break
            result = ws.recv()
            ws.send(json.dumps(state))
            """if result =="end":
                pygame.mixer.music.load("win.mp3")
                pygame.mixer.music.play() 
                break
            elif result== "" or "message" in result:
                pass"""
            #cv2.waitKey(1) & 0xFF == ord('q')
            if timer <= 0:
                pygame.mixer.music.load("lose.mp3")
                pygame.mixer.music.play()
                time.sleep(3.5)
                break
            else:
                result = result.split(",")
                del result[0]
                for x in result:
                    if x == "end":
                        ccounter = "win"
                        pygame.mixer.music.load("win.mp3")
                        pygame.mixer.music.play()
                        break1 = 1
                        break
                    elif x not in name:
                        name.append(x)
                        res = x + "さん:" + str(round(time1 - timer, 1)) + "秒"
                        pygame.mixer.music.load("clear.mp3")
                        pygame.mixer.music.play()
                        log.append(res)

        my_text = ""
        if len(log) > 0:
            my_text = log[0]
            del log[0]
        for x in log:
            my_text = my_text + '\n' + x
        layout = [[
            sg.Text('結果発表',
                    font=('HG行書体', 24),
                    text_color='#ff0000',
                    background_color='#0000ff')
        ], [sg.Text("笑いログ")], [sg.Text(my_text)], [sg.Submit('OK')]]
        final = sg.Window("結果", layout)
        while True:
            event, values = final.read()
            if event in (None, "OK"):
                break
    #Exit
        data1 = roomid
        exit = {"action": "ExitRoom", "data": data1}
        ws.send(json.dumps(exit))
        final.close()
        video_capture.release()
        cv2.destroyAllWindows()
        iromonea.normal(window00, cam)
Example #7
def process_image(image, result_path):

    try:
        # with open('/tmp/debug.txt', "a") as debug_file:
        #     print("bp B {}".format(len(image)), file=debug_file)
        # parameters for loading data and images
        detection_model_path = os.environ[
            'FACE_CLASSIFICATION_PATH'] + '/trained_models/detection_models/haarcascade_frontalface_default.xml'
        emotion_model_path = os.environ[
            'FACE_CLASSIFICATION_PATH'] + '/trained_models/emotion_models/fer2013_mini_XCEPTION.102-0.66.hdf5'
        gender_model_path = os.environ[
            'FACE_CLASSIFICATION_PATH'] + '/trained_models/gender_models/simple_CNN.81-0.96.hdf5'
        emotion_labels = get_labels('fer2013')
        gender_labels = get_labels('imdb')
        font = cv2.FONT_HERSHEY_SIMPLEX

        # hyper-parameters for bounding boxes shape
        gender_offsets = (30, 60)
        gender_offsets = (10, 10)
        emotion_offsets = (20, 40)
        emotion_offsets = (0, 0)

        # loading models
        face_detection = load_detection_model(detection_model_path)
        gender_classifier = load_model(gender_model_path, compile=False)
        emotion_classifier = load_model(emotion_model_path, compile=False)

        # getting input model shapes for inference
        emotion_target_size = emotion_classifier.input_shape[1:3]
        gender_target_size = gender_classifier.input_shape[1:3]

        # loading images
        image_array = np.frombuffer(image, np.uint8)
        unchanged_image = cv2.imdecode(image_array, cv2.IMREAD_UNCHANGED)

        rgb_image = cv2.cvtColor(unchanged_image, cv2.COLOR_BGR2RGB)
        gray_image = cv2.cvtColor(unchanged_image, cv2.COLOR_BGR2GRAY)

        faces = detect_faces(face_detection, gray_image)
        ret_array = []
        for face_coordinates in faces:
            ret_entry = {'coord': face_coordinates.tolist()}
            x1, x2, y1, y2 = apply_offsets(face_coordinates, gender_offsets)
            rgb_face = rgb_image[y1:y2, x1:x2]

            x1, x2, y1, y2 = apply_offsets(face_coordinates, emotion_offsets)
            gray_face = gray_image[y1:y2, x1:x2]

            try:
                rgb_face = cv2.resize(rgb_face, (gender_target_size))
                gray_face = cv2.resize(gray_face, (emotion_target_size))
            except:
                continue

            rgb_face = preprocess_input(rgb_face, False)
            rgb_face = np.expand_dims(rgb_face, 0)
            gender_prediction = gender_classifier.predict(rgb_face)
            gender_label_arg = np.argmax(gender_prediction)
            gender_text = gender_labels[gender_label_arg]
            ret_entry['gender'] = gender_text

            gray_face = preprocess_input(gray_face, True)
            gray_face = np.expand_dims(gray_face, 0)
            gray_face = np.expand_dims(gray_face, -1)
            emotion_label_arg = np.argmax(
                emotion_classifier.predict(gray_face))
            emotion_text = emotion_labels[emotion_label_arg]
            ret_entry['emotion'] = emotion_text

            if gender_text == gender_labels[0]:
                color = (0, 0, 255)
            else:
                color = (255, 0, 0)

            draw_bounding_box(face_coordinates, rgb_image, color)
            draw_text(face_coordinates, rgb_image, gender_text, color, 0, -20,
                      1, 2)
            draw_text(face_coordinates, rgb_image, emotion_text, color, 0, -50,
                      1, 2)
            ret_array.append(ret_entry)
    except Exception as err:
        logging.error('Error in emotion gender processor: "{0}"'.format(err))

    bgr_image = cv2.cvtColor(rgb_image, cv2.COLOR_RGB2BGR)
    cv2.imwrite(result_path, bgr_image)
    return ret_array
def emotion_identify(img_url):
    # parameters for loading data and images

    detection_model_path = 'C:/Users/Admin/PycharmProjects/Emotion_Detection/trained_models/detection_models/haarcascade_frontalface_default.xml'
    emotion_model_path = 'C:/Users/Admin/PycharmProjects/Emotion_Detection/trained_models/emotion_models/fer2013_mini_XCEPTION.102-0.66.hdf5'
    gender_model_path = 'C:/Users/Admin/PycharmProjects/Emotion_Detection/trained_models/gender_models/simple_CNN.81-0.96.hdf5'
    emotion_labels = get_labels('fer2013')
    gender_labels = get_labels('imdb')
    font = cv2.FONT_HERSHEY_SIMPLEX

    # hyper-parameters for bounding boxes shape
    gender_offsets = (30, 60)
    gender_offsets = (10, 10)
    emotion_offsets = (20, 40)
    emotion_offsets = (0, 0)

    # loading models
    face_detection = load_detection_model(detection_model_path)
    emotion_classifier = load_model(emotion_model_path, compile=False)
    gender_classifier = load_model(gender_model_path, compile=False)

    # getting input model shapes for inference
    emotion_target_size = emotion_classifier.input_shape[1:3]
    gender_target_size = gender_classifier.input_shape[1:3]
    # loading images
    image_path = img_url
    rgb_image = load_image(image_path, grayscale=False)
    gray_image = load_image(image_path, grayscale=True)
    gray_image = np.squeeze(gray_image)
    gray_image = gray_image.astype('uint8')

    faces = detect_faces(face_detection, gray_image)
    if len(faces) == 0:
        print("No face")
        K.clear_session()
        return False

    emotions = collections.defaultdict(int)
    for face_coordinates in faces:
        x1, x2, y1, y2 = apply_offsets(face_coordinates, gender_offsets)
        rgb_face = rgb_image[y1:y2, x1:x2]

        x1, x2, y1, y2 = apply_offsets(face_coordinates, emotion_offsets)
        gray_face = gray_image[y1:y2, x1:x2]

        try:
            rgb_face = cv2.resize(rgb_face, (gender_target_size))
            gray_face = cv2.resize(gray_face, (emotion_target_size))
        except:
            continue

        rgb_face = preprocess_input(rgb_face, False)
        rgb_face = np.expand_dims(rgb_face, 0)
        gender_prediction = gender_classifier.predict(rgb_face)
        gender_label_arg = np.argmax(gender_prediction)
        gender_text = gender_labels[gender_label_arg]

        gray_face = preprocess_input(gray_face, True)
        gray_face = np.expand_dims(gray_face, 0)
        gray_face = np.expand_dims(gray_face, -1)
        emotion_label_arg = np.argmax(emotion_classifier.predict(gray_face))
        emotion_text = emotion_labels[emotion_label_arg]
        emotions[emotion_text] += 1
        if gender_text == gender_labels[0]:
            color = (0, 0, 255)
        else:
            color = (255, 0, 0)

        draw_bounding_box(face_coordinates, rgb_image, color)
        draw_text(face_coordinates, rgb_image, gender_text, color, 0, -20, 1,
                  2)
        draw_text(face_coordinates, rgb_image, emotion_text, color, 0, -50, 1,
                  2)
    max_num = 0
    max_emotion = None
    for key, value in emotions.items():
        if value > max_num:
            max_num = value
            max_emotion = key
    print("The emotion of this picture is: ", max_emotion)
    bgr_image = cv2.cvtColor(rgb_image, cv2.COLOR_RGB2BGR)
    cv2.imwrite('./result_images/predicted_test_image.png', bgr_image)
    K.clear_session()
    return max_emotion
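
A short usage sketch for emotion_identify above, assuming the hard-coded model paths exist on the machine; the image path is illustrative:

emotion = emotion_identify('./test_images/group_photo.jpg')   # hypothetical image
if emotion:
    print('Dominant emotion:', emotion)
else:
    print('No face detected')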
def process_image(image):

    try:
        # parameters for loading data and images
        detection_model_path = './trained_models/detection_models/haarcascade_frontalface_default.xml'
        emotion_model_path = './trained_models/emotion_models/fer2013_mini_XCEPTION.102-0.66.hdf5'
        gender_model_path = './trained_models/gender_models/simple_CNN.81-0.96.hdf5'
        emotion_labels = get_labels('fer2013')
        gender_labels = get_labels('imdb')
        font = cv2.FONT_HERSHEY_SIMPLEX

        # hyper-parameters for bounding boxes shape
        gender_offsets = (30, 60)
        gender_offsets = (10, 10)
        emotion_offsets = (20, 40)
        emotion_offsets = (0, 0)

        # loading models
        face_detection = load_detection_model(detection_model_path)
        emotion_classifier = load_model(emotion_model_path, compile=False)
        gender_classifier = load_model(gender_model_path, compile=False)

        # getting input model shapes for inference
        emotion_target_size = emotion_classifier.input_shape[1:3]
        gender_target_size = gender_classifier.input_shape[1:3]

        # loading images
        image_array = np.frombuffer(image, np.uint8)
        unchanged_image = cv2.imdecode(image_array, cv2.IMREAD_UNCHANGED)

        rgb_image = cv2.cvtColor(unchanged_image, cv2.COLOR_BGR2RGB)
        gray_image = cv2.cvtColor(unchanged_image, cv2.COLOR_BGR2GRAY)

        faces = detect_faces(face_detection, gray_image)
        for face_coordinates in faces:
            x1, x2, y1, y2 = apply_offsets(face_coordinates, gender_offsets)
            rgb_face = rgb_image[y1:y2, x1:x2]

            x1, x2, y1, y2 = apply_offsets(face_coordinates, emotion_offsets)
            gray_face = gray_image[y1:y2, x1:x2]

            try:
                rgb_face = cv2.resize(rgb_face, (gender_target_size))
                gray_face = cv2.resize(gray_face, (emotion_target_size))
            except:
                continue

            rgb_face = preprocess_input(rgb_face, False)
            rgb_face = np.expand_dims(rgb_face, 0)
            gender_prediction = gender_classifier.predict(rgb_face)
            gender_label_arg = np.argmax(gender_prediction)
            gender_text = gender_labels[gender_label_arg]

            gray_face = preprocess_input(gray_face, True)
            gray_face = np.expand_dims(gray_face, 0)
            gray_face = np.expand_dims(gray_face, -1)
            emotion_label_arg = np.argmax(emotion_classifier.predict(gray_face))
            emotion_text = emotion_labels[emotion_label_arg]

            if gender_text == gender_labels[0]:
                color = (0, 0, 255)
            else:
                color = (255, 0, 0)

            draw_bounding_box(face_coordinates, rgb_image, color)
            draw_text(face_coordinates, rgb_image, gender_text, color, 0, -20, 1, 2)
            draw_text(face_coordinates, rgb_image, emotion_text, color, 0, -50, 1, 2)
    except Exception as err:
        logging.error('Error in emotion gender processor: "{0}"'.format(err))

    bgr_image = cv2.cvtColor(rgb_image, cv2.COLOR_RGB2BGR)

    dirname = 'result'
    if not os.path.exists(dirname):
        os.mkdir(dirname)

    cv2.imwrite(os.path.join(dirname, 'predicted_image.png'), bgr_image)
Example #10
def cnn_handler (l, cnn_img_queue, cnn_emo_queue):
    import cv2
    import pygame
    import pygame.camera
    from pygame.locals import QUIT, KEYDOWN, K_s  # used by camstream() below
    from threading import Thread

    from keras.models import load_model
    from statistics import mode
    import numpy as np

    from utils.datasets import get_labels
    from utils.dataloader import detect_faces
    from utils.dataloader import load_detection_model
    from utils.dataloader import apply_offsets
    from utils.dataloader import draw_text
    from utils.preprocessor import preprocess_input

    detection_model_path='../../trained_models/detection_model/haarcascade_frontalface_default.xml'
    emotion_model_path='../../trained_models/fer2013_models/fer2013_XCEPTION.117-0.66.hdf5'
    emotion_labels=get_labels('fer2013')

    # hyper-parameters for bounding boxes shape
    frame_window=10
    emotion_offsets=(20,40)

    # load models
    face_detection=load_detection_model(detection_model_path)
    emotion_classifier=load_model(emotion_model_path)

    # getting input model shapes for inference
    emotion_target_size=emotion_classifier.input_shape[1:3]

    # window used to calculate probability of current emotion
    emotion_window=[]

    DEVICE = '/dev/video0'
    SIZE = (640, 480)
    FILENAME = 'capture.png'


    def camstream(display, camera, screen):
        while True:
            screen = camera.get_image(screen)
            display.blit(screen, (0,0))
            pygame.display.flip()
            for event in pygame.event.get():
                if event.type == QUIT:
                    capture = False
                elif event.type == KEYDOWN and event.key == K_s:
                    pygame.image.save(screen, FILENAME)
        camera.stop()
        pygame.quit()
        return

    pygame.init()
    pygame.camera.init()
    display = pygame.display.set_mode(SIZE, 0)
    camera = pygame.camera.Camera(DEVICE, SIZE)
    camera.start()
    screen = pygame.surface.Surface(SIZE, 0, display)
    bgcam = Thread(target=camstream, args=(display, camera, screen,))
    bgcam.start()


    def getimage(camera, screen):
        scrncap = camera.get_image(screen)
        l.acquire()
        pygame.image.save(scrncap, 'emotion.jpg')
        image = cv2.imread('emotion.jpg')
        l.release()
        return image


    def getscreen(camera, screen):
        scrncap = camera.get_image(screen)
        return scrncap

    def getemo():
        return emotion_text


    while True:
        bgr_image=getimage(camera, screen)
        gray_image=cv2.cvtColor(bgr_image,cv2.COLOR_BGR2GRAY)
        rgb_image=cv2.cvtColor(bgr_image,cv2.COLOR_BGR2RGB)
        faces=detect_faces(face_detection,gray_image)

        for face_coordinates in faces:
            x1,x2,y1,y2=apply_offsets(face_coordinates,emotion_offsets)
            gray_face=gray_image[y1:y2,x1:x2]
            try:
                gray_face=cv2.resize(gray_face,(emotion_target_size))
            except:
                continue

            gray_face=preprocess_input(gray_face,True)
            gray_face=np.expand_dims(gray_face,0)
            gray_face=np.expand_dims(gray_face,-1)
            emotion_prediction=emotion_classifier.predict(gray_face)
            emotion_probability=np.max(emotion_prediction)
            emotion_label_arg=np.argmax(emotion_prediction)
            emotion_text=emotion_labels[emotion_label_arg]
            emotion_window.append(emotion_text)

            cnn_emo_queue.put(emotion_text)
            cnn_img_queue.put(getimage(camera, screen))
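
cnn_handler takes a lock and two queues, which suggests it runs as a separate process. A hedged sketch of how a parent process might wire it up with multiprocessing (names are illustrative):

from multiprocessing import Lock, Process, Queue

if __name__ == '__main__':
    lock = Lock()                 # guards the temporary 'emotion.jpg' file
    img_queue = Queue()           # frames captured alongside each prediction
    emo_queue = Queue()           # predicted emotion labels

    worker = Process(target=cnn_handler, args=(lock, img_queue, emo_queue))
    worker.start()

    while True:
        label = emo_queue.get()   # blocks until the worker publishes a result
        frame = img_queue.get()
        print('detected emotion:', label)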
def generateResults(rgb_image, runNumber):

    # parameters for loading data and images
    #image_path = path
    detection_model_path = '../trained_models/detection_models/haarcascade_frontalface_default.xml'
    emotion_model_path = '../trained_models/emotion_models/fer2013_mini_XCEPTION.102-0.66.hdf5'
    gender_model_path = '../trained_models/gender_models/simple_CNN.81-0.96.hdf5'
    emotion_labels = get_labels('fer2013')
    gender_labels = get_labels('imdb')
    font = cv2.FONT_HERSHEY_SIMPLEX

    # hyper-parameters for bounding boxes shape
    gender_offsets = (30, 60)
    gender_offsets = (10, 10)
    emotion_offsets = (20, 40)
    emotion_offsets = (0, 0)

    # loading models
    face_detection = load_detection_model(detection_model_path)
    emotion_classifier = load_model(emotion_model_path, compile=False)
    gender_classifier = load_model(gender_model_path, compile=False)

    # getting input model shapes for inference
    emotion_target_size = emotion_classifier.input_shape[1:3]
    gender_target_size = gender_classifier.input_shape[1:3]

    # loading images
    #rgb_image = load_image(image_path, grayscale=False)
    #gray_image = load_image(image_path, grayscale=True)
    gray_image = cv2.cvtColor(rgb_image, cv2.COLOR_BGR2GRAY)
    gray_image = np.squeeze(gray_image)
    gray_image = gray_image.astype('uint8')

    result = []

    faces = detect_faces(face_detection, gray_image)
    for face_coordinates in faces:
        x1, x2, y1, y2 = apply_offsets(face_coordinates, gender_offsets)
        rgb_face = rgb_image[y1:y2, x1:x2]

        x1, x2, y1, y2 = apply_offsets(face_coordinates, emotion_offsets)
        gray_face = gray_image[y1:y2, x1:x2]

        try:
            rgb_face = cv2.resize(rgb_face, (gender_target_size))
            gray_face = cv2.resize(gray_face, (emotion_target_size))
        except:
            continue

        rgb_face = preprocess_input(rgb_face, False)
        rgb_face = np.expand_dims(rgb_face, 0)

        gender_prediction = gender_classifier.predict(rgb_face)
        gender_label_arg = np.argmax(gender_prediction)
        gender_text = gender_labels[gender_label_arg]

        print(gender_labels)
        print(gender_prediction)
        print(gender_label_arg)
        print(emotion_labels)


        gray_face = preprocess_input(gray_face, True)
        gray_face = np.expand_dims(gray_face, 0)
        gray_face = np.expand_dims(gray_face, -1)
        emotion_label_arg = np.argmax(emotion_classifier.predict(gray_face))
        emotion_text = emotion_labels[emotion_label_arg]

        print(emotion_label_arg)
        result.append((gender_label_arg, emotion_label_arg))

        if gender_text == gender_labels[0]:
            color = (0, 0, 255)
        else:
            color = (255, 0, 0)

        draw_bounding_box(face_coordinates, rgb_image, color)
        draw_text(face_coordinates, rgb_image, gender_text, color, 0, -20, 1, 2)
        draw_text(face_coordinates, rgb_image, emotion_text, color, 0, -50, 1, 2)

    bgr_image = cv2.cvtColor(rgb_image, cv2.COLOR_RGB2BGR)
    cv2.imwrite('../images/predicted_test_image' + str(runNumber) + '.png' , bgr_image)
    cv2.imwrite('../images/source_test_image' + str(runNumber) + '.png' , rgb_image)
    
    return result


#generateResults(sys.argv[1])
from utils.grad_cam import calculate_guided_gradient_CAM
from utils.datasets import get_labels
from utils.inference import detect_faces
from utils.inference import apply_offsets
from utils.inference import load_detection_model
from utils.preprocessor import preprocess_input
from utils.inference import draw_bounding_box
from utils.inference import load_image


# parameters
image_path = sys.argv[1]
# task = sys.argv[2]
task = 'emotion'
if task == 'emotion':
    labels = get_labels('fer2013')
    offsets = (0, 0)
    # model_filename = '../trained_models/fer2013_big_XCEPTION.54-0.66.hdf5'
    model_filename = '../trained_models/emotion_models/fer2013_mini_XCEPTION.102-0.66.hdf5'
elif task == 'gender':
    labels = get_labels('imdb')
    offsets = (30, 60)
    model_filename = '../trained_models/gender_models/gender_mini_XCEPTION.21-0.95.hdf5'

color = (0, 255, 0)

# loading models
detection_model_path = '../trained_models/detection_models/haarcascade_frontalface_default.xml'
model = load_model(model_filename, compile=False)
target_size = model.input_shape[1:3]
face_detection = load_detection_model(detection_model_path)
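
This guided grad-CAM example is cut off after loading the detector. A hedged sketch of the face loop that would typically follow, reusing the helpers imported above (cv2 and np are assumed imported in the elided header); the call into calculate_guided_gradient_CAM itself is omitted because its signature is not shown here:

gray_image = np.squeeze(load_image(image_path, grayscale=True)).astype('uint8')
faces = detect_faces(face_detection, gray_image)
for face_coordinates in faces:
    x1, x2, y1, y2 = apply_offsets(face_coordinates, offsets)
    face = cv2.resize(gray_image[y1:y2, x1:x2], target_size)
    face = np.expand_dims(np.expand_dims(preprocess_input(face, True), 0), -1)
    predicted_label = labels[np.argmax(model.predict(face))]
    # ... calculate_guided_gradient_CAM would be applied to `face` here ...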
def __init__(self):

    # Initialize the node with rospy
    rospy.init_node('emotion_recognizer_node', anonymous=True)

    rospy.loginfo("recognizer started")
    print("1................................................")

    if USE_LOCAL_CAMERA:
        self.video_capture = cv2.VideoCapture(0)

    self._detection_models = "~detection_models"
    if rospy.has_param(self._detection_models):
        self.detection_model_path = rospy.get_param(self._detection_models)
    else:
        rospy.logwarn("parameters need to be set to start recognizer.")
        return

    self.emotion_models = "~emotion_models"
    if rospy.has_param(self.emotion_models):
        self.emotion_model_path = rospy.get_param(self.emotion_models)
    else:
        rospy.logwarn("parameters need to be set to start recognizer.")
        return

    self.bridge = CvBridge()

    # parameters for loading data and images
    # self.detection_model_path = '../trained_models/detection_models/haarcascade_frontalface_default.xml'
    # self.emotion_model_path = '../trained_models/emotion_models/fer2013_mini_XCEPTION.110-0.65.hdf5'
    self.emotion_labels = get_labels('fer2013')

    # hyper-parameters for bounding boxes shape
    self.frame_window = 10
    self.emotion_offsets = (20, 40)

    # loading models
    self.face_detection = load_detection_model(self.detection_model_path)
    self.emotion_classifier = load_model(self.emotion_model_path, compile=False)

    # getting input model shapes for inference
    self.emotion_target_size = self.emotion_classifier.input_shape[1:3]

    # starting lists for calculating modes
    self.emotion_window = []
    self.emotion_publisher = rospy.Publisher("/qt_face/setEmotion", String, queue_size=10)
    self.speech_publisher = rospy.Publisher("/speaker", String, queue_size=10)
    self.emotion_msg = String()
    self.speech_msg = String()

    # Where to publish
    self._output_image_topic = "~image_topic_output"
    print(rospy.has_param(self._output_image_topic))
    if rospy.has_param(self._output_image_topic):
        output_image_topic = rospy.get_param(self._output_image_topic)
        self.image_pub = rospy.Publisher(output_image_topic, Image, queue_size=10)

    # Scaling factor for face recognition image
    self.scaling_factor = 0.50

    # Where to subscribe
    self._input_image_topic = "~image_topic_input"
    print(rospy.has_param(self._input_image_topic))
    if rospy.has_param(self._input_image_topic):
        input_image_topic = rospy.get_param(self._input_image_topic)
        if not USE_LOCAL_CAMERA:
            self.image_sub = rospy.Subscriber(input_image_topic, Image, self.callback)

    self.graph = tf.get_default_graph()
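
A hedged sketch of how a node like this is usually launched, assuming the class above is named EmotionRecognizer (the class declaration is not part of this snippet):

if __name__ == '__main__':
    try:
        recognizer = EmotionRecognizer()   # hypothetical class name
        rospy.spin()                       # keep the node alive to service callbacks
    except rospy.ROSInterruptException:
        pass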
Example #14
def gender_detection(image_path):
    # parameters for loading data and images
    detection_model_path = 'C:\\Users\\l1f15bscs0049\\Desktop\\haarcascade_frontalface_default.xml'
    gender_model_path = 'C:\\Users\\l1f15bscs0049\\Desktop\\simple_CNN.81-0.96.hdf5'
    gender_labels = get_labels('imdb')
    font = cv2.FONT_HERSHEY_SIMPLEX

    # hyper-parameters for bounding boxes shape
    gender_offsets = (30, 60)
    gender_offsets = (10, 10)

    # loading models
    face_detection = load_detection_model(detection_model_path)
    gender_classifier = load_model(gender_model_path, compile=False)

    # getting input model shapes for inference
    gender_target_size = gender_classifier.input_shape[1:3]

    # loading images
    rgb_image = load_image(image_path, grayscale=False)
    gray_image = load_image(image_path, grayscale=True)
    gray_image = np.squeeze(gray_image)
    gray_image = gray_image.astype('uint8')

    #creating a file
    save_path = 'C:\\Users\\l1f15bscs0049\\Desktop'
    completeName = os.path.join(save_path, "hellojee.txt")
    file = open(completeName, "a")

    faces = detect_faces(face_detection, gray_image)
    for face_coordinates in faces:
        x1, x2, y1, y2 = apply_offsets(face_coordinates, gender_offsets)
        rgb_face = rgb_image[y1:y2, x1:x2]

        try:
            rgb_face = cv2.resize(rgb_face, (gender_target_size))

        except:
            continue

        rgb_face = preprocess_input(rgb_face, False)
        rgb_face = np.expand_dims(rgb_face, 0)
        gender_prediction = gender_classifier.predict(rgb_face)
        gender_label_arg = np.argmax(gender_prediction)
        gender_text = gender_labels[gender_label_arg]
        #print(gender_label_arg)
        file.write(str(gender_label_arg))
        file.write("\n")

        if gender_text == gender_labels[0]:
            color = (0, 0, 255)
        else:
            color = (255, 0, 0)

        draw_bounding_box(face_coordinates, rgb_image, color)
        draw_text(face_coordinates, rgb_image, gender_text, color, 0, -20, 1,
                  2)
        #print(gender_label_arg)

    bgr_image = cv2.cvtColor(rgb_image, cv2.COLOR_RGB2BGR)
    #cv2.imwrite('C:\\Users\\l1f15bscs0049\\Desktop\\a.png', bgr_image)
    print('\n\tGender Detection Done')

    file.close()

    #check men women count
    from collections import Counter
    with open(completeName, "r") as f:
        cd = Counter(int(line.split(None, 1)[0]) for line in f)
    #print(cd)

    women_count = cd[0]
    men_count = cd[1]
    # print(women_count)
    #print(men_count)
    #print(cd[0])
    #print(cd[1])
    os.remove(completeName)
    print("file removed")
    #call a wrapper function
    if (women_count > men_count):
        print("Women detected")
        Wrapper_func(0)

    elif (men_count > women_count):
        print("men detected")
        Wrapper_func(1)

    else:
        print("no Detection\n Random Ad's playing\n")
        random_ads()

    file.close()
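The temporary text file above exists only to tally the 0/1 gender labels before deciding which ads to play. A minimal in-memory sketch of the same majority vote (an assumption, not part of the original; label 0 is woman and 1 is man, matching the imdb labels used above):

# Sketch (assumption): count gender labels in memory instead of a temp file.
from collections import Counter

def majority_gender(gender_label_args):
    counts = Counter(gender_label_args)   # 0 = woman, 1 = man (imdb labels)
    women_count, men_count = counts[0], counts[1]
    if women_count > men_count:
        return 'woman'
    if men_count > women_count:
        return 'man'
    return None                           # tie or no detections

# e.g. majority_gender([0, 1, 0, 0]) -> 'woman'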
Example #15
  def faceDetection(self):
    print('xxxx') 
    global sess, age, gender, train_mode, images_pl
    global label_header
    global face_img_list
    global face_time_list
    global label_face_ubf
    global count
    global openAddUI
    global fa
    global face_net
    global emotion_labels, emotion_classifier, emotion_target_size
    # for face detection
    global doc
    global parentDir
    # load model and weights
    img_size = 160
    
    # capture video
    # cap = cv2.VideoCapture('C:\\Users\\Owner\\Desktop\\Summer-2018\\AI-Internship\\face_classification\\src\\dist\\trained_models\\dinner.mp4')
    cap = cv2.VideoCapture(0)
    
    #face model
    filename = parentDir + '/Deha.xml'
    if os.stat(filename).st_size != 0:
      with open(filename) as fd:
          doc = xmltodict.parse(fd.read())   
          for key_out in doc['root'].keys():
            found = False
            for key in doc['root'][key_out].keys():
              doc['root'][key_out][key] = np.array(doc['root'][key_out][key]['item'], dtype=float)          
    else:
      print('Database is empty')    

    # emotion model
    if count == 1:
      emotion_model_path = parentDir + '/trained_models/emotion_models/fer2013_mini_XCEPTION.102-0.66.hdf5'
      emotion_labels = get_labels('fer2013')
      emotion_classifier = load_model(emotion_model_path, compile=False)
      emotion_target_size = emotion_classifier.input_shape[1:3]      

    inWidth = 300
    inHeight = 300
    confThreshold = 0.5    
    
    cap.set(cv2.CAP_PROP_FRAME_WIDTH, 640)
    cap.set(cv2.CAP_PROP_FRAME_HEIGHT, 480)

     # Track model
    trackerType = "KCF"      
    listTrack = []
    list_faces = []
    gender_show = ''
    age_show = ''
    emotion_txt = ''
    knownPeople = ''
    countFrame = 0
    listBox = []
    listDetails = []
    passEmotions = []
    emotion_dict = {'happy':0, 'sad':0,'surprise':0, 'disgust':0, 'angry':0,'fear':0,'neutral':0}
    while True:
      if openAddUI == True:
        print('Stop Camera')
        break    
      # get video frame
      ret, img = cap.read()
      cloneImage = img.copy()
      countFrame += 1

      timer = cv2.getTickCount()

      male_count = 0
      female_count = 0
      list_time = []

       # Track box
      if listTrack != []:
        for track in listTrack:
          ok, track[0] = track[1].update(img)
          if ok == False or track[0][0] <= 0 or track[0][1] >= img.shape[0] or track[0][2] <= 0 or track[0][2] >= img.shape[1]:
            listTrack.remove(track)

      if countFrame % 2 == 0:
        cols = img.shape[1]
        rows = img.shape[0]        
    
        input_img = cv2.cvtColor(img, cv2.COLOR_BGR2RGB)
        gray = cv2.cvtColor(img, cv2.COLOR_BGR2GRAY)
        img_h, img_w, _ = np.shape(input_img)      
        #detect face      
        face_net.setInput(dnn.blobFromImage(img, 1.0, (inWidth, inHeight), (104.0, 177.0, 123.0), False, False))
        detections = face_net.forward()
        faces = np.empty((1, img_size, img_size, 3))
        passEmotions = []
        
        list_faces = []
        listBox = []
        countFace = 0
        for i in range(detections.shape[2]):
          confidence = detections[0, 0, i, 2]
          if confidence > confThreshold:
            x1 = int(detections[0, 0, i, 3] * cols)
            y1 = int(detections[0, 0, i, 4] * rows)
            x2 = int(detections[0, 0, i, 5] * cols)
            y2 = int(detections[0, 0, i, 6] * rows)     
            face_image = img[y1:y2, x1:x2]

            if face_image.shape[0] > 0 and face_image.shape[1] > 0: 
              countFace += 1
              list_faces.append(face_image)
              cur_time = datetime.datetime.now()
              list_time.append(cur_time.strftime('%H:%M:%S'))  
            
              box = (x1, x2, y1, y2)
              
              foundTrack = False 
              for index, j in enumerate(listTrack):
                if self.is_intersect(j[0], box):
                  knownPeople = j[2]
                  foundTrack = True
                  j[1].init(img, box)
                  if knownPeople == 'Unknown' and countFrame > 1000:
                    countFrame = 0
                    faceResize = cv2.resize(face_image, (100, 100), interpolation=cv2.INTER_CUBIC)
                    face_RGB = cv2.cvtColor(faceResize,cv2.COLOR_BGR2RGB)
                    face_features = face_recognition.face_encodings(face_RGB)
                    if face_features != []:
                      if os.stat(filename).st_size != 0:
                        for key_out in doc['root'].keys():
                          found = False
                          for key in doc['root'][key_out].keys():
                            face_distances = face_recognition.face_distance(doc['root'][key_out][key], face_features)
                            if min(face_distances) < 0.4:
                              found = True  
                          if found:
                            knownPeople = key_out
                    j[2] = knownPeople
                  break
              if not foundTrack:
                faceResize = cv2.resize(face_image, (100, 100), interpolation=cv2.INTER_CUBIC)
                face_RGB = cv2.cvtColor(faceResize,cv2.COLOR_BGR2RGB)
                face_features = face_recognition.face_encodings(face_RGB)
                knownPeople = 'Unknown'
                if face_features != []:
                  if os.stat(filename).st_size != 0:
                    for key_out in doc['root'].keys():
                      found = False
                      for key in doc['root'][key_out].keys():
                        face_distances = face_recognition.face_distance(doc['root'][key_out][key], face_features)
                        if min(face_distances) < 0.4:
                          found = True
                      if found:
                        knownPeople = key_out
                        
                track = cv2.TrackerKCF_create()
                track.init(img, box) 
                listTrack.append([box, track, knownPeople])
              listBox.append((box, knownPeople, cloneImage))

        if listBox != [] and listDetails != []:
          for index, details in enumerate(listDetails):
            gender_show = details[0]
            age_show = details[1] 
            emotion_txt = details[2]
            if index < len(listBox):
              knownPeople = listBox[index][1]
            passEmotions.append(emotion_txt)
            cv2.rectangle(img, (x1, y1), (x2, y2), (0, 0, 255), 1)
            labelSize, baseLine = cv2.getTextSize('normaler', cv2.FONT_HERSHEY_SIMPLEX, 0.5, 1)
            cv2.rectangle(img, (x1, y1 - 4 * labelSize[1] - baseLine), (x1 + labelSize[0], y1),(255, 255, 255))
            cv2.putText(img, gender_show, (x1, y1 - baseLine), cv2.FONT_HERSHEY_SIMPLEX, 0.5, (0, 255, 0))
            cv2.putText(img, age_show, (x1, y1 - labelSize[1] - baseLine), cv2.FONT_HERSHEY_SIMPLEX, 0.5,(0, 255, 0))
            cv2.putText(img, emotion_txt, (x1, y1 - 2 * labelSize[1] - baseLine), cv2.FONT_HERSHEY_SIMPLEX,0.5, (0, 255, 0))
            cv2.putText(img, knownPeople, (x1, y1 - 3 * labelSize[1] - baseLine), cv2.FONT_HERSHEY_SIMPLEX,0.5, (0, 255, 0))
        listDetails = []

      else:
        for index, box in enumerate(listBox):
          x1 = box[0][0]
          x2 = box[0][1]
          y1 = box[0][2]
          y2 = box[0][3]
          knownPeople = box[1]
          passImage = box[2]

          faceR = dlib.rectangle(left = x1, top = y1, right = x2, bottom = y2)
          input_img = cv2.cvtColor(passImage, cv2.COLOR_BGR2RGB)
          gray = cv2.cvtColor(passImage, cv2.COLOR_BGR2GRAY)
          faces[0, :, :, :] = fa.align(input_img, gray, faceR)
          ages,genders = sess.run([age, gender], feed_dict={images_pl: faces, train_mode: False})
          
          if genders[0] == 1:
              gender_show = 'Male'
              male_count+=1
          else:
              gender_show = 'Female' 
              female_count+=1
          
          check_age = int(ages[0])
          if check_age < 8:
              age_show = '0 - 7'
          elif check_age >= 8 and  check_age < 15:
              age_show = '8 - 14'
          elif check_age >= 15 and  check_age < 25:
              age_show = '15 - 24'
          elif check_age >= 25 and  check_age < 38:
              age_show = '25 - 35'
          elif check_age >= 38 and  check_age < 48:
              age_show = '38 - 47'
          elif check_age >= 48 and  check_age < 60:
              age_show = '48 - 59'
          else:
              age_show = '60 - 100'    
          face_image = passImage[y1:y2, x1:x2]
          face_gray_image = cv2.cvtColor(face_image, cv2.COLOR_BGR2GRAY)
          croppedMat = cv2.resize(face_image, (227, 227), interpolation=cv2.INTER_CUBIC)
          input_mat = dnn.blobFromImage(croppedMat, 1.0, (227, 227), (78.4263377603, 87.7689143744, 114.895847746), False)      
          
          emotion_face_image = cv2.resize(face_gray_image, (emotion_target_size))
          emotion_face_image = preprocess_input(emotion_face_image, True)
          emotion_face_image = np.expand_dims(emotion_face_image, 0)
          emotion_face_image = np.expand_dims(emotion_face_image, -1)
          emotion_label_arg = np.argmax(emotion_classifier.predict(emotion_face_image))
          emotion_txt = emotion_labels[emotion_label_arg]  
          if passEmotions != [] and index < len(passEmotions) and emotion_txt != passEmotions[index]:
            emotion_dict[passEmotions[index]] += 1

          listDetails.append((gender_show, age_show, emotion_txt))
          #draw text and rect
          cv2.rectangle(img, (x1, y1), (x2, y2), (0, 0, 255), 1)
          labelSize, baseLine = cv2.getTextSize('normaler', cv2.FONT_HERSHEY_SIMPLEX, 0.5, 1)
          cv2.rectangle(img, (x1, y1 - 4 * labelSize[1] - baseLine), (x1 + labelSize[0], y1),(255, 255, 255))
          cv2.putText(img, gender_show, (x1, y1 - baseLine), cv2.FONT_HERSHEY_SIMPLEX, 0.5, (0, 255, 0))
          cv2.putText(img, age_show, (x1, y1 - labelSize[1] - baseLine), cv2.FONT_HERSHEY_SIMPLEX, 0.5,(0, 255, 0))
          cv2.putText(img, emotion_txt, (x1, y1 - 2 * labelSize[1] - baseLine), cv2.FONT_HERSHEY_SIMPLEX,0.5, (0, 255, 0))
          cv2.putText(img, knownPeople, (x1, y1 - 3 * labelSize[1] - baseLine), cv2.FONT_HERSHEY_SIMPLEX,0.5, (0, 255, 0))
      self.gender_text(male_count, female_count)
      self.emotion_text(emotion_dict.get('happy'), emotion_dict.get('sad'), emotion_dict.get('surprise'), emotion_dict.get('fear'), emotion_dict.get('angry'), emotion_dict.get('disgust'), emotion_dict.get('neutral'))

      for track in listTrack:
        for box in listBox:
          if not(self.is_intersect(track[0], box[0])):
            if track in listTrack:
             listTrack.remove(track)

      image = cv2.cvtColor(img,cv2.COLOR_BGR2RGB) 
      src_mat = Image.fromarray(image)
      pic = ImageTk.PhotoImage(src_mat)   
      label_header['image'] = pic   
      img_list = self.convert_mat_to_img(list_faces)

      for index, face_img in enumerate(img_list): 
        face_img_list[index]['image'] = face_img   
      for index, time_text in enumerate(list_time):
        face_time_list[index]['text'] = time_text      
      label_face_ubf['text'] = 'Face: {}'.format(len(list_faces))
    
    print('end camera')
    cap.release()
    openAddUI = False
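The tracking loop above relies on a self.is_intersect helper that is not shown. A plausible standalone sketch of such a rectangle-overlap test (an assumption, using the (x1, x2, y1, y2) box layout constructed in the detection branch):

# Sketch (assumption): axis-aligned overlap test for (x1, x2, y1, y2) boxes.
def is_intersect(box_a, box_b):
    ax1, ax2, ay1, ay2 = box_a
    bx1, bx2, by1, by2 = box_b
    return not (ax2 < bx1 or bx2 < ax1 or ay2 < by1 or by2 < ay1)

# e.g. is_intersect((0, 10, 0, 10), (5, 15, 5, 15)) -> True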
Example #16
def predict(image_folder_path, emotion_kind):

    detection_model_path = '../trained_models/detection_models/haarcascade_frontalface_default.xml'
    emotion_model_path = '../trained_models/emotion_models/fer2013_mini_XCEPTION.102-0.66.hdf5'
    gender_model_path = '../trained_models/gender_models/simple_CNN.81-0.96.hdf5'
    emotion_labels = get_labels('fer2013')

    target_file = '../result/predicted_' + EmotionName[emotion_kind] + '.txt'

    gender_labels = get_labels('imdb')
    font = cv2.FONT_HERSHEY_SIMPLEX

    # hyper-parameters for bounding boxes shape
    gender_offsets = (30, 60)
    gender_offsets = (10, 10)
    emotion_offsets = (20, 40)
    emotion_offsets = (0, 0)

    # loading models
    face_detection = load_detection_model(detection_model_path)
    emotion_classifier = load_model(emotion_model_path, compile=False)
    gender_classifier = load_model(gender_model_path, compile=False)

    # getting input model shapes for inference
    emotion_target_size = emotion_classifier.input_shape[1:3]
    gender_target_size = gender_classifier.input_shape[1:3]

    image_path = dir_data_folder(image_folder_path)

    predicted_label = []

    for num in range(len(image_path)):
        # print the process info
        if print_switch == 0:
            print('deal with the ' + image_path[num])

        # loading images
        rgb_image = load_image(image_path[num], grayscale=False)
        gray_image = load_image(image_path[num], grayscale=True)
        gray_image = np.squeeze(gray_image)
        gray_image = gray_image.astype('uint8')

        # pdb.set_trace()

        faces = detect_faces(face_detection, gray_image)
        emotion_label_arg = -1
        for face_coordinates in faces:
            x1, x2, y1, y2 = apply_offsets(face_coordinates, gender_offsets)
            rgb_face = rgb_image[y1:y2, x1:x2]

            x1, x2, y1, y2 = apply_offsets(face_coordinates, emotion_offsets)
            gray_face = gray_image[y1:y2, x1:x2]

            try:
                rgb_face = cv2.resize(rgb_face, (gender_target_size))
                gray_face = cv2.resize(gray_face, (emotion_target_size))
            except:
                continue

            rgb_face = preprocess_input(rgb_face, False)
            rgb_face = np.expand_dims(rgb_face, 0)
            gender_prediction = gender_classifier.predict(rgb_face)
            gender_label_arg = np.argmax(gender_prediction)
            gender_text = gender_labels[gender_label_arg]

            gray_face = preprocess_input(gray_face, True)
            gray_face = np.expand_dims(gray_face, 0)
            gray_face = np.expand_dims(gray_face, -1)
            emotion_label_arg = np.argmax(
                emotion_classifier.predict(gray_face))
            emotion_text = emotion_labels[emotion_label_arg]

            if single_set_draw_switch == 0:
                if gender_text == gender_labels[0]:
                    color = (0, 0, 255)
                else:
                    color = (255, 0, 0)

                draw_bounding_box(face_coordinates, rgb_image, color)
                # draw_text(face_coordinates, rgb_image, gender_text, color, 0, -20, 1, 2)
                draw_text(face_coordinates, rgb_image, emotion_text, color, 0,
                          20, 1, 2)

        if single_set_draw_switch == 0:
            bgr_image = cv2.cvtColor(rgb_image, cv2.COLOR_RGB2BGR)

            filename = image_path[num].split('/')[-1]
            filename = filename.split('.')[0]
            newname = '../predicted_image/' + 'predicted_' + filename + '.png'
            cv2.imwrite(newname, bgr_image)

        # add the predicted label
        predicted_label.append(emotion_label_arg)

    unrecognized_cnt = predicted_label.count(-1)
    true_cnt = predicted_label.count(emotion_kind)
    total_cnt = len(image_path)
    face_cnt = total_cnt - unrecognized_cnt
    face_recog_ratio = face_cnt / float(total_cnt)
    total_accuracy = true_cnt / float(total_cnt)
    recog_accuracy = true_cnt / float(face_cnt)

    f = open(target_file, 'w+')
    #with open(target_file,'w') as f:
    f.write(EmotionName[emotion_kind] + ' count = ' + str(true_cnt) + '\n')
    f.write(EmotionName[emotion_kind] + ' total count = ' + str(total_cnt) +
            '\n')
    f.write(EmotionName[emotion_kind] + '_accuracy in total: ' +
            str(total_accuracy) + '\n')
    f.write(EmotionName[emotion_kind] + '_accuracy in recognized: ' +
            str(recog_accuracy) + '\n')
    f.write('face_recognize_count : ' + str(face_cnt) + '\n')
    f.write('face_recognize_ratio : ' + str(face_recog_ratio) + '\n')
    for label in predicted_label:
        f.write(str(label) + '\n')
    f.close()
    return [
        true_cnt, total_cnt, face_cnt, total_accuracy, face_recog_ratio,
        recog_accuracy
    ]
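A toy walk-through (not from the original) of the accuracy bookkeeping returned above, where -1 marks images in which no face was recognized:

# Toy numbers only; emotion_kind = 3 is a hypothetical target class.
predicted_label = [3, 3, -1, 5, 3]
emotion_kind = 3
unrecognized_cnt = predicted_label.count(-1)    # 1
true_cnt = predicted_label.count(emotion_kind)  # 3
total_cnt = len(predicted_label)                # 5
face_cnt = total_cnt - unrecognized_cnt         # 4
print(true_cnt / float(total_cnt))              # accuracy in total: 0.6
print(true_cnt / float(face_cnt))               # accuracy on recognized faces: 0.75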
Example #17
def process_image(unchanged_image):
    """

    :param unchanged_image:
    :return:
    """
    face_size = -1
    emotion = ''
    gender = ''
    try:
        # parameters for loading data and images

        emotion_labels = get_labels('fer2013')
        gender_labels = get_labels('imdb')
        font = cv2.FONT_HERSHEY_SIMPLEX

        # hyper-parameters for bounding boxes shape
        gender_offsets = (30, 60)
        gender_offsets = (10, 10)
        emotion_offsets = (20, 40)
        emotion_offsets = (0, 0)

        with g.as_default():  # shared graph; this function is called multiple times
            # loading models
            # getting input model shapes for inference
            emotion_target_size = emotion_classifier.input_shape[1:3]
            gender_target_size = gender_classifier.input_shape[1:3]

            rgb_image = cv2.cvtColor(unchanged_image, cv2.COLOR_BGR2RGB)
            gray_image = cv2.cvtColor(unchanged_image, cv2.COLOR_BGR2GRAY)

            faces = detect_faces(face_detection, gray_image)
            if len(faces)>=1:
                face_coordinates = faces[0]
                x1, x2, y1, y2 = apply_offsets(face_coordinates, gender_offsets)
                rgb_face = rgb_image[y1:y2, x1:x2]

                x1, x2, y1, y2 = apply_offsets(face_coordinates, emotion_offsets)
                gray_face = gray_image[y1:y2, x1:x2]

                try:
                    rgb_face = cv2.resize(rgb_face, (gender_target_size))
                    gray_face = cv2.resize(gray_face, (emotion_target_size))
                except:
                    pass

                rgb_face = preprocess_input(rgb_face, False)
                rgb_face = np.expand_dims(rgb_face, 0)
                gender_prediction = gender_classifier.predict(rgb_face)
                gender_label_arg = np.argmax(gender_prediction)
                gender_text = gender_labels[gender_label_arg]

                gray_face = preprocess_input(gray_face, True)
                gray_face = np.expand_dims(gray_face, 0)
                gray_face = np.expand_dims(gray_face, -1)
                emotion_label_arg = np.argmax(emotion_classifier.predict(gray_face))
                emotion_text = emotion_labels[emotion_label_arg]

                if gender_text == gender_labels[0]:
                    color = (0, 0, 255)
                else:
                    color = (255, 0, 0)
                print(emotion_text, gender_text, len(faces))
                face_size = len(faces)
                gender = gender_text
                emotion = get_emotion_lables()[emotion_text]
    except Exception as err:
        logging.error('Error in emotion gender processor: "{0}"'.format(err))

    return face_size, gender, emotion
Example #18
def detector(image_input, image_output):
    # parameters for loading data and images
    detection_model_path = '../trained_models/detection_models/haarcascade_frontalface_default.xml'
    emotion_model_path = '../trained_models/emotion_models/fer2013_mini_XCEPTION.102-0.66.hdf5'
    emotion_labels = get_labels('fer2013')
    gender_model_path = '../trained_models/gender_models/simple_CNN.81-0.96.hdf5'
    gender_labels = get_labels('imdb')
    font = cv2.FONT_HERSHEY_SIMPLEX

    gender_to_cnt = {}
    emotion_to_cnt = {}
    age_to_cnt = {}

    # hyper-parameters for bounding boxes shape
    frame_window = 10
    emotion_offsets = (20, 40)
    gender_offsets = (10, 10)

    # loading models
    face_detection = load_detection_model(detection_model_path)
    emotion_classifier = load_model(emotion_model_path, compile=False)
    # getting input model shapes for inference
    emotion_target_size = emotion_classifier.input_shape[1:3]

    gender_classifier = load_model(gender_model_path, compile=False)
    gender_target_size = gender_classifier.input_shape[1:3]

    depth = 16
    k = 8
    weight_file = "weights.18-4.06.hdf5"

    # load model and weights
    gender_age_prediction_img_size = 64
    model = WideResNet(gender_age_prediction_img_size, depth=depth, k=k)()
    model.load_weights(weight_file)

    cnn_face_detector = dlib.cnn_face_detection_model_v1(
        'mmod_human_face_detector.dat')

    def pipeline(bgr_image):
        #bgr_image = cv2.resize(bgr_image, (640, 360))
        faces = cnn_face_detector(bgr_image, 1)
        global total_faces

        total_faces = total_faces + len(faces)
        gray_image = cv2.cvtColor(bgr_image, cv2.COLOR_BGR2GRAY)
        rgb_image = cv2.cvtColor(bgr_image, cv2.COLOR_BGR2RGB)

        for face in faces:
            x1, y1, x2, y2, w, h = (face.rect.left(), face.rect.top(),
                                    face.rect.right() + 1, face.rect.bottom() + 1,
                                    face.rect.width(), face.rect.height())
            xw1 = max(int(x1 - 0.4 * w), 0)
            yw1 = max(int(y1 - 0.4 * h), 0)
            xw2 = min(int(x2 + 0.4 * w), bgr_image.shape[1] - 1)
            yw2 = min(int(y2 + 0.4 * h), bgr_image.shape[0] - 1)
            gray_face = gray_image[yw1:yw2 + 1, xw1:xw2 + 1]

            try:
                gray_face = cv2.resize(gray_face, (emotion_target_size))
            except:
                continue

            gray_face = preprocess_input(gray_face, False)
            gray_face = np.expand_dims(gray_face, 0)
            gray_face = np.expand_dims(gray_face, -1)
            emotion_label_arg = np.argmax(
                emotion_classifier.predict(gray_face))
            emotion_text = emotion_labels[emotion_label_arg]

            if emotion_text not in emotion_to_cnt:
                emotion_to_cnt[emotion_text] = 0
            emotion_to_cnt[emotion_text] = emotion_to_cnt[emotion_text] + 1

            color = (0, 0, 0)

            cv2.putText(rgb_image, emotion_text,
                        (face.rect.left(), face.rect.top() - 5),
                        cv2.FONT_HERSHEY_SIMPLEX, 1, color, 1, cv2.LINE_AA)

        face_list = np.empty((len(faces), gender_age_prediction_img_size,
                              gender_age_prediction_img_size, 3))

        for i in range(0, len(faces)):
            face = faces[i]
            x1, y1, x2, y2, w, h = (face.rect.left(), face.rect.top(),
                                    face.rect.right() + 1, face.rect.bottom() + 1,
                                    face.rect.width(), face.rect.height())
            xw1 = max(int(x1 - 0.4 * w), 0)
            yw1 = max(int(y1 - 0.4 * h), 0)
            xw2 = min(int(x2 + 0.4 * w), bgr_image.shape[1] - 1)
            yw2 = min(int(y2 + 0.4 * h), bgr_image.shape[0] - 1)
            rgb_face = rgb_image[yw1:yw2 + 1, xw1:xw2 + 1, :]

            try:
                face_list[i, :, :, :] = cv2.resize(
                    rgb_face, (gender_age_prediction_img_size,
                               gender_age_prediction_img_size))
            except:
                continue

        gender_age_prediction = model.predict(face_list)
        for i in range(0, len(faces)):
            face = faces[i]
            predicted_genders = gender_age_prediction[0]
            gender_text = "FEMALE" if predicted_genders[i][0] > 0.5 else "MALE"

            ages = np.arange(0, 101).reshape(101, 1)
            predicted_ages = gender_age_prediction[1].dot(ages).flatten()
            age_text = str(predicted_ages[i])

            if gender_text not in gender_to_cnt:
                gender_to_cnt[gender_text] = 0
            gender_to_cnt[gender_text] = gender_to_cnt[gender_text] + 1

            if age_text not in age_to_cnt:
                age_to_cnt[age_text] = 0
            age_to_cnt[age_text] = age_to_cnt[age_text] + 1

            gender_color = (255, 0, 0) if gender_text == "MALE" else (0, 0, 255)
            cv2.rectangle(rgb_image, (face.rect.left(), face.rect.top()),
                          (face.rect.right(), face.rect.bottom()),
                          gender_color, 1)

            color = (0, 0, 0)
            cv2.putText(rgb_image, gender_text,
                        (face.rect.left(), face.rect.top() - 20),
                        cv2.FONT_HERSHEY_SIMPLEX, 1, gender_color, 1,
                        cv2.LINE_AA)
            cv2.putText(rgb_image, age_text,
                        (face.rect.left(), face.rect.top() - 35),
                        cv2.FONT_HERSHEY_SIMPLEX, 1, color, 1, cv2.LINE_AA)

        bgr_image = cv2.cvtColor(rgb_image, cv2.COLOR_RGB2BGR)

        return bgr_image

    image = cv2.imread(image_input)
    cv2.imwrite(image_output, pipeline(image))
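The age shown above is the expected value of the WideResNet's 101-way age distribution. A small standalone sketch of that dot-product step (an assumption for illustration, using a made-up distribution):

# Sketch (assumption): expected age from a 101-bin age probability distribution.
import numpy as np

age_probs = np.full((1, 101), 1.0 / 101)        # hypothetical uniform distribution
ages = np.arange(0, 101).reshape(101, 1)
predicted_ages = age_probs.dot(ages).flatten()  # probability-weighted mean age
print(predicted_ages)                           # [50.] for the uniform case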
Example #19
def face_gender(inf, inf3):  #sleep 5ms

    #cv2.namedWindow('AI3',0)               #new window
    #cv2.resizeWindow('AI3', 800, 400);     #640*480
    cap = cv2.VideoCapture('1.avi')
    currentFrame = 0
    totalFrame = cap.get(7)
    print(totalFrame)

    # parameters for loading data and images
    detection_model_path = './trained_models/detection_models/haarcascade_frontalface_default.xml'
    emotion_model_path = './trained_models/emotion_models/fer2013_mini_XCEPTION.102-0.66.hdf5'
    gender_model_path = './trained_models/gender_models/simple_CNN.81-0.96.hdf5'
    emotion_labels = get_labels('fer2013')
    gender_labels = get_labels('imdb')
    font = cv2.FONT_HERSHEY_SIMPLEX

    # hyper-parameters for bounding boxes shape
    frame_window = 10
    gender_offsets = (30, 60)
    emotion_offsets = (20, 40)

    # loading models
    face_detection = load_detection_model(detection_model_path)
    emotion_classifier = load_model(emotion_model_path, compile=False)
    gender_classifier = load_model(gender_model_path, compile=False)

    # getting input model shapes for inference
    emotion_target_size = emotion_classifier.input_shape[1:3]
    gender_target_size = gender_classifier.input_shape[1:3]

    # starting lists for calculating modes
    gender_window = []
    emotion_window = []

    # starting video streaming
    #cv2.namedWindow('window_frame')
    video_capture = cv2.VideoCapture(0)
    runFlag = 0
    displayTimes = 0
    pictureNum = 0
    menNumber = 0
    womenNumber = 0
    showHairStatus = 0

    while 1:

        time.sleep(0.01)
        #cv2.imshow("AI", frame)  # show fps
        currentFrame += 1
        #print (currentFrame)
        if currentFrame >= (totalFrame - 1):
            currentFrame = 0
            cap.set(cv2.CAP_PROP_POS_FRAMES, 1)
        cv2.waitKey(1)

        bgr_image = video_capture.read()[1]
        gray_image = cv2.cvtColor(bgr_image, cv2.COLOR_BGR2GRAY)
        rgb_image = cv2.cvtColor(bgr_image, cv2.COLOR_BGR2RGB)
        faces = detect_faces(face_detection, gray_image)

        for face_coordinates in faces:

            x1, x2, y1, y2 = apply_offsets(face_coordinates, gender_offsets)
            rgb_face = rgb_image[y1:y2, x1:x2]

            x1, x2, y1, y2 = apply_offsets(face_coordinates, emotion_offsets)
            gray_face = gray_image[y1:y2, x1:x2]
            try:
                rgb_face = cv2.resize(rgb_face, (gender_target_size))
                gray_face = cv2.resize(gray_face, (emotion_target_size))
            except:
                continue
            gray_face = preprocess_input(gray_face, False)
            gray_face = np.expand_dims(gray_face, 0)
            gray_face = np.expand_dims(gray_face, -1)
            emotion_label_arg = np.argmax(
                emotion_classifier.predict(gray_face))
            emotion_text = emotion_labels[emotion_label_arg]
            emotion_window.append(emotion_text)

            rgb_face = np.expand_dims(rgb_face, 0)
            rgb_face = preprocess_input(rgb_face, False)
            gender_prediction = gender_classifier.predict(rgb_face)
            gender_label_arg = np.argmax(gender_prediction)
            gender_text = gender_labels[gender_label_arg]
            gender_window.append(gender_text)

            if len(gender_window) > frame_window:
                emotion_window.pop(0)
                gender_window.pop(0)
            try:
                emotion_mode = mode(emotion_window)
                gender_mode = mode(gender_window)
            except:
                continue

            if gender_text == gender_labels[0]:
                color = (0, 0, 255)
            else:
                color = (255, 0, 0)

            draw_bounding_box(face_coordinates, rgb_image, color)
            draw_text(face_coordinates, rgb_image, gender_mode, color, 0, -20,
                      1, 2)
            draw_text(face_coordinates, rgb_image, emotion_mode, color, 0, -45,
                      1, 2)
            #print (gender_mode)
            if gender_mode == 'man':
                menNumber += 1
            if gender_mode == 'woman':
                womenNumber += 1
            if menNumber > 10000:
                menNumber = 5000
            if womenNumber > 10000:
                womenNumber = 5000
        bgr_image = cv2.cvtColor(rgb_image, cv2.COLOR_RGB2BGR)
        size = (int(400), int(400))
        if inf3.empty() == False:
            pictureNum = inf3.get()
            runFlag = 1
            print(pictureNum)
        if runFlag == 1:
            #			pictureNum += 1
            #			pictureNum < 100:
            if womenNumber > menNumber:
                if pictureNum == 1:
                    frame = cv2.imread('./woman/1.jpg')
                if pictureNum == 2:
                    frame = cv2.imread('./woman/2.jpg')
                if pictureNum == 3:
                    frame = cv2.imread('./woman/3.jpg')
                if pictureNum == 4:
                    frame = cv2.imread('./woman/4.jpg')
                if pictureNum == 5:
                    frame = cv2.imread('./woman/5.jpg')
                if pictureNum == 6:
                    frame = cv2.imread('./woman/6.jpg')
            else:
                if pictureNum == 1:
                    frame = cv2.imread('./man/1.jpg')
                if pictureNum == 2:
                    frame = cv2.imread('./man/2.jpg')
                if pictureNum == 3:
                    frame = cv2.imread('./man/3.jpg')
                if pictureNum == 4:
                    frame = cv2.imread('./man/4.jpg')
                if pictureNum == 5:
                    frame = cv2.imread('./man/5.jpg')
                if pictureNum == 6:
                    frame = cv2.imread('./man/6.jpg')
            #else:
            runFlag = 0
            pictureNum = 0
            womenNumber = 0
            menNumber = 0
            showHairStatus = 1
        if showHairStatus == 0:
            ret, frame = cap.read()  # get image
        if showHairStatus == 1:
            displayTimes += 1
            if displayTimes >= 150:
                displayTimes = 0
                showHairStatus = 0

        bgr_image = cv2.resize(bgr_image, size, interpolation=cv2.INTER_AREA)

        #if frame.empty() == False:
        frame = cv2.resize(frame, size, interpolation=cv2.INTER_AREA)
        htitch = np.hstack((bgr_image, frame))
        cv2.imshow('AI', htitch)
        #inf.put(htitch)
        if cv2.waitKey(1) & 0xFF == ord('q'):
            break
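The gender/emotion smoothing above keeps the last frame_window labels and reports their mode. A compact standalone sketch of that pattern (an assumption, written with statistics.mode):

# Sketch (assumption): sliding-window label smoothing as used above.
from statistics import mode

frame_window = 10
label_window = []

def smooth(label):
    label_window.append(label)
    if len(label_window) > frame_window:
        label_window.pop(0)
    try:
        return mode(label_window)   # most frequent recent label
    except Exception:
        return label                # fall back to the raw label on ties (pre-3.8)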
Example #20
def recognition(f_2_s):
    # flags for breaking out of the nested conditionals (Python has no goto)
    tt = False  # set True once the user confirms the recognized emotion
    emotion_c = ""  # holds the confirmed emotion label

    #recap == False
    # parameters for loading data and images
    #image_path = path_r

    image_path = image_handler(f_2_s)  # returns the path of the saved photo after save confirmation

    # paths to the trained model and the emotion labels
    detection_model_path = '../trained_models/detection_models/haarcascade_frontalface_default.xml'
    emotion_model_path = '../trained_models/emotion_models/fer2013_mini_XCEPTION.102-0.66.hdf5'
    emotion_labels = get_labels('fer2013')
    font = cv2.FONT_HERSHEY_SIMPLEX  # font used when displaying the emotion info

    # hyper-parameters for bounding boxes shape
    emotion_offsets = (20, 40)
    emotion_offsets = (0, 0)

    # loading models
    face_detection = load_detection_model(detection_model_path)
    emotion_classifier = load_model(emotion_model_path, compile=False)

    # getting input model shapes for inference
    emotion_target_size = emotion_classifier.input_shape[1:3]

    # loading images
    rgb_image = load_image(image_path, grayscale=False)
    gray_image = load_image(image_path, grayscale=True)
    gray_image = np.squeeze(gray_image)
    gray_image = gray_image.astype('uint8')

    faces = detect_faces(face_detection, gray_image)
    for face_coordinates in faces:

        x1, x2, y1, y2 = apply_offsets(face_coordinates, emotion_offsets)
        gray_face = gray_image[y1:y2, x1:x2]

        try:
            gray_face = cv2.resize(gray_face, (emotion_target_size))
        except:
            #pyautogui.confirm(text='one more', title='test', buttons=['ok', 'exit'])
            #recap = True
            continue

        gray_face = preprocess_input(gray_face, True)
        gray_face = np.expand_dims(gray_face, 0)
        gray_face = np.expand_dims(gray_face, -1)
        emotion_label_arg = np.argmax(emotion_classifier.predict(gray_face))
        emotion_text = ""
        emotion_text = emotion_labels[emotion_label_arg]

        # if recognition succeeded, ask the user to confirm the emotion, then save or break out

        tof = pyautogui.confirm(text='Are you ' + emotion_text + '?',
                                title=emotion_text,
                                buttons=['yes', 'no'])  # ask whether the recognized emotion is correct
        if (tof == 'yes'):  # the user pressed 'yes' in the dialog
            tt = True  # break out of the nested conditionals

            emotion_c = emotion_text

            color = (255, 0, 0)  # color for the emotion text and bounding box

            #draw_bounding_box(face_coordinates, rgb_image, color)
            #draw_text(face_coordinates, rgb_image, emotion_text, color, 0, -30, 1.5, 2)

        elif (tof == 'no'):  # the user pressed 'no' in the dialog
            tt = False
            break

        #color = (255, 0, 0)  # color for the emotion text and bounding box

        #draw_bounding_box(face_coordinates, rgb_image, color)
        #draw_text(face_coordinates, rgb_image, emotion_text, color, 0, -30, 1.5, 2)

    if (tt == True):
        # 'yes' was pressed: save the photo under a filename matching the confirmed emotion
        bgr_image = cv2.cvtColor(rgb_image, cv2.COLOR_RGB2BGR)
        next_num = next_index(
            emotion_text)  # count existing files for this emotion and get the next index
        cv2.imwrite('../src/' + emotion_text + 'z' + str(next_num) + '.jpg',
                    bgr_image)  # write the new emotion-labeled photo
        f = open(emotion_text + 'z' + str(next_num) + ".txt",
                 'w',
                 encoding="UTF8")  # also create the matching diary file
        f.close()

        # then show the detected face region and emotion info to the user
        img = cv2.imread(image_path, cv2.IMREAD_COLOR)

        draw_bounding_box(face_coordinates, img, color)
        draw_text(face_coordinates, img, emotion_text, color, 0, -30, 1.5, 2)

        while (True):  # freeze the display until a key is pressed
            cv2.imshow(image_path, img)

            if cv2.waitKey(1) > 0:
                break
        # exit the function once the check is complete

        #check_recoged_img('../src/'+ emotion_text +'z'+ str(next_num) +'.jpg')    # -- this would save the photo with the box and emotion text drawn on it

    else:  # alert that no emotion was captured (recognition error or mismatch with the user's answer)
        pyautogui.alert(text='no emotion captured', title='error', button='OK')
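The next_index helper used above is not shown. A hypothetical sketch that counts the existing '<emotion>z<N>.jpg' files and returns the next index (the ../src folder follows the path used above):

# Hypothetical sketch of next_index: count saved photos for this emotion.
import glob

def next_index(emotion_text, folder='../src'):
    pattern = '{0}/{1}z*.jpg'.format(folder, emotion_text)
    return len(glob.glob(pattern)) + 1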
Example #21
import sys

from keras.models import load_model
from utils.datasets import get_labels
from utils.inference import detect_faces
from utils.inference import apply_offsets
from utils.inference import load_detection_model
from utils.preprocessor import preprocess_input
from utils.inference import draw_bounding_box
from utils.inference import load_image

# parameters
image_path = sys.argv[1]
# image_path = '../images/color_demo.gif'

# task = sys.argv[2]
task = 'emotion'
if task == 'emotion':
    labels = get_labels('fer2013')
    offsets = (0, 0)
    # model_filename = '../trained_models/fer2013_big_XCEPTION.54-0.66.hdf5'
    model_filename = '../trained_models/emotion_models/fer2013_mini_XCEPTION.102-0.66.hdf5'
elif task == 'gender':
    labels = get_labels('imdb')
    offsets = (30, 60)
    model_filename = '../trained_models/gender_models/gender_mini_XCEPTION.21-0.95.hdf5'

color = (0, 255, 0)

# loading models
detection_model_path = '../trained_models/detection_models/haarcascade_frontalface_default.xml'
model = load_model(model_filename, compile=False)
target_size = model.input_shape[1:3]
face_detection = load_detection_model(detection_model_path)
Example #22
def extract_emotions(video_source, output_video, output_data, n):
    base_path = os.path.realpath(__file__)
    # os.environ["CUDA_DEVICE_ORDER"] = "PCI_BUS_ID"
    # os.environ['CUDA_VISIBLE_DEVICES'] = '-1'
    base_path = base_path[:base_path.find('emotions')]
    emotion_model_path = base_path + 'emotions/models/emotion_model.hdf5'
    emotion_labels = get_labels('fer2013')

    # hyper-parameters for bounding boxes shape
    frame_window = 10
    emotion_offsets = (20, 40)
    # loading models
    cnn_face_detector = dlib.cnn_face_detection_model_v1(
        base_path + 'emotions/models/mmod_human_face_detector.dat')
    face_cascade = cv2.CascadeClassifier(
        base_path + 'emotions/models/haarcascade_frontalface_default.xml')
    emotion_classifier = load_model(emotion_model_path)
    # getting input model shapes for inference
    emotion_target_size = emotion_classifier.input_shape[1:3]
    # starting lists for calculating modes
    emotion_window = []
    # starting video streaming

    video_capture = cv2.VideoCapture(0)
    fourcc = cv2.VideoWriter_fourcc(*'MJPG')
    out = cv2.VideoWriter(output_video, fourcc, 9.0, (1920, 1088))
    # Select video or webcam feed
    cap = cv2.VideoCapture(video_source)
    count = 0.
    miss = 0.
    f = open(output_data, 'w')
    while cap.isOpened():  # True:
        ret, bgr_image = cap.read()

        if ret == True:

            gray_image = cv2.cvtColor(bgr_image, cv2.COLOR_BGR2GRAY)
            rgb_image = cv2.cvtColor(bgr_image, cv2.COLOR_BGR2RGB)

            # faces = face_cascade.detectMultiScale(gray_image, scaleFactor=1.1, minNeighbors=5,
            #                                       minSize=(30, 30), flags=cv2.CASCADE_SCALE_IMAGE)
            # gray_image = cv2.resize(gray_image,(1280,720))
            # print gray_image.shape
            faces = cnn_face_detector(gray_image, 1)
            # faces, confidences = cv.detect_face(gray_image)
            count += 1.
            if len(faces) == 0:
                miss += 1.
            for face_coordinates in faces:
                x = face_coordinates.rect.left()
                y = face_coordinates.rect.top()
                w = face_coordinates.rect.right() - x
                h = face_coordinates.rect.bottom() - y
                # x = face_coordinates[0]
                # y = face_coordinates[1]
                # w = face_coordinates[2] - x
                # h = face_coordinates[3] - y
                face_coordinates = (x, y, w, h)
                x1, x2, y1, y2 = apply_offsets(face_coordinates,
                                               emotion_offsets)
                gray_face = gray_image[y1:y2, x1:x2]
                try:
                    gray_face = cv2.resize(gray_face, (emotion_target_size))
                except:
                    continue

                gray_face = preprocess_input(gray_face, True)
                gray_face = np.expand_dims(gray_face, 0)
                gray_face = np.expand_dims(gray_face, -1)
                emotion_prediction = emotion_classifier.predict(gray_face)
                emotion_probability = np.max(emotion_prediction)
                emotion_label_arg = np.argmax(emotion_prediction)
                emotion_text = emotion_labels[emotion_label_arg]
                emotion_window.append(emotion_text)

                emotion_label_args = largest_indices(emotion_prediction, n)
                emotion_probabilitis = emotion_prediction[emotion_label_args]
                emotion_texts = [
                    emotion_labels.get(key) for key in emotion_label_args[1]
                ]
                if len(emotion_window) > frame_window:
                    emotion_window.pop(0)
                try:
                    emotion_mode = mode(emotion_window)
                except:
                    continue

                if emotion_text == 'angry':
                    color = emotion_probability * np.asarray((255, 0, 0))
                elif emotion_text == 'sad':
                    color = emotion_probability * np.asarray((0, 0, 255))
                elif emotion_text == 'happy':
                    color = emotion_probability * np.asarray((255, 255, 0))
                elif emotion_text == 'surprise':
                    color = emotion_probability * np.asarray((0, 255, 255))
                else:
                    color = emotion_probability * np.asarray((0, 255, 0))

                color = color.astype(int)
                color = color.tolist()
                emotion_text = " ".join(emotion_texts)
                f.write("{0} {1} \n".format(
                    emotion_text,
                    " ".join(str(item) for item in emotion_probabilitis)))
                f.flush()

                draw_bounding_box(face_coordinates, rgb_image, color)
                draw_text(face_coordinates, rgb_image, emotion_mode, color, 0,
                          -45, 1, 1)

            bgr_image = cv2.cvtColor(rgb_image, cv2.COLOR_RGB2BGR)
            out.write(bgr_image)
        else:
            break
    print miss
    print count
    print miss / count
    cap.release()
    out.release()
    f.close()
    cv2.destroyAllWindows()
Example #23
def process_image(image):

    try:
        # parameters for loading data and images
        detection_model_path = './trained_models/detection_models/haarcascade_frontalface_default.xml'
        emotion_model_path = './trained_models/emotion_models/fer2013_mini_XCEPTION.102-0.66.hdf5'
        gender_model_path = './trained_models/gender_models/simple_CNN.81-0.96.hdf5'
        emotion_labels = get_labels('fer2013')
        gender_labels = get_labels('imdb')
        font = cv2.FONT_HERSHEY_SIMPLEX

        # hyper-parameters for bounding boxes shape
        gender_offsets = (30, 60)
        gender_offsets = (10, 10)
        emotion_offsets = (20, 40)
        emotion_offsets = (0, 0)

        # loading models
        face_detection = load_detection_model(detection_model_path)
        emotion_classifier = load_model(emotion_model_path, compile=False)
        gender_classifier = load_model(gender_model_path, compile=False)

        # getting input model shapes for inference
        emotion_target_size = emotion_classifier.input_shape[1:3]
        gender_target_size = gender_classifier.input_shape[1:3]

        # loading images
        image_array = np.fromstring(image, np.uint8)
        unchanged_image = cv2.imdecode(image_array, cv2.IMREAD_UNCHANGED)

        rgb_image = cv2.cvtColor(unchanged_image, cv2.COLOR_BGR2RGB)
        gray_image = cv2.cvtColor(unchanged_image, cv2.COLOR_BGR2GRAY)

        faces = detect_faces(face_detection, gray_image)
        for face_coordinates in faces:
            x1, x2, y1, y2 = apply_offsets(face_coordinates, gender_offsets)
            rgb_face = rgb_image[y1:y2, x1:x2]

            x1, x2, y1, y2 = apply_offsets(face_coordinates, emotion_offsets)
            gray_face = gray_image[y1:y2, x1:x2]

            try:
                rgb_face = cv2.resize(rgb_face, (gender_target_size))
                gray_face = cv2.resize(gray_face, (emotion_target_size))
            except:
                continue

            rgb_face = preprocess_input(rgb_face, False)
            rgb_face = np.expand_dims(rgb_face, 0)
            gender_prediction = gender_classifier.predict(rgb_face)
            gender_label_arg = np.argmax(gender_prediction)
            gender_text = gender_labels[gender_label_arg]

            gray_face = preprocess_input(gray_face, True)
            gray_face = np.expand_dims(gray_face, 0)
            gray_face = np.expand_dims(gray_face, -1)
            emotion_label_arg = np.argmax(
                emotion_classifier.predict(gray_face))
            emotion_text = emotion_labels[emotion_label_arg]

            if gender_text == gender_labels[0]:
                color = (0, 0, 255)
            else:
                color = (255, 0, 0)

            draw_bounding_box(face_coordinates, rgb_image, color)
            draw_text(face_coordinates, rgb_image, gender_text, color, 0, -20,
                      1, 2)
            draw_text(face_coordinates, rgb_image, emotion_text, color, 0, -50,
                      1, 2)
    except Exception as err:
        logging.error('Error in emotion gender processor: "{0}"'.format(err))

    bgr_image = cv2.cvtColor(rgb_image, cv2.COLOR_RGB2BGR)

    dirname = 'result'
    if not os.path.exists(dirname):
        os.mkdir(dirname)

    cv2.imwrite(os.path.join(dirname, 'predicted_image.png'), bgr_image)
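A hypothetical usage sketch for the function above: read raw image bytes, run the detector, and pick up the annotated output it writes to result/predicted_image.png (test.jpg is a placeholder path):

# Hypothetical usage; assumes process_image and its model files are available.
if __name__ == '__main__':
    with open('test.jpg', 'rb') as fp:
        process_image(fp.read())
    # the annotated image is written to result/predicted_image.png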
Example #24
import sys

import cv2
from keras.models import load_model

from utils.datasets import get_labels
from utils.inference import detect_faces
from utils.inference import load_detection_model
from utils.inference import make_face_coordinates
from utils.inference import draw_text
from utils.inference import draw_bounding_box
from utils.inference import apply_offsets

from utils.inference import load_image
from utils.preprocessor import preprocess_input

# parameters for loading data and images
image_path = sys.argv[1]
emotion_model_path = '../trained_models/emotion_models/CK+/CK+_mini_XCEPTION.138-1.00.hdf5'
emotion_labels = get_labels('CK+')
font = cv2.FONT_HERSHEY_SIMPLEX

# hyper-parameters for bounding boxes shape
emotion_offsets = (10, 10)
emotion_offsets = (0, 0)

# loading models
face_detection = load_detection_model()
emotion_classifier = load_model(emotion_model_path, compile=False)

# getting input model shapes for inference
emotion_target_size = emotion_classifier.input_shape[1:3]

# loading images
rgb_image = load_image(image_path, grayscale=False)
Example #25
    def __init__(self, emoji_dictionary):
        # Values for average face dimensions
        self._face_width = 13.9  # in cm
        self._face_height = 22.5

        # Emoji dictionary
        self._emoji_dictionary = emoji_dictionary

        # ROS parameters
        # Initialize camera and get its info
        self._camera = rospy.get_param("~/cam_device", "/dev/video0")
        self._camera_calibration_path = rospy.get_param(
            "~/camera_info_path",
            "/home/yago/catkin_ws/src/eyecu/eyecu/config/logitech_webcam_calibration.yaml"
        )
        self._load_camera_info()

        # Get files for face detection
        self._detection_model_path = rospy.get_param(
            "~/detection_model_path", "/trained_models/detection_models/")
        self._detection_model_file = rospy.get_param(
            "~/detection_model_file", "haarcascade_frontalface_default.xml")
        self._detection_model = sys.path[
            0] + self._detection_model_path + self._detection_model_file
        self._load_face_detection()

        # Get files for emotion detection
        self._emotion_model_path = rospy.get_param("~/emotion_model_path",
                                                   "/trained_models/")
        self._emotion_model_file = rospy.get_param(
            "~/emotion_model_file", "fer2013_mini_XCEPTION.119-0.65.hdf5")
        self._emotion_model = sys.path[
            0] + self._emotion_model_path + self._emotion_model_file
        self._load_emotion_detection()

        # Get labels and sizes
        self._emotion_labels = get_labels('fer2013')
        self._emotion_target_size = self._emotion_classifier.input_shape[1:3]

        # Hyper-parameters for bounding boxes shape
        self._emotion_window = []
        self._frame_window = 10
        self._emotion_offsets = (20, 40)

        # Publisher topics
        self._face_distance_topic = rospy.get_param("~/face_distance_topic",
                                                    "/face_distance")
        self._emotion_topic = rospy.get_param("~/emotion_topic",
                                              "/emotion_status")

        # Initialize publishers
        self._face_distance_publisher = rospy.Publisher(
            self._face_distance_topic, DistanceCamera, queue_size=1)
        self._emotion_publisher = rospy.Publisher(self._emotion_topic,
                                                  Int8,
                                                  queue_size=1)

        # Display video
        self._display = rospy.get_param("~/display", True)
        self._font = cv2.FONT_HERSHEY_COMPLEX_SMALL
        if self._display:
            cv2.namedWindow('window_frame', cv2.WND_PROP_FULLSCREEN)
            cv2.setWindowProperty('window_frame', cv2.WND_PROP_FULLSCREEN,
                                  cv2.WINDOW_FULLSCREEN)

        # Start OpenCV video capture
        self._video_capture = WebcamVideoStream(src=self._camera).start()
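The average face width stored above, together with the camera calibration, points to a pinhole-model distance estimate. A minimal standalone sketch of that calculation (an assumption, not taken from the class itself):

# Sketch (assumption): pinhole-camera distance estimate from face width.
def estimate_face_distance(focal_length_px, face_width_px, real_face_width_cm=13.9):
    # distance (cm) = focal length (px) * real width (cm) / width in image (px)
    return focal_length_px * real_face_width_cm / float(face_width_px)

# e.g. estimate_face_distance(600.0, 120) -> 69.5 cm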
Example #26
import sys

import cv2
import imutils
import numpy as np
from keras.models import load_model

from utils.datasets import get_labels
from utils.inference import detect_faces, draw_text, draw_bounding_box, load_detection_model

# parameters for loading data and images
detection_model_path = '../models/haarcascade_files/haarcascade_frontalface_default.xml'
emotion_model_path = '../models/model.85-0.65.hdf5'
source_image_path = sys.argv[1]

# hyper-parameters for bounding boxes shape
# loading models
face_detection = load_detection_model(detection_model_path)
emotion_classifier = load_model(emotion_model_path, compile=False)
EMOTIONS = get_labels()

# reading the frame
frame = cv2.imread(source_image_path)
frame = imutils.resize(frame, width=400)
gray = cv2.cvtColor(frame, cv2.COLOR_BGR2GRAY)
rgb = cv2.cvtColor(frame, cv2.COLOR_BGR2RGB)
faces = detect_faces(face_detection, gray)

if len(faces) > 0:
    faces = sorted(faces,
                   reverse=True,
                   key=lambda x: (x[2] - x[0]) * (x[3] - x[1]))[0]
    (fX, fY, fW, fH) = faces
    # Extract the ROI of the face from the grayscale image, resize it to a fixed 48x48 pixels, and then prepare
    # the ROI for classification via the CNN
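A sketch of the ROI preparation that the comment above describes (an assumption: plain 0-1 scaling is used here, and the snippet continues with the names defined earlier in this example):

# Sketch (assumption): prepare the 48x48 face ROI and classify it.
roi = gray[fY:fY + fH, fX:fX + fW]
roi = cv2.resize(roi, (48, 48))
roi = roi.astype('float') / 255.0           # simple 0-1 scaling (assumption)
roi = np.expand_dims(roi, axis=0)
roi = np.expand_dims(roi, axis=-1)
preds = emotion_classifier.predict(roi)[0]
label = EMOTIONS[int(preds.argmax())]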
Example #27
def main(yolo):
    t = datetime.datetime.now().replace(microsecond=0).isoformat()
    # write only if values are not empty
    graphInputs = ['1', '8', 'Emotion', '2018-10-23T14:02:29', 'MALE', '2']
    with open(r'templates/test2.csv', 'a') as f:
        writer = csv.writer(f)
        writer.writerow(graphInputs)

    # parameters for loading data and images
    detection_model_path = 'trained_models/detection_models/haarcascade_frontalface_default.xml'
    emotion_model_path = 'trained_models/emotion_models/fer2013_mini_XCEPTION.102-0.66.hdf5'
    gender_model_path = 'trained_models/gender_models/simple_CNN.81-0.96.hdf5'
    emotion_labels = get_labels('fer2013')
    gender_labels = get_labels('imdb')
    font = cv2.FONT_HERSHEY_SIMPLEX

    # hyper-parameters for bounding boxes shape
    frame_window = 10
    gender_offsets = (30, 60)
    emotion_offsets = (20, 40)

    # loading models
    face_detection = load_detection_model(detection_model_path)
    emotion_classifier = load_model(emotion_model_path, compile=False)
    gender_classifier = load_model(gender_model_path, compile=False)

    # getting input model shapes for inference
    emotion_target_size = emotion_classifier.input_shape[1:3]
    gender_target_size = gender_classifier.input_shape[1:3]

    # starting lists for calculating modes
    gender_window = []
    emotion_window = []

    # Definition of the parameters
    max_cosine_distance = 0.3
    nn_budget = None
    nms_max_overlap = 1.0

    # deep_sort

    model_filename = 'model_data/mars-small128.pb'
    encoder = gdet.create_box_encoder(model_filename, batch_size=1)

    metric = nn_matching.NearestNeighborDistanceMetric("cosine",
                                                       max_cosine_distance,
                                                       nn_budget)
    tracker = Tracker(metric)

    writeVideo_flag = False
    resetCounter = 0
    amountOfFramesPerScan = 10
    peopleInFrameList = []
    # video_capture = cv2.VideoCapture('demo/dinner.mp4')
    video_capture = cv2.VideoCapture('demo/MOT1712.mp4')
    # video_capture = cv2.VideoCapture(0)

    if writeVideo_flag:
        # Define the codec and create VideoWriter object
        w = int(video_capture.get(3))
        h = int(video_capture.get(4))
        fourcc = cv2.VideoWriter_fourcc(*'MJPG')
        out = cv2.VideoWriter('output.avi', fourcc, 15, (w, h))
        list_file = open('detection.txt', 'w')
        frame_index = -1

    fps = 0.0

    while True:
        ret, frame = video_capture.read()  # frame shape 640*480*3
        if ret != True:
            break
        #   -------------------START  EMOTION CODE

        # cv2.imshow('window_frame', frame)  # shows the emotion frame separately

        #  --------------------------------   END EMOTION CODE

        t1 = time.time()
        currentPeopleInFrame = 0
        image = Image.fromarray(frame)
        boxs = yolo.detect_image(image)
        # print("box_num",len(boxs))
        features = encoder(frame, boxs)

        # wrap boxes as Detection objects (confidence score set to 1.0 here).
        detections = [
            Detection(bbox, 1.0, feature)
            for bbox, feature in zip(boxs, features)
        ]

        # Run non-maxima suppression.
        boxes = np.array([d.tlwh for d in detections])
        scores = np.array([d.confidence for d in detections])
        indices = preprocessing.non_max_suppression(boxes, nms_max_overlap,
                                                    scores)
        detections = [detections[i] for i in indices]

        # Imagelist is a list of all the images within the tracked bounding boxes of our tracker.
        imageList = []
        # Call the tracker
        tracker.predict()
        tracker.update(detections)
        trackerIDs = []
        for track in tracker.tracks:
            if track.is_confirmed() and track.time_since_update > 1:
                continue
            # Gets the location of the BBOx coordinates within the tracker.
            bbox = track.to_tlbr()

            # Put rectangle and text on the image

            currentPeopleInFrame += 1
            # print(int(bbox[0]), int(bbox[1]), int(bbox[2]), int(bbox[3]))
            # Check if bounding box 3 isn't out of bounds before creating image
            if int(bbox[2]) <= 640:
                numpArr = np.array(frame[int((bbox[1])):int(bbox[1] + bbox[3]),
                                         int(bbox[0]):int(bbox[0] + bbox[2])])
            else:
                numpArr = np.array(frame[int((bbox[1])):int(bbox[1] + bbox[3]),
                                         int(bbox[0]):(int(bbox[0]) + 640)])
            imageList.append(numpArr)
            # cv2.destroyAllWindows()
            trackerIDs.append(track.track_id)
            i = 0
            cv2.rectangle(frame, (int(bbox[0]), int(bbox[1])),
                          (int(bbox[2]), int(bbox[3])), (255, 255, 255), 2)
            cv2.putText(frame, str(track.track_id),
                        (int(bbox[0]), int(bbox[1])), 0, 5e-3 * 200,
                        (0, 255, 0), 2)
            print("Found tracked human")

        for item in (imageList):
            print("Scanning human")
            gray_image = cv2.cvtColor(item, cv2.COLOR_BGR2GRAY)
            rgb_image = cv2.cvtColor(item, cv2.COLOR_BGR2RGB)
            faces = detect_faces(face_detection, gray_image)

            graphInputs[0] = '%d' % trackerIDs[i]
            i += 1
            graphInputs[3] = '%s' % datetime.datetime.now().replace(
                microsecond=0).isoformat()

            for face_coordinates in faces:
                print("printje KOMT 1")
                x1, x2, y1, y2 = apply_offsets(face_coordinates,
                                               gender_offsets)
                rgb_face = rgb_image[y1:y2, x1:x2]

                x1, x2, y1, y2 = apply_offsets(face_coordinates,
                                               emotion_offsets)
                gray_face = gray_image[y1:y2, x1:x2]
                try:
                    rgb_face = cv2.resize(rgb_face, (gender_target_size))
                    gray_face = cv2.resize(gray_face, (emotion_target_size))
                except:
                    continue
                print("printje KOMT HIER2")
                gray_face = preprocess_input(gray_face, True)
                gray_face = np.expand_dims(gray_face, 0)
                gray_face = np.expand_dims(gray_face, -1)
                emotion_label_arg = np.argmax(
                    emotion_classifier.predict(gray_face))
                emotion_text = emotion_labels[emotion_label_arg]
                emotion_window.append(emotion_text)

                rgb_face = np.expand_dims(rgb_face, 0)
                rgb_face = preprocess_input(rgb_face, False)
                gender_prediction = gender_classifier.predict(rgb_face)
                gender_label_arg = np.argmax(gender_prediction)
                gender_text = gender_labels[gender_label_arg]
                graphInputs[4] = gender_text
                gender_window.append(gender_text)

                # Overwrite the emotion label with an empty value when nothing
                # is found, or leave the whole person out; when nothing is
                # found, write nothing.
                print("debug checkpoint 3: %s" %
                      emotion_labels[emotion_label_arg])
                graphInputs[2] = ('%s' % emotion_labels[emotion_label_arg])
                if gender_text == gender_labels[0]:
                    color = (0, 0, 255)
                else:
                    color = (255, 0, 0)

                # draw_bounding_box(face_coordinates, rgb_image, color)
                # draw_text(face_coordinates, rgb_image, gender_mode,
                #           color, 0, -20, 1, 1)
                # draw_text(face_coordinates, rgb_image, emotion_mode,
                #           color, 0, -45, 1, 1)

            # gray_image = cv2.cvtColor(item, cv2.COLOR_BGR2GRAY)
            # rgb_image = cv2.cvtColor(item, cv2.COLOR_BGR2RGB)
            #
            # faces = face_cascade.detectMultiScale(gray_image, scaleFactor=1.1, minNeighbors=5,
            #                                       minSize=(0, 0), flags=cv2.CASCADE_SCALE_IMAGE)
            # #PersonID Set
            # graphInputs[0] = '%d'%trackerIDs[i]
            # # print("trackerID:", trackerIDs[i])
            # i += 1
            # graphInputs[3] = '%s'%datetime.datetime.now().replace(microsecond=0).isoformat()
            #
            # for face_coordinates in faces:
            #
            #     x1, x2, y1, y2 = apply_offsets(face_coordinates, emotion_offsets)
            #     gray_face = gray_image[y1:y2, x1:x2]
            #     try:
            #         gray_face = cv2.resize(gray_face, (emotion_target_size))
            #     except:
            #         continue
            #
            #     gray_face = preprocess_input(gray_face, True)
            #     gray_face = np.expand_dims(gray_face, 0)
            #     gray_face = np.expand_dims(gray_face, -1)
            #     emotion_prediction = emotion_classifier.predict(gray_face)
            #     emotion_probability = np.max(emotion_prediction)
            #     emotion_label_arg = np.argmax(emotion_prediction)
            #     emotion_text = emotion_labels[emotion_label_arg]
            #     emotion_window.append(emotion_text)
            #     if len(emotion_window) > frame_window:
            #         emotion_window.pop(0)
            #     try:
            #         emotion_mode = mode(emotion_window)
            #     except:
            #         continue
            #
            #     #Emotion set
            #     if emotion_text == 'angry':
            #         color = emotion_probability * np.asarray((255, 0, 0))
            #         print("angry", i)
            #         graphInputs[2] = 'ANGRY'
            #     elif emotion_text == 'sad':
            #         color = emotion_probability * np.asarray((0, 0, 255))
            #         print("sad", i)
            #         graphInputs[2] = 'SAD'
            #     elif emotion_text == "happy":
            #         color = emotion_probability * np.asarray((255, 255, 0))
            #         print("happy", i)
            #         graphInputs[2] = 'HAPPY'
            #     elif emotion_text == 'surprise':
            #         color = emotion_probability * np.asarray((0, 255, 255))
            #         print("surprise", i)
            #         graphInputs[2] = 'SURPRISED'
            #     else:
            #         color = emotion_probability * np.asarray((0, 255, 0))
            #         print("neutral", i)
            #         graphInputs[2] = 'NEUTRAL'
            #     # color = color.astype(int)
            #     # color = color.tolist()
            #
            #     # -------------------------------------
            #
            #     draw_bounding_box(face_coordinates, rgb_image, color)
            #     draw_text(face_coordinates, rgb_image, emotion_mode,
            #               color, 0, -45, 1, 1)

            print(graphInputs)
            with open(r'templates/test2.csv', 'a') as f:
                writer = csv.writer(f)
                writer.writerow(graphInputs)
        # cv2.imshow('jaja', frame[int((bbox[1])):int(bbox[1] + bbox[3]), int(bbox[0]):int(bbox[0] + bbox[2])])

        # cv2.imshow('ajaja', imageList[0])

        cv2.imshow('FilteredImage', frame)
        if resetCounter >= amountOfFramesPerScan:
            peopleInFrameList.append(currentPeopleInFrame)
            print("Total amount of people %d" % (currentPeopleInFrame))

            # for x in range(len(peopleInFrameList)):
            #     print("list entry %d" % (peopleInFrameList[x]))

            print(peopleInFrameList)
            resetCounter = 0
        else:
            resetCounter += 1
        print("No print or append this time %d" % (resetCounter))

        if writeVideo_flag:
            # save a frame
            out.write(frame)
            frame_index = frame_index + 1
            list_file.write(str(frame_index) + ' ')
            if len(boxs) != 0:
                for i in range(0, len(boxs)):
                    list_file.write(
                        str(boxs[i][0]) + ' ' + str(boxs[i][1]) + ' ' +
                        str(boxs[i][2]) + ' ' + str(boxs[i][3]) + ' ')
            list_file.write('\n')

        fps = (fps + (1. / (time.time() - t1))) / 2
        print("fps= %f" % (fps))

        # Press Q to stop!
        if cv2.waitKey(1) & 0xFF == ord('q'):
            break

    video_capture.release()
    if writeVideo_flag:
        out.release()
        list_file.close()
    cv2.destroyAllWindows()
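
# A small usage sketch (not part of the original script): read back the rows
# appended to templates/test2.csv above and tally emotions per tracked person.
# The column layout is inferred from how graphInputs is filled in the loop
# (index 0 = tracker id, 2 = emotion, 3 = ISO timestamp, 4 = gender); treat it
# as an assumption rather than a documented schema.
def summarize_detections(csv_path='templates/test2.csv'):
    import csv
    from collections import Counter, defaultdict
    emotions_per_person = defaultdict(Counter)
    with open(csv_path) as f:
        for row in csv.reader(f):
            if len(row) >= 5 and row[2]:
                emotions_per_person[row[0]][row[2]] += 1
    return emotions_per_person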
Example #28
def gen():
    """Video streaming generator function."""
    global cap
    global emotion

    # parameters for loading data and images
    emotion_model_path = './models/emotion_model.hdf5'
    emotion_labels = get_labels('fer2013')

    # hyper-parameters for bounding boxes shape
    frame_window = 10
    emotion_offsets = (20, 40)

    # loading models
    face_cascade = cv2.CascadeClassifier(
        './models/haarcascade_frontalface_default.xml')
    emotion_classifier = load_model(emotion_model_path)

    # getting input model shapes for inference
    emotion_target_size = emotion_classifier.input_shape[1:3]

    # starting lists for calculating modes
    emotion_window = []

    # starting video streaming (the popup window is disabled; re-enable the
    # commented cv2.imshow call near the end of the loop to show it)
    cv2.namedWindow('window_frame')
    cv2.destroyAllWindows()
    # video_capture = cv2.VideoCapture(0)  # unused: `cap` below is the capture actually read

    # Select video or webcam feed
    cap = None
    if USE_WEBCAM:
        cap = cv2.VideoCapture(0)  # Webcam source
    else:
        cap = cv2.VideoCapture('./demo/dinner.mp4')  # Video file source

    while cap.isOpened():
        ret, bgr_image = cap.read()
        if not ret:
            break
        gray_image = cv2.cvtColor(bgr_image, cv2.COLOR_BGR2GRAY)
        rgb_image = cv2.cvtColor(bgr_image, cv2.COLOR_BGR2RGB)

        faces = face_cascade.detectMultiScale(gray_image,
                                              scaleFactor=1.1,
                                              minNeighbors=5,
                                              minSize=(30, 30),
                                              flags=cv2.CASCADE_SCALE_IMAGE)

        for face_coordinates in faces:

            x1, x2, y1, y2 = apply_offsets(face_coordinates, emotion_offsets)
            gray_face = gray_image[y1:y2, x1:x2]
            try:
                gray_face = cv2.resize(gray_face, (emotion_target_size))
            except:
                continue

            gray_face = preprocess_input(gray_face, True)
            gray_face = np.expand_dims(gray_face, 0)
            gray_face = np.expand_dims(gray_face, -1)
            emotion_prediction = emotion_classifier.predict(gray_face)
            emotion_probability = np.max(emotion_prediction)
            emotion_label_arg = np.argmax(emotion_prediction)
            emotion_text = emotion_labels[emotion_label_arg]
            emotion_window.append(emotion_text)

            if len(emotion_window) > frame_window:
                emotion_window.pop(0)
            try:
                emotion_mode = mode(emotion_window)
            except:
                continue

            emotion = emotion_text
            print(emotion)

            if emotion_text == 'angry':
                color = emotion_probability * np.asarray((255, 0, 0))
            elif emotion_text == 'sad':
                color = emotion_probability * np.asarray((0, 0, 255))
            elif emotion_text == 'happy':
                color = emotion_probability * np.asarray((255, 255, 0))
            elif emotion_text == 'surprise':
                color = emotion_probability * np.asarray((0, 255, 255))
            else:
                color = emotion_probability * np.asarray((0, 255, 0))

            color = color.astype(int)
            color = color.tolist()

            draw_bounding_box(face_coordinates, rgb_image, color)
            draw_text(face_coordinates, rgb_image, emotion_mode, color, 0, -45,
                      1, 1)

        bgr_image = cv2.cvtColor(rgb_image, cv2.COLOR_RGB2BGR)
        # cv2.imshow('window_frame', bgr_image)
        if cv2.waitKey(1) & 0xFF == ord('q'):
            break

        # write annotated images to jpg, then output to web browser
        cv2.imwrite('t.jpg', bgr_image)
        yield (b'--frame\r\n'
               b'Content-Type: image/jpeg\r\n\r\n' +
               open('t.jpg', 'rb').read() + b'\r\n')
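
# A minimal sketch of how a generator like gen() is typically served (an
# assumption: the surrounding app uses Flask; the route name is illustrative,
# not taken from the original code). Each yielded JPEG becomes one frame of a
# multipart/x-mixed-replace stream in the browser.
from flask import Flask, Response

app = Flask(__name__)

@app.route('/video_feed')
def video_feed():
    return Response(gen(),
                    mimetype='multipart/x-mixed-replace; boundary=frame')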
Example #29
def recognition(f_2_s):
    tt = False  # flag used to break out of the nested conditionals (Python has no goto)
    emotion_c = ""
    #recap == False
    # parameters for loading data and images
    #image_path = path_r
    image_path = image_handler(f_2_s)
    detection_model_path = '../trained_models/detection_models/haarcascade_frontalface_default.xml'
    emotion_model_path = '../trained_models/emotion_models/fer2013_mini_XCEPTION.102-0.66.hdf5'
    emotion_labels = get_labels('fer2013')
    font = cv2.FONT_HERSHEY_SIMPLEX

    # hyper-parameters for bounding boxes shape
    emotion_offsets = (0, 0)  # overrides the usual (20, 40): no padding around the face

    # loading models
    face_detection = load_detection_model(detection_model_path)
    emotion_classifier = load_model(emotion_model_path, compile=False)

    # getting input model shapes for inference
    emotion_target_size = emotion_classifier.input_shape[1:3]

    # loading images
    rgb_image = load_image(image_path, grayscale=False)
    gray_image = load_image(image_path, grayscale=True)
    gray_image = np.squeeze(gray_image)
    gray_image = gray_image.astype('uint8')

    faces = detect_faces(face_detection, gray_image)
    for face_coordinates in faces:

        x1, x2, y1, y2 = apply_offsets(face_coordinates, emotion_offsets)
        gray_face = gray_image[y1:y2, x1:x2]

        try:
            gray_face = cv2.resize(gray_face, (emotion_target_size))
        except:
            #pyautogui.confirm(text='one more', title='test', buttons=['ok', 'exit'])
            #recap = True
            continue

        gray_face = preprocess_input(gray_face, True)
        gray_face = np.expand_dims(gray_face, 0)
        gray_face = np.expand_dims(gray_face, -1)
        emotion_label_arg = np.argmax(emotion_classifier.predict(gray_face))
        emotion_text = ""
        emotion_text = emotion_labels[emotion_label_arg]

        # If emotion recognition succeeds, ask the user to confirm the emotion;
        # on confirmation save it, otherwise bail out

        tof = pyautogui.confirm(text='Are you ' + emotion_text + '?',
                                title=emotion_text,
                                buttons=['yes', 'no'])
        if (tof == 'yes'):
            tt = True

            emotion_c = emotion_text

            color = (255, 0, 0)  # red for the emotion text and the bounding box

            draw_bounding_box(face_coordinates, rgb_image, color)
            draw_text(face_coordinates, rgb_image, emotion_text, color, 0, -30,
                      1.5, 2)
        elif (tof == 'no'):
            tt = False
            break

        #color = (255, 0, 0)  # red for the emotion text and the bounding box

        #draw_bounding_box(face_coordinates, rgb_image, color)
        #draw_text(face_coordinates, rgb_image, emotion_text, color, 0, -30, 1.5, 2)
    if tt:
        bgr_image = cv2.cvtColor(rgb_image, cv2.COLOR_RGB2BGR)
        cv2.imwrite('../src/' + emotion_text + '.jpg', bgr_image)  # file named after the confirmed emotion
        check_recoged_img('../src/' + emotion_text + '.jpg')
    else:
        pyautogui.alert(text='no emotion captured', title='error', button='OK')
Example #30
from update_counter import gender
from update_counter import age
from update_counter import peopele
import time
female_count = 0
male_count = 0
prev_max = 0
kids = 0
elders = 0
youngadults = 0
temp = 0
cor = 0
diff_cor = 0
agegroup_model_path = '../trained_models/agegroup_models/age_group_weights.hdf5'
gender_model_path = '../trained_models/gender_models/updated_weights.hdf5'
agegroup_labels = get_labels('fer2013')
gender_labels = get_labels('imdb')
font = cv2.FONT_HERSHEY_SIMPLEX

# hyper-parameters for bounding boxes shape
frame_window = 10
gender_offsets = (30, 60)
agegroup_offsets = (20, 40)

# loading models
agegroup_classifier = load_model(agegroup_model_path, compile=False)
gender_classifier = load_model(gender_model_path, compile=False)

# getting input model shapes for inference
agegroup_target_size = agegroup_classifier.input_shape[1:3]
gender_target_size = gender_classifier.input_shape[1:3]
from cv2 import WINDOW_NORMAL
from keras.models import load_model
import numpy as np

from utils.datasets import get_labels
from utils.inference import detect_faces
from utils.inference import draw_text
from utils.inference import draw_bounding_box
from utils.inference import apply_offsets
from utils.inference import load_detection_model
from utils.preprocessor import preprocess_input

# parameters for loading data and images
detection_model_path = '../trained_models/detection_models/haarcascade_frontalface_default.xml'
gender_model_path = '../trained_models/gender_models/simple_CNN.81-0.96.hdf5'
gender_labels = get_labels('imdb')
font = cv2.FONT_HERSHEY_SIMPLEX

# hyper-parameters for bounding boxes shape
frame_window = 10
gender_offsets = (30, 60)

# loading models
face_detection = load_detection_model(detection_model_path)
gender_classifier = load_model(gender_model_path, compile=False)

# getting input model shapes for inference
gender_target_size = gender_classifier.input_shape[1:3]

# starting lists for calculating modes
gender_window = []
Example #32
	total_face_frames = neg + net + pos
	if total_face_frames == 0:
		return 1
	# print(neg, net, pos, total_face_frames)
	# a class wins only when negatives or positives cover at least 30% of the face frames
	if neg >= 0.3 * total_face_frames or pos >= 0.3 * total_face_frames:
		maj_emo = 0 if neg > pos else 2
	else:
		maj_emo = 1

	return maj_emo
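
# Worked example of the 30% rule in the fragment above (counts are illustrative):
# neg, net, pos = 10, 5, 3 gives total_face_frames = 18 and a threshold of
# 0.3 * 18 = 5.4, so neg clears it and neg > pos yields maj_emo = 0 (negative);
# neg, net, pos = 2, 14, 2 leaves both sides under 5.4, so maj_emo = 1 (neutral).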


# parameters for loading data and images
emotion_model_path_1 = './models/presi_CNN.197.hdf5'
emotion_model_path_2 = './models/presi_big_XCEPTION.108.hdf5'
emotion_labels = get_labels('presi')

# hyper-parameters for bounding boxes shape
emotion_offsets = (20, 40)

# loading models
face_cascade = cv2.CascadeClassifier('./models/haarcascade_frontalface_default.xml')

emotion_classifier_1 = load_model(emotion_model_path_1)
emotion_classifier_2 = load_model(emotion_model_path_2)

# getting input model shapes for inference
emotion_target_size = emotion_classifier_1.input_shape[1:3]

dataset_path = "./test/"
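
# The snippet stops after loading both classifiers; a minimal sketch of one
# common way to combine such a pair: simple probability averaging. This is an
# assumption for illustration, not necessarily how the original script
# combines them.
import numpy as np

def predict_emotion_ensemble(gray_face):
    # gray_face: preprocessed batch of shape (1, h, w, 1) matching emotion_target_size
    averaged = (emotion_classifier_1.predict(gray_face) +
                emotion_classifier_2.predict(gray_face)) / 2.0
    return emotion_labels[int(np.argmax(averaged))]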
Example #33
# Initialize and start realtime video capture
cam = cv2.VideoCapture(0)
cam.set(3, 800)  # set video width
cam.set(4, 600)  # set video height
# Define min window size to be recognized as a face
minW = 0.1 * cam.get(3)
minH = 0.1 * cam.get(4)
# Load network
ageNet = cv2.dnn.readNet(ageModel, ageProto)
genderNet = cv2.dnn.readNet(genderModel, genderProto)
faceNet = cv2.dnn.readNet(faceModel, faceProto)
# parameters for loading data and images
detection_model_path = '../trained_models/detection_models/haarcascade_frontalface_default.xml'
emotion_model_path = '../trained_models/emotion_models/fer2013_mini_XCEPTION.102-0.66.hdf5'
emotion_labels = get_labels('fer2013')
# loading models
face_detection = load_detection_model(detection_model_path)
emotion_classifier = load_model(emotion_model_path, compile=False)
# getting input model shapes for inference
emotion_target_size = emotion_classifier.input_shape[1:3]
# hyper-parameters for bounding boxes shape
frame_window = 10
emotion_offsets = (20, 40)

while True:
    hasFrame, frame = cam.read()
    # Flip vertically
    frame = cv2.flip(frame, 1)
    # For Recognition
    gray = cv2.cvtColor(frame, cv2.COLOR_BGR2GRAY)
Example #34
def inference(topic, args, cam_source, config, publisher):
    from statistics import mode
    import cv2
    from keras.models import load_model
    import numpy as np

    from utils.datasets import get_labels
    from utils.inference import detect_faces
    from utils.inference import draw_text
    from utils.inference import draw_bounding_box
    from utils.inference import apply_offsets
    from utils.inference import load_detection_model
    from utils.preprocessor import preprocess_input

    logger.info("inference")

    if args.intu:
        logger.info("inference(): results will be published to intu")
    else:
        logger.info("inference(): working standalone")

    # parameters for loading data and images
    detection_model_path = config.get("model", "detection")
    emotion_model_path = config.get("model", "emotion")
    emotion_labels = get_labels('fer2013')

    # timeout for video source access
    if args.intu:
        timeout = float(config.get("intu", "video_timeout"))
    else:
        timeout = 0
    end_time = time.time() + timeout

    # hyper-parameters for bounding boxes shape
    frame_window = 10
    emotion_offsets = (20, 40)

    # loading models
    face_detection = load_detection_model(detection_model_path)
    emotion_classifier = load_model(emotion_model_path, compile=False)

    # getting input model shapes for inference
    emotion_target_size = emotion_classifier.input_shape[1:3]

    # starting lists for calculating modes
    emotion_window = []

    # starting video streaming
    cv2.namedWindow('emotion_inference')

    video_capture = cv2.VideoCapture(cam_source)
    while True:
        ret, bgr_image = video_capture.read()
        logger.debug("inference: video_capture.read() ret is %s", ret)
        while not ret:
            logger.info("inference: error occurred capturing video, ensure your camera is accessible to the system and you've the appropriate numeral to access it")
            logger.info("end_time is %s", str(end_time))
            logger.info("the current time is %s", str(time.time()))
            if math.isclose(end_time, time.time(), abs_tol=5.0):
                logger.info("video stream access timeout, exiting...")
                exit()
            else:
                logger.info("waiting 10 seconds for the next try, delta %s", end_time - time.time())
                time.sleep(10)
                ret, bgr_image = video_capture.read()

        end_time = time.time() + 20
        gray_image = cv2.cvtColor(bgr_image, cv2.COLOR_BGR2GRAY)
        rgb_image = cv2.cvtColor(bgr_image, cv2.COLOR_BGR2RGB)
        faces = detect_faces(face_detection, gray_image)

        for face_coordinates in faces:

            x1, x2, y1, y2 = apply_offsets(face_coordinates, emotion_offsets)
            gray_face = gray_image[y1:y2, x1:x2]
            try:
                gray_face = cv2.resize(gray_face, (emotion_target_size))
            except:
                continue

            gray_face = preprocess_input(gray_face, True)
            gray_face = np.expand_dims(gray_face, 0)
            gray_face = np.expand_dims(gray_face, -1)
            emotion_prediction = emotion_classifier.predict(gray_face)
            emotion_probability = np.max(emotion_prediction)
            emotion_label_arg = np.argmax(emotion_prediction)
            emotion_text = emotion_labels[emotion_label_arg]
            emotion_window.append(emotion_text)

            if len(emotion_window) > frame_window:
                emotion_window.pop(0)
            try:
                emotion_mode = mode(emotion_window)
            except:
                continue
            logger.info("emotion is %s, with probability %s", emotion_text, emotion_probability)
            publisher.write_and_pub({'emotion_text': emotion_text, 'emotion_probability': float(emotion_probability), 'hostname': socket.gethostname()})

            if args.intu:
                FaceEmotionClient.publish_emotion(emotion_label_arg, emotion_text, emotion_probability)

            if emotion_text == 'angry':
                color = emotion_probability * np.asarray((255, 0, 0))
            elif emotion_text == 'sad':
                color = emotion_probability * np.asarray((0, 0, 255))
            elif emotion_text == 'happy':
                color = emotion_probability * np.asarray((255, 255, 0))
            elif emotion_text == 'surprise':
                color = emotion_probability * np.asarray((0, 255, 255))
            else:
                color = emotion_probability * np.asarray((0, 255, 0))

            color = color.astype(int)
            color = color.tolist()

            draw_bounding_box(face_coordinates, rgb_image, color)
            draw_text(face_coordinates, rgb_image, emotion_mode,
                      color, 0, -45, 1, 1)

        bgr_image = cv2.cvtColor(rgb_image, cv2.COLOR_RGB2BGR)
        cv2.imshow('emotion_inference', bgr_image)

        if cv2.waitKey(1) & 0xFF == ord('q'):
            logger.info("canceled with keyboard, exiting...")
            break
        if args.intu and (not topic.is_connected):
            logger.info("disconnected from intu, exiting...")
            break
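
# A minimal sketch of the config object that inference() above expects
# (assumption: it behaves like configparser.ConfigParser; the paths and the
# timeout value are illustrative, reusing paths seen elsewhere in this
# collection). inference() reads config.get("model", "detection"),
# config.get("model", "emotion") and, when args.intu is set,
# config.get("intu", "video_timeout").
import configparser

EXAMPLE_CONFIG = """
[model]
detection = ../trained_models/detection_models/haarcascade_frontalface_default.xml
emotion = ../trained_models/emotion_models/fer2013_mini_XCEPTION.102-0.66.hdf5

[intu]
video_timeout = 30
"""

config = configparser.ConfigParser()
config.read_string(EXAMPLE_CONFIG)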
from statistics import mode
from keras.models import load_model
from utils.datasets import get_labels
from utils.inference import detect_faces
from utils.inference import draw_text
from utils.inference import draw_bounding_box
from utils.inference import apply_offsets
from utils.inference import load_detection_model
from utils.preprocessor import preprocess_input
import math

# parameters for loading data and images
detection_model_path = '../trained_models/detection_models/haarcascade_frontalface_default.xml'
emotion_model_path = '../trained_models/emotion_models/fer2013_mini_XCEPTION.102-0.66.hdf5'
gender_model_path = '../trained_models/gender_models/simple_CNN.81-0.96.hdf5'
emotion_labels = get_labels('fer2013')
gender_labels = get_labels('imdb')
font = cv2.FONT_HERSHEY_SIMPLEX

# hyper-parameters for bounding boxes shape
frame_window = 10
gender_offsets = (30, 60)
emotion_offsets = (20, 40)

# loading models
face_detection = load_detection_model(detection_model_path)
emotion_classifier = load_model(emotion_model_path, compile=False)
gender_classifier = load_model(gender_model_path, compile=False)

# getting input model shapes for inference
emotion_target_size = emotion_classifier.input_shape[1:3]
    def __init__(self):

        # Initialize the node with rosp
        rospy.init_node('emotion_recognizer_node', anonymous=True)

        rospy.loginfo("recognizer started")
        print "1................................................"

        if (USE_LOCAL_CAMERA):
            self.video_capture = cv2.VideoCapture(0)

        self._detection_models = "~detection_models"
        if rospy.has_param(self._detection_models):
            self.detection_model_path = rospy.get_param(self._detection_models)
        else:
            rospy.logwarn("parameters need to be set to start recognizer.")
            return

        self.emotion_models = "~emotion_models"
        if rospy.has_param(self.emotion_models):
            self.emotion_model_path = rospy.get_param(self.emotion_models)
        else:
            rospy.logwarn("parameters need to be set to start recognizer.")
            return

        self.bridge = CvBridge()

        # parameters for loading data and images
        #self.detection_model_path = '../trained_models/detection_models/haarcascade_frontalface_default.xml'
        #self.emotion_model_path = '../trained_models/emotion_models/fer2013_mini_XCEPTION.110-0.65.hdf5'
        self.emotion_labels = get_labels('fer2013')

        # hyper-parameters for bounding boxes shape
        self.frame_window = 10
        self.emotion_offsets = (20, 40)

        # loading models
        self.face_detection = load_detection_model(self.detection_model_path)
        self.emotion_classifier = load_model(self.emotion_model_path,
                                             compile=False)

        # getting input model shapes for inference
        self.emotion_target_size = self.emotion_classifier.input_shape[1:3]

        # starting lists for calculating modes
        self.emotion_window = []
        self.emotion_publisher = rospy.Publisher("/qt_face/setEmotion",
                                                 String,
                                                 queue_size=10)
        self.speech_publisher = rospy.Publisher("/speaker",
                                                String,
                                                queue_size=10)
        self.emotion_msg = String()
        self.speech_msg = String()

        #Where to publish
        self._output_image_topic = "~image_topic_output"
        print rospy.has_param(self._output_image_topic)
        if rospy.has_param(self._output_image_topic):
            output_image_topic = rospy.get_param(self._output_image_topic)
            self.image_pub = rospy.Publisher(output_image_topic,
                                             Image,
                                             queue_size=10)

        # Scaling factor for face recognition image
        self.scaling_factor = 0.50

        #Where to subscribe
        self._input_image_topic = "~image_topic_input"
        print rospy.has_param(self._input_image_topic)
        if rospy.has_param(self._input_image_topic):
            input_image_topic = rospy.get_param(self._input_image_topic)
            if (not USE_LOCAL_CAMERA):
                self.image_sub = rospy.Subscriber(input_image_topic, Image,
                                                  self.callback)

        self.graph = tf.get_default_graph()
speech_msg = String()

# parameters for loading data and images

_detection_model_path = "~detection_model_path"
print rospy.has_param(_detection_model_path)
if rospy.has_param(_detection_model_path):
    detection_model_path = rospy.get_param(_detection_model_path)

_emotion_model_path = "~emotion_model_path"
print rospy.has_param(_emotion_model_path)
if rospy.has_param(_emotion_model_path):
    emotion_model_path = rospy.get_param(_emotion_model_path)


emotion_labels = get_labels('fer2013')

# hyper-parameters for bounding boxes shape
frame_window = 10
emotion_offsets = (20, 40)

# loading models
face_detection = load_detection_model(detection_model_path)
emotion_classifier = load_model(emotion_model_path, compile=False)

# getting input model shapes for inference
emotion_target_size = emotion_classifier.input_shape[1:3]

# starting lists for calculating modes
emotion_window = []
def emotion_identify(img_url):
    # parameters for loading data and images

    detection_model_path = 'C:/Users/Admin/PycharmProjects/Emotion_Detection/trained_models/detection_models/haarcascade_frontalface_default.xml'
    emotion_model_path = 'C:/Users/Admin/PycharmProjects/Emotion_Detection/trained_models/emotion_models/fer2013_mini_XCEPTION.102-0.66.hdf5'
    gender_model_path = 'C:/Users/Admin/PycharmProjects/Emotion_Detection/trained_models/gender_models/simple_CNN.81-0.96.hdf5'
    emotion_labels = get_labels('fer2013')
    gender_labels = get_labels('imdb')
    font = cv2.FONT_HERSHEY_SIMPLEX

    # hyper-parameters for bounding boxes shape
    gender_offsets = (10, 10)   # overrides the usual (30, 60)
    emotion_offsets = (0, 0)    # overrides the usual (20, 40): no padding around the face

    # loading models
    face_detection = load_detection_model(detection_model_path)
    emotion_classifier = load_model(emotion_model_path, compile=False)
    gender_classifier = load_model(gender_model_path, compile=False)

    # getting input model shapes for inference
    emotion_target_size = emotion_classifier.input_shape[1:3]
    gender_target_size = gender_classifier.input_shape[1:3]
    # loading images
    image_path = img_url
    rgb_image = load_image(image_path, grayscale=False)
    gray_image = load_image(image_path, grayscale=True)
    gray_image = np.squeeze(gray_image)
    gray_image = gray_image.astype('uint8')

    faces = detect_faces(face_detection, gray_image)
    if len(faces) == 0:
        print("No face")
        K.clear_session()
        return False

    emotions = collections.defaultdict(int)
    for face_coordinates in faces:
        x1, x2, y1, y2 = apply_offsets(face_coordinates, gender_offsets)
        rgb_face = rgb_image[y1:y2, x1:x2]

        x1, x2, y1, y2 = apply_offsets(face_coordinates, emotion_offsets)
        gray_face = gray_image[y1:y2, x1:x2]

        try:
            rgb_face = cv2.resize(rgb_face, (gender_target_size))
            gray_face = cv2.resize(gray_face, (emotion_target_size))
        except:
            continue

        rgb_face = preprocess_input(rgb_face, False)
        rgb_face = np.expand_dims(rgb_face, 0)
        gender_prediction = gender_classifier.predict(rgb_face)
        gender_label_arg = np.argmax(gender_prediction)
        gender_text = gender_labels[gender_label_arg]

        gray_face = preprocess_input(gray_face, True)
        gray_face = np.expand_dims(gray_face, 0)
        gray_face = np.expand_dims(gray_face, -1)
        emotion_label_arg = np.argmax(emotion_classifier.predict(gray_face))
        emotion_text = emotion_labels[emotion_label_arg]
        emotions[emotion_text] += 1
        if gender_text == gender_labels[0]:
            color = (0, 0, 255)
        else:
            color = (255, 0, 0)

        draw_bounding_box(face_coordinates, rgb_image, color)
        draw_text(face_coordinates, rgb_image, gender_text, color, 0, -20, 1, 2)
        draw_text(face_coordinates, rgb_image, emotion_text, color, 0, -50, 1, 2)
    max_num = 0
    max_emotion = None
    for key, value in emotions.items():
        if value > max_num:
            max_num = value
            max_emotion = key
    print("The emotion of this picture is: ", max_emotion)
    bgr_image = cv2.cvtColor(rgb_image, cv2.COLOR_RGB2BGR)
    cv2.imwrite('./result_images/predicted_test_image.png', bgr_image)
    K.clear_session()
    return max_emotion
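
# Usage sketch for emotion_identify() above (the image path is illustrative):
# it returns the most frequent emotion label found in the picture, or False
# when no face is detected, and writes the annotated result to
# ./result_images/predicted_test_image.png.
if __name__ == '__main__':
    result = emotion_identify('./test_images/example_face.jpg')
    if result:
        print("Detected emotion:", result)
    else:
        print("No face detected")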