Example No. 1
def show_webcam_and_run(model,
                        emoticons,
                        window_size=None,
                        window_name='webcam',
                        update_time=10):
    """
    Muestra la webcam y detecta los rostros y las emociones en tiempo real para dibujar los emojis.
    :param model: Modelo para reconocer emociones.
    :param emoticons: emojis.
    :param window_size: tamao de la ventana donde estara el stream.
    :param window_name: Nombre de la ventana.
    :param update_time: tiempo para actualizar la imagen.
    """
    cv2.namedWindow(window_name, WINDOW_NORMAL)
    if window_size:
        width, height = window_size
        cv2.resizeWindow(window_name, width, height)

    # Select the camera; 0 is the local webcam
    vc = cv2.VideoCapture(
        0
    )  # use http://192.168.0.2:4747/mjpegfeed for a remote Android camera via DroidCam

    # Camera resolution; defaults to the highest resolution the current camera supports

    # vc.set(cv2.CAP_PROP_FRAME_WIDTH, 1920)
    # vc.set(cv2.CAP_PROP_FRAME_HEIGHT, 1080)

    if vc.isOpened():
        read_value, webcam_image = vc.read()
    else:
        print("[ERROR] No se enontro camara.")
        return
    while read_value:
        for normalized_face, (x, y, w, h) in find_faces(webcam_image):
            prediction = model.predict(normalized_face)  # make the prediction
            prediction = prediction[
                0]  # keep the scores used to pick the emoji
            # load the emoji with the highest score
            image_to_draw = emoticons[prediction.tolist().index(
                max(prediction))]
            # draw the emoji above the face
            draw_with_alpha(webcam_image, image_to_draw, (x, y - 100, w, h))
        cv2.setWindowProperty(window_name, cv2.WND_PROP_FULLSCREEN,
                              cv2.WINDOW_FULLSCREEN)
        cv2.imshow(window_name, webcam_image)
        read_value, webcam_image = vc.read()
        key = cv2.waitKey(update_time)
        if key == 27:  # exit on ESC
            break
    cv2.destroyWindow(window_name)
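
Every example here calls draw_with_alpha without defining it. A minimal sketch of what such a helper might look like, assuming the emoticons are BGRA numpy arrays loaded with cv2.imread(..., -1) (Example No. 10 suggests the original converts them to PIL images first, so treat this as an approximation rather than the original implementation):

import cv2
import numpy as np


def draw_with_alpha(frame, image, rect):
    # Hypothetical helper: alpha-blend a BGRA emoticon onto a BGR frame in place.
    x, y, w, h = rect
    if x < 0 or y < 0 or x + w > frame.shape[1] or y + h > frame.shape[0]:
        return  # skip emoticons that would fall outside the frame
    image = cv2.resize(image, (w, h))
    alpha = image[:, :, 3:] / 255.0            # (h, w, 1) opacity mask
    overlay = image[:, :, :3].astype(float)    # colour channels
    region = frame[y:y + h, x:x + w].astype(float)
    frame[y:y + h, x:x + w] = (alpha * overlay +
                               (1 - alpha) * region).astype(np.uint8)
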
Example No. 2
def show_webcam_and_run(model,
                        emoticons,
                        window_size=None,
                        window_name='webcam',
                        update_time=10):
    """
    Shows the webcam image, detects faces and their emotions in real time, and draws emoticons over those faces.
    :param model: Learnt emotion detection model.
    :param emoticons: List of emotions images.
    :param window_size: Size of webcam image window.
    :param window_name: Name of webcam image window.
    :param update_time: Image update time interval.
    """
    cv2.namedWindow(window_name, WINDOW_NORMAL)
    if window_size:
        width, height = window_size
        cv2.resizeWindow(window_name, width, height)

    vc = cv2.VideoCapture(0)
    if vc.isOpened():
        read_value, webcam_image = vc.read()
    else:
        print("webcam not found")
        return

    while read_value:
        i = 1  # per-frame face counter shown in the label
        for normalized_face, (x, y, w, h) in find_faces(webcam_image):
            prediction = model.predict(normalized_face)  # do prediction
            prediction = prediction[0]
            # prediction = emotions_map[prediction]
            image_to_draw = emoticons[prediction]
            draw_with_alpha(webcam_image, image_to_draw, (x, y, w, h))
            cv2.putText(webcam_image,
                        (emotions[prediction] + " #{}").format(i), (x, y - 10),
                        cv2.FONT_HERSHEY_SIMPLEX, 0.55, (0, 0, 255), 2)
            i += 1

        cv2.imshow(window_name, webcam_image)
        read_value, webcam_image = vc.read()
        key = cv2.waitKey(update_time)

        if key == 27:  # exit on ESC
            break

    cv2.destroyWindow(window_name)
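
find_faces is likewise assumed everywhere but shown nowhere. A plausible sketch using OpenCV's bundled Haar cascade, normalizing each face crop to a fixed grayscale size (350x350 matches the resize in Example No. 7; the cascade choice and size are assumptions):

import cv2

_cascade = cv2.CascadeClassifier(
    cv2.data.haarcascades + 'haarcascade_frontalface_default.xml')


def find_faces(image, size=(350, 350)):
    # Hypothetical helper: yield (normalized_face, (x, y, w, h)) per detection.
    gray = cv2.cvtColor(image, cv2.COLOR_BGR2GRAY)
    for (x, y, w, h) in _cascade.detectMultiScale(gray, scaleFactor=1.3,
                                                  minNeighbors=5):
        yield cv2.resize(gray[y:y + h, x:x + w], size), (x, y, w, h)
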
Example No. 3
def draw_img(predictions, path, normalized_faces):
    emoticons = _load_emoticons(emotions)
    image = cv2.imread(path)
    for index, (img, (x, y, w, h)) in enumerate(normalized_faces):
        image_to_draw = emoticons[predictions[index]]
        # draw_with_alpha modifies `image` in place (as in the other examples),
        # so write the annotated image rather than the call's return value
        draw_with_alpha(image, image_to_draw, (x, y, w, h))
        cv2.imwrite(
            str(emotions[predictions[index]]) + '_' + str(index) + '.png',
            image)
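
A hypothetical invocation of draw_img, assuming a trained model and the find_faces helper used by the surrounding examples:

faces = list(find_faces(cv2.imread('group.jpg')))      # hypothetical input image
predictions = [model.predict(face)[0] for face, _ in faces]
draw_img(predictions, 'group.jpg', faces)
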
Example No. 4
def show_webcam_and_run(model, emoticons, window_size=None, window_name='webcam', update_time=10):
    """
    Shows the webcam image, detects faces and their emotions in real time, and draws emoticons over those faces.
    :param model: Learnt emotion detection model.
    :param emoticons: List of emotions images.
    :param window_size: Size of webcam image window.
    :param window_name: Name of webcam image window.
    :param update_time: Image update time interval.
    """
    cv2.namedWindow(window_name, WINDOW_NORMAL)
    if window_size:
        width, height = window_size
        cv2.resizeWindow(window_name, width, height)

    vc = cv2.VideoCapture(0)
    if vc.isOpened():
        read_value, webcam_image = vc.read()
    else:
        print("webcam not found")
        return

    while read_value:
        for normalized_face, (x, y, w, h) in find_faces(webcam_image):
            prediction = model.predict(normalized_face)  # do prediction
            if cv2.__version__ != '3.1.0':
                prediction = prediction[0]

            image_to_draw = emoticons[prediction]
            draw_with_alpha(webcam_image, image_to_draw, (x, y, w, h))

        cv2.imshow(window_name, webcam_image)
        read_value, webcam_image = vc.read()
        key = cv2.waitKey(update_time)

        if key == 27:  # exit on ESC
            break

    cv2.destroyWindow(window_name)
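
These snippets use WINDOW_NORMAL as a bare name, so they appear to share an import header along these lines (the module names for the local helpers are assumptions):

import cv2
from cv2 import WINDOW_NORMAL

from face_detect import find_faces           # hypothetical local module
from image_commons import draw_with_alpha    # hypothetical local module
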
Example No. 5
def show_webcam_and_run(model,
                        emoticons,
                        window_size=None,
                        window_name='webcam',
                        update_time=10):
    """
    Shows the webcam image, detects faces and their emotions in real time, and draws emoticons over those faces.
    model: learnt emotion detection model.
    emoticons: list of emotions images.
    window_size: size of webcam image window.
    window_name: name of webcam image window.
    update_time: image update time interval.
    """
    cv2.namedWindow(window_name, WINDOW_NORMAL)
    if window_size:
        width, height = window_size
        cv2.resizeWindow(window_name, width, height)

    vc = cv2.VideoCapture(0)
    if vc.isOpened():
        read_value, webcam_image = vc.read()
    else:
        print("webcam not found")
        return  # otherwise read_value below would be undefined

    while read_value:
        for normalized_face, (x, y, w, h) in find_faces(webcam_image):
            prediction = model.predict(normalized_face)  # do prediction
            prediction = prediction[0]
            emoj_to_draw = emoticons[prediction]
            draw_with_alpha(webcam_image, emoj_to_draw, (x, y, w, h))

        cv2.imshow(window_name, webcam_image)
        read_value, webcam_image = vc.read()
        key = cv2.waitKey(update_time)

        if key == 27:  # exit on ESC
            break

    cv2.destroyWindow(window_name)
Example No. 6
    # (fragment of a capture loop; assumes `model`, `emoticons`, `emotion_dict`,
    # and the current frame `img` are defined earlier in the original program)
    facecasc = cv2.CascadeClassifier('haarcascade_frontalface_default.xml')
    gray = cv2.cvtColor(img, cv2.COLOR_BGR2GRAY)
    faces = facecasc.detectMultiScale(gray, scaleFactor=1.3, minNeighbors=5)

    # For making the bounding box
    for (x, y, w, h) in faces:
        cv2.rectangle(img, (x, y - 50), (x + w, y + h + 10), (255, 0, 0), 2)
        roi_gray = gray[y:y + h, x:x + w]
        cropped_img = np.expand_dims(
            np.expand_dims(cv2.resize(roi_gray, (48, 48)), -1), 0)
        prediction = model.predict(cropped_img)
        maxindex = int(np.argmax(prediction))
        cv2.putText(img, emotion_dict[maxindex], (x + 20, y - 60),
                    cv2.FONT_HERSHEY_SIMPLEX, 1, (255, 255, 255), 2,
                    cv2.LINE_AA)

        # For choosing the emoticon
        new_predict = int(np.argmax(prediction))
        # print(new_predict)

        # To draw the emoticons
        image_to_draw = emoticons[new_predict]
        draw_with_alpha(img, image_to_draw, (x, y, w, h))

    cv2.imshow('Output',
               cv2.resize(img, (1280, 720), interpolation=cv2.INTER_CUBIC))
    if cv2.waitKey(1) == 27:
        break

print("Done")
cv2.destroyAllWindows()
Example No. 7
def show_webcam_and_run(model,
                        emoticons,
                        window_size=None,
                        window_name='webcam',
                        update_time=10):
    """
    Shows the webcam image, detects faces and their emotions in real time, and draws emoticons over those faces.
    :param model: Learnt emotion detection model.
    :param emoticons: List of emotions images.
    :param window_size: Size of webcam image window.
    :param window_name: Name of webcam image window.
    :param update_time: Image update time interval.
    """
    cv2.namedWindow(window_name, WINDOW_NORMAL)
    if window_size:
        width, height = window_size
        cv2.resizeWindow(window_name, width, height)

    vc = cv2.VideoCapture(0)
    if vc.isOpened():
        read_value, webcam_image = vc.read()
    else:
        print("webcam not found")
        return

    while read_value:
        for normalized_face, (x, y, w, h) in find_faces(webcam_image):
            prediction = model.predict(cv2.resize(normalized_face,
                                                  (350, 350)))  # do prediction
            mixer.init()  # pygame mixer; re-initializing per face is redundant but harmless
            if prediction[0] == 0:
                mixer.music.load('songs/neutral.mp3')
                mixer.music.play(-1, 2.0)
                time.sleep(20)
                mixer.music.stop()
            elif prediction[0] == 1:
                mixer.music.load('songs/anger.mp3')
                mixer.music.play(-1, 2.0)
                time.sleep(10)
                mixer.music.stop()
            elif prediction[0] == 5:
                mixer.music.load('songs/surprise.mp3')
                mixer.music.play(-1, 2.0)
                time.sleep(10)
                mixer.music.stop()
            elif prediction[0] == 3:
                mixer.music.load('songs/happy.mp3')
                mixer.music.play(-1, 2.0)
                time.sleep(10)
                mixer.music.stop()
            elif prediction[0] == 4:
                mixer.music.load('songs/sadness.mp3')
                mixer.music.play(-1, 2.0)
                time.sleep(10)
                mixer.music.stop()
            image_to_draw = emoticons[prediction[0]]
            draw_with_alpha(webcam_image, image_to_draw, (x, y, w, h))

        cv2.imshow(window_name, webcam_image)
        read_value, webcam_image = vc.read()
        key = cv2.waitKey(update_time)

        if key == 27:  # exit on ESC
            break

    cv2.destroyWindow(window_name)
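
The five near-identical branches in Example No. 7 collapse naturally into a lookup table. A sketch under the same assumptions (pygame mixer, the same song paths; the helper name play_for is an invention):

import time
from pygame import mixer

# prediction label -> (track, seconds to play), as in Example No. 7
TRACKS = {
    0: ('songs/neutral.mp3', 20),
    1: ('songs/anger.mp3', 10),
    3: ('songs/happy.mp3', 10),
    4: ('songs/sadness.mp3', 10),
    5: ('songs/surprise.mp3', 10),
}

mixer.init()  # initialize once, outside the capture loop


def play_for(label):
    entry = TRACKS.get(label)
    if entry is None:
        return
    track, seconds = entry
    mixer.music.load(track)
    mixer.music.play(-1, 2.0)  # loop indefinitely, starting 2 s into the track
    time.sleep(seconds)
    mixer.music.stop()
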
Example No. 8
def emotion(mode):
    # trains the CNN ("train") or runs the webcam demo ("display");
    # plot_model_history below plots the accuracy and loss curves

    # Define data generators
    train_dir = 'data/train'
    val_dir = 'data/test'

    num_train = 28709
    num_val = 7178
    batch_size = 64
    num_epoch = 50

    train_datagen = ImageDataGenerator(rescale=1. / 255)
    val_datagen = ImageDataGenerator(rescale=1. / 255)

    train_generator = train_datagen.flow_from_directory(
        train_dir,
        target_size=(48, 48),
        batch_size=batch_size,
        color_mode="grayscale",
        class_mode='categorical')

    validation_generator = val_datagen.flow_from_directory(
        val_dir,
        target_size=(48, 48),
        batch_size=batch_size,
        color_mode="grayscale",
        class_mode='categorical')

    # Create the model
    model = Sequential()

    model.add(
        Conv2D(32,
               kernel_size=(3, 3),
               activation='relu',
               input_shape=(48, 48, 1)))
    model.add(Conv2D(64, kernel_size=(3, 3), activation='relu'))
    model.add(MaxPooling2D(pool_size=(2, 2)))
    model.add(Dropout(0.25))

    model.add(Conv2D(128, kernel_size=(3, 3), activation='relu'))
    model.add(MaxPooling2D(pool_size=(2, 2)))
    model.add(Conv2D(128, kernel_size=(3, 3), activation='relu'))
    model.add(MaxPooling2D(pool_size=(2, 2)))
    model.add(Dropout(0.25))

    model.add(Flatten())
    model.add(Dense(1024, activation='relu'))
    model.add(Dropout(0.5))
    model.add(Dense(7, activation='softmax'))

    # If you want to train the same model or try other models, go for this
    if mode == "train":
        model.compile(loss='categorical_crossentropy',
                      optimizer=Adam(lr=0.0001, decay=1e-6),
                      metrics=['accuracy'])
        model_info = model.fit_generator(
            train_generator,
            steps_per_epoch=num_train // batch_size,
            epochs=num_epoch,
            validation_data=validation_generator,
            validation_steps=num_val // batch_size)
        plot_model_history(model_info)
        model.save_weights('model.h5')


    # emotions will be displayed on your face from the webcam feed
    elif mode == "display":
        model.load_weights('model.h5')

        emotions = [
            'anger', 'disgust', 'fear', 'happy', 'neutral', 'sadness',
            'surprise'
        ]  # index order matches emotion_dict below
        emoticons = _load_emoticons(emotions)

        # prevents openCL usage and unnecessary logging messages
        cv2.ocl.setUseOpenCL(False)

        # dictionary which assigns each label an emotion (alphabetical order)
        emotion_dict = {
            0: "Angry",
            1: "Disgusted",
            2: "Fearful",
            3: "Happy",
            4: "Neutral",
            5: "Sad",
            6: "Surprised"
        }

        # start the webcam feed
        cap = cv2.VideoCapture(0)
        while True:
            # Find haar cascade to draw bounding box around face
            ret, frame = cap.read()
            if not ret:
                break
            facecasc = cv2.CascadeClassifier(
                'haarcascade_frontalface_default.xml')
            gray = cv2.cvtColor(frame, cv2.COLOR_BGR2GRAY)
            faces = facecasc.detectMultiScale(gray,
                                              scaleFactor=1.3,
                                              minNeighbors=5)

            for (x, y, w, h) in faces:
                cv2.rectangle(frame, (x, y - 50), (x + w, y + h + 10),
                              (255, 0, 0), 2)
                roi_gray = gray[y:y + h, x:x + w]
                cropped_img = np.expand_dims(
                    np.expand_dims(cv2.resize(roi_gray, (48, 48)), -1), 0)
                prediction = model.predict(cropped_img)
                maxindex = int(np.argmax(prediction))
                cv2.putText(frame, emotion_dict[maxindex], (x + 20, y - 60),
                            cv2.FONT_HERSHEY_SIMPLEX, 1, (255, 255, 255), 2,
                            cv2.LINE_AA)

                image_to_draw = emoticons[maxindex]
                draw_with_alpha(frame, image_to_draw, (x + 100, y, w, h))

            cv2.imshow(
                'Video',
                cv2.resize(frame, (800, 600), interpolation=cv2.INTER_CUBIC))
            if cv2.waitKey(1) & 0xFF == ord('q'):
                break

        cap.release()
        cv2.destroyAllWindows()
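
A hypothetical entry point for Example No. 8, choosing the mode from the command line:

import sys

if __name__ == '__main__':
    # e.g. `python emotion.py train` or `python emotion.py display`
    emotion(sys.argv[1] if len(sys.argv) > 1 else 'display')
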
Example No. 9
def show_webcam_and_run(model,
                        emoticons,
                        window_size=None,
                        window_name='webcam',
                        update_time=10):
    """
    Shows the webcam, buckets the recognizer's confidence value into an emotion,
    and plays a random matching song after three consecutive detections.
    """
    cv2.namedWindow(window_name, WINDOW_NORMAL)
    if window_size:
        width, height = window_size
        cv2.resizeWindow(window_name, width, height)

    vc = cv2.VideoCapture(0)
    if vc.isOpened():
        read_value, webcam_image = vc.read()
    else:
        print("webcam not found")
        return
    # consecutive-hit counters for sad, happy, neutral, and anger
    cs, ch, cn, ca = 0, 0, 0, 0
    while read_value:
        for normalized_face, (x, y, w, h) in find_faces(webcam_image):
            prediction = model.predict(normalized_face)
            print(prediction[1])
            if 700 < prediction[1] < 860:
                prediction = 0
                cn += 1
                if cn >= 3:
                    pattern = "songs/neutral/*"
                    filename = random.choice(glob.glob(pattern))
                    print(filename)
                    mixer.init()
                    mixer.music.load(filename)
                    mixer.music.play(-1, 0.0)
                    time.sleep(10)
                    mixer.music.stop()
                    cn = 0
            elif 420 < prediction[1] < 650:
                prediction = 2
                ch += 1
                if ch >= 3:
                    pattern = "songs/happy/*"
                    filename = random.choice(glob.glob(pattern))
                    print(filename)
                    mixer.init()
                    mixer.music.load(filename)
                    mixer.music.play(-1, 0.0)
                    time.sleep(10)
                    mixer.music.stop()
                    ch = 0
            elif 860 < prediction[1] < 1300:
                prediction = 3
                cs += 1
                if cs >= 3:
                    pattern = "songs/sad/*"
                    filename = random.choice(glob.glob(pattern))
                    print(filename)
                    mixer.init()
                    mixer.music.load(filename)
                    mixer.music.play(-1, 0.0)
                    time.sleep(10)
                    mixer.music.stop()
                    cs = 0
            elif 1000 < prediction[1] < 3300:
                prediction = 1
                ca += 1
                if ca >= 3:
                    pattern = "songs/anger/*"
                    filename = random.choice(glob.glob(pattern))
                    print(filename)
                    mixer.init()
                    mixer.music.load(filename)
                    mixer.music.play(-1, 0.0)
                    time.sleep(10)
                    mixer.music.stop()
                    ca = 0
            else:
                prediction = 0
            time.sleep(2)
            image_to_draw = emoticons[prediction]
            font = cv2.FONT_HERSHEY_SIMPLEX
            text = emotions[prediction]
            webcam_image = cv2.putText(webcam_image, text, (10, 50), font, 1,
                                       (0, 255, 255), 2, cv2.LINE_AA)
            draw_with_alpha(webcam_image, image_to_draw, (x, y, w, h))
            time.sleep(2)
        cv2.imshow(window_name, webcam_image)
        read_value, webcam_image = vc.read()
        key = cv2.waitKey(update_time)

        if key == 27:
            break

    cv2.destroyWindow(window_name)
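
Example No. 9 treats model.predict as returning a (label, confidence) pair and buckets the confidence value, which matches the cv2.face recognizers from opencv-contrib. A sketch of the kind of model it appears to assume (the file path is hypothetical):

import cv2

model = cv2.face.FisherFaceRecognizer_create()  # requires opencv-contrib-python
model.read('models/emotion_classifier.xml')     # hypothetical trained model file
label, confidence = model.predict(normalized_face)
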
Example No. 10
def _load_emoticons(emotions):
    """
    Loads the emoticon graphics (with alpha channel) for the given emotions.
    :param emotions: Array of emotions names.
    :return: Array of emotions graphics.
    """
    return [
        nparray_as_image(cv2.imread('graphics/%s.png' % emotion, -1),
                         mode=None) for emotion in emotions
    ]


emotions = ['angry', 'disgust', 'fear', 'happy', 'sad', 'surprise', 'neutral']
emoticons = _load_emoticons(emotions)

image_path = 'test.jpeg'
dets, cords = FACE_DET.get_det(image_path)
result, emos = FER.get_fer(dets)
image_raw = cv2.imread(image_path)  # read once so the final imwrite always has an image
if len(emos) > 0:
    for idx, emo in enumerate(emos):
        image_to_draw = emoticons[emo]
        b = cords[idx]
        ch = b[3] - b[1]
        cw = b[2] - b[0]
        cc = int(min(ch, cw) / 1.5)
        cx = max(b[0] - int(cc / 2), 0)
        cy = max(b[1] - int(cc / 2), 0)
        picked_color = fer_colors[idx].lstrip('#')
        cv2.rectangle(
            image_raw, (b[0], b[1]), (b[2], b[3]),
            tuple(int(picked_color[i:i + 2], 16) for i in (0, 2, 4))[::-1], 5)
        draw_with_alpha(image_raw, image_to_draw, (cx, cy, cc, cc))
cv2.imwrite('test_result.jpg', image_raw)
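
nparray_as_image is another undefined helper; judging from the -1 imread flag (keep alpha) and the mode=None argument, it likely wraps an OpenCV array in a PIL image. A sketch under that assumption:

import cv2
from PIL import Image


def nparray_as_image(nparray, mode=None):
    # hypothetical helper: convert an OpenCV BGR(A) array into a PIL image,
    # preserving the alpha channel when present (mode=None lets PIL infer)
    if nparray.ndim == 3 and nparray.shape[2] == 4:
        converted = cv2.cvtColor(nparray, cv2.COLOR_BGRA2RGBA)
    else:
        converted = cv2.cvtColor(nparray, cv2.COLOR_BGR2RGB)
    return Image.fromarray(converted, mode=mode)
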
Example No. 11
        # (fragment of a capture loop; assumes `fr`, `gray`, `model2`, `names`,
        # `font`, `emoticons`, `emo`, `out`, and the capture `rgb` are defined
        # earlier in the original program)
        flip_fr = cv2.flip(fr, 1)
        face2 = gray[y:y + h, x:x + w]
        face_resize = cv2.resize(face2, (width, height))
        prediction2 = model2.predict(face_resize)
        if prediction2[1] < 500:

            cv2.putText(flip_fr,
                        '%s - %.0f' % (names[prediction2[0]], prediction2[1]),
                        (x - 10, y - 10), font, 1, (0, 255, 0), 2)
        else:
            cv2.putText(flip_fr, 'not recognized', (x - 10, y - 10), font, 1,
                        (0, 255, 0), 2)
        image_to_draw = emoticons[list(emo.keys())[list(
            emo.values()).index(out)]]

        draw_with_alpha(flip_fr, image_to_draw, (x, y, w, h))
        cv2.putText(flip_fr, out, (30, 30), font, 1, (255, 255, 0), 2)

    cv2.imshow('rgb', flip_fr)

    k = cv2.waitKey(1) & 0xEFFFFF
    if k == 27:
        break
    elif k == -1:
        continue
    else:
        # print k
        continue

rgb.release()
cv2.destroyAllWindows()
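
A sketch of the setup this fragment appears to assume, with an LBPH face recognizer from opencv-contrib (every name and path here is hypothetical):

import cv2

font = cv2.FONT_HERSHEY_SIMPLEX
width, height = 130, 100                       # hypothetical training-image size
model2 = cv2.face.LBPHFaceRecognizer_create()  # requires opencv-contrib-python
model2.read('trainer.yml')                     # hypothetical trained recognizer
names = ['alice', 'bob']                       # hypothetical label -> name list
rgb = cv2.VideoCapture(0)
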
Example No. 12
def show_webcam_and_run(model,
                        emoticons,
                        window_size=None,
                        window_name='webcam',
                        update_time=10):
    """
    Shows the video frames, detects faces and their emotions in real time, and draws emoticons over those faces.
    :param model: Learnt emotion detection model.
    :param emoticons: List of emotions images.
    :param window_size: Size of webcam image window.
    :param window_name: Name of webcam image window.
    :param update_time: Image update time interval.
    """
    cv2.namedWindow(window_name, WINDOW_NORMAL)
    if window_size:
        width, height = window_size
        cv2.resizeWindow(window_name, width, height)

    vc = cv2.VideoCapture("testing.mp4")

    if vc.isOpened():
        read_value, webcam_image = vc.read()
    else:
        print("webcam not found")
        return

    while True:

        for normalized_face, (x, y, w, h) in find_faces(webcam_image):
            prediction = model.predict(normalized_face)  # do prediction
            #if cv2.__version__ != '3.1.0':
            prediction = prediction[0]

            image_to_draw = emoticons[prediction]
            draw_with_alpha(webcam_image, image_to_draw, (x, y, w, h))
            print(prediction)  #target variable
        cv2.imshow(window_name, webcam_image)
        read_value, webcam_image = vc.read()
        key = cv2.waitKey(update_time)
        if not read_value:  # stop when the video runs out of frames
            break
        img = webcam_image
        #----------------------
        gray = cv2.cvtColor(img, cv2.COLOR_BGR2GRAY)

        faces = faceCascade.detectMultiScale(
            gray,
            scaleFactor=1.2,
            minNeighbors=5,
            minSize=(int(minW), int(minH)),
        )
        #lola=0
        for (x, y, w, h) in faces:

            cv2.rectangle(img, (x, y), (x + w, y + h), (0, 255, 0), 2)

            id, confidence = recognizer.predict(gray[y:y + h, x:x + w])

            # Check if confidence is less than 100 ==> "0" is a perfect match
            if (confidence < 100):
                percent = 100 - confidence
                if (confidence < 60):
                    hit = 1
                    id = names[id]
                    confidence = "  {0}%".format(round(100 - confidence))
                else:
                    id = names[id]
                    confidence = "  {0}%".format(round(100 - confidence))
                    hit = 0
                    global lola
                    if lola == 2:
                        from twilio.rest import Client

                        account_sid = "AC22c8e1723b1046ac741bd563caa0995d"
                        # # our Auth Token from twilio.com/console
                        auth_token = "9422efc06f0c7ab5b637536569d2d818"

                        client = Client(account_sid, auth_token)

                        message = client.messages.create(
                            to="+919944333726",
                            from_="+12407022806",
                            body=
                            "You are travelling with an unauthorized driver. Please check!"
                        )
                    lola = 0
            else:
                id = "unknown"
                confidence = "  {0}%".format(round(100 - confidence))
                hit = 0

            cv2.putText(img, str(id), (x + 5, y - 5), font, 1, (255, 255, 255),
                        2)
            cv2.putText(img, str(confidence), (x + 5, y + h - 5), font, 1,
                        (255, 255, 0), 1)

            def face(hit):
                return hit

            #print(percent)
        cv2.imshow(window_name, img)

        #----------------------
        if key == 27:  # exit on ESC
            break

    cv2.destroyWindow(window_name)
Example No. 13
    # (fragment of a capture loop; assumes `model`, `emoticons`, `emotion_dict`,
    # the capture `cap`, and the current `frame` are defined earlier in the
    # original program)
    facecasc = cv2.CascadeClassifier('haarcascade_frontalface_default.xml')
    gray = cv2.cvtColor(frame, cv2.COLOR_BGR2GRAY)
    faces = facecasc.detectMultiScale(gray, scaleFactor=1.3, minNeighbors=5)

    for (x, y, w, h) in faces:
        cv2.rectangle(frame, (x, y - 50), (x + w, y + h + 10), (255, 0, 0), 2)
        roi_gray = gray[y:y + h, x:x + w]
        cropped_img = np.expand_dims(
            np.expand_dims(cv2.resize(roi_gray, (48, 48)), -1), 0)
        prediction = model.predict(cropped_img)
        maxindex = int(np.argmax(prediction))
        cv2.putText(frame, emotion_dict[maxindex], (x + 20, y - 60),
                    cv2.FONT_HERSHEY_SIMPLEX, 1, (255, 255, 255), 2,
                    cv2.LINE_AA)

        # For choosing the emoticon
        new_predict = int(np.argmax(prediction))
        # print(new_predict)

        # To draw the emoticons
        image_to_draw = emoticons[new_predict]
        draw_with_alpha(frame, image_to_draw, (x, y, w, h))

    cv2.imshow('Output',
               cv2.resize(frame, (1280, 720), interpolation=cv2.INTER_CUBIC))
    if cv2.waitKey(1) == 27:
        break

print("Done")
cap.release()
cv2.destroyAllWindows()
Example No. 14
def show_webcam_and_run(model,
                        neutral_filter,
                        neutral_face,
                        emoticons,
                        window_size=None,
                        window_name='emotionEmoji',
                        update_time=10):
    """
    Shows webcam image, detects faces and its emotions in real time and draw emoticons over those faces.
    :param model: Learnt emotion detection model.
    :param emoticons: List of emotions images.
    :param window_size: Size of webcam image window.
    :param window_name: Name of webcam image window.
    :param update_time: Image update time interval.
    """
    cv2.namedWindow(window_name, WINDOW_NORMAL)
    if window_size:
        width, height = window_size
        cv2.resizeWindow(window_name, width, height)

    vc = cv2.VideoCapture(0)
    if vc.isOpened():
        read_value, webcam_image = vc.read()
    else:
        print("webcam not found")
        return

    count = 0
    total = 0
    count_notopen = 0
    while read_value:
        for normalized_face, (x, y, w, h) in find_faces(webcam_image):
            if normalized_face is None: continue
            face_landmarks_list = get_facelandmark(normalized_face)
            if face_landmarks_list is None: continue

            cv2.imwrite(
                "checkface/normalization/original_image_%s.jpg" %
                time.strftime("%Y-%m-%d %H:%M:%S", time.localtime()),
                webcam_image)
            cv2.imwrite(
                "checkface/normalization/normalized_face_%s.jpg" %
                time.strftime("%Y-%m-%d %H:%M:%S", time.localtime()),
                normalized_face)

            # Mark landmarks
            featureList = []
            for featureKey in facial_features:
                points = face_landmarks_list[0][featureKey]
                for point in points:
                    featureList += list(point)
            Xs = featureList[::2]
            Ys = featureList[1::2]
            for i in range(len(Xs)):
                xx = Xs[i] * w / resize
                yy = Ys[i] * h / resize
                cv2.circle(webcam_image, (int(x + xx), int(y + yy)), 3,
                           (0, 0, 255), 0)

            # predict
            total += 1
            featureList = get_feature(face_landmarks_list)
            featureArray = np.array(featureList).reshape(1, -1)
            newfeatureList = [
                featureList[i] - neutral_face[i]
                for i in range(len(featureList))
            ]
            newfeatureArray = np.array(newfeatureList).reshape(1, -1)

            # check if it is neutral
            # check if open the mouth
            mouthfeature = featureList[44:-1:1]
            if abs(mouthfeature[21] - mouthfeature[9]
                   ) <= 1.5 * abs(mouthfeature[9] - mouthfeature[3]) + abs(
                       mouthfeature[21] - mouthfeature[15]):
                count_notopen += 1
                neutral_check = neutral_filter.predict(newfeatureArray)[0]
                if neutral_check == 0:
                    # is neutral face
                    count += 1
                    for i in range(len(neutral_face)):
                        # running mean of the neutral-face features seen so far
                        neutral_face[i] = (featureList[i] * 1.0 +
                                           neutral_face[i] * (count - 1)) / count
                    pred = 0
                else:
                    newfeatureList = [
                        featureList[i] - neutral_face[i]
                        for i in range(len(featureList))
                    ]
                    newfeatureArray = np.array(newfeatureList).reshape(1, -1)
                    pred = model.predict(newfeatureArray)[0]  # do prediction

            else:
                newfeatureList = [
                    featureList[i] - neutral_face[i]
                    for i in range(len(featureList))
                ]
                newfeatureArray = np.array(newfeatureList).reshape(1, -1)
                pred = model.predict(newfeatureArray)[0]  # do prediction

            image_to_draw = emoticons[pred]
            draw_with_alpha(webcam_image, image_to_draw, (x, y, 60, 60))
            cv2.putText(webcam_image, (emotions[pred] + " #{}").format(pred),
                        (x, y - 10), cv2.FONT_HERSHEY_SIMPLEX, 0.55,
                        (0, 0, 255), 2)

        cv2.imwrite(
            "checkface/%s.jpg" %
            time.strftime("%Y-%m-%d %H:%M:%S", time.localtime()), webcam_image)
        cv2.imshow(window_name, webcam_image)
        read_value, webcam_image = vc.read()
        key = cv2.waitKey(update_time)

        if key == 27:  # exit on ESC
            print "total: %d, neutral: %d, fraction:%f" % (total, count,
                                                           count * 1.0 / total)
            print "notopen: %d, neutral: %d, fraction:%f" % (
                count_notopen, count, count * 1.0 / count_notopen)
            break

    cv2.destroyWindow(window_name)
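
get_facelandmark and get_feature are not shown. Example No. 14 indexes face_landmarks_list[0][featureKey] and flattens (x, y) points, which matches the face_recognition library's face_landmarks output. A sketch under that assumption (the facial_features list and the flattening order are guesses):

import face_recognition

facial_features = ['chin', 'left_eyebrow', 'right_eyebrow', 'nose_bridge',
                   'nose_tip', 'left_eye', 'right_eye', 'top_lip', 'bottom_lip']


def get_facelandmark(face_image):
    # hypothetical helper: one dict of named landmark point lists per face
    landmarks = face_recognition.face_landmarks(face_image)
    return landmarks if landmarks else None


def get_feature(face_landmarks_list):
    # hypothetical helper: flatten the first face's landmarks to [x0, y0, x1, y1, ...]
    feature = []
    for key in facial_features:
        for (px, py) in face_landmarks_list[0][key]:
            feature += [px, py]
    return feature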