Code Example #1
def main(model, image_path):
    emotions = ['neutral', 'anger', 'disgust', 'happy', 'sadness', 'surprise']
    # Read the image
    image = cv2.imread(image_path, cv2.IMREAD_UNCHANGED)
    #gray = cv2.cvtColor(image, cv2.COLOR_BGR2GRAY)

    # Draw a rectangle around the faces
    for normalized_face, (x, y, w, h) in find_faces(image):
        #print((x, y, w, h))
        emotion = model.predict(normalized_face)  # do prediction
        emotion = emotion[0]
        print(emotions[emotion])
        # try to add an emoji to an existing face
        s_img = cv2.imread("./emojis/" + emotions[emotion] + ".png",
                           cv2.IMREAD_UNCHANGED)
        s_height, s_width, s_channels = s_img.shape
        l_img = image

        resize_ratio = round(w * 10 / (10.0 * s_width), 5)

        s_img = cv2.resize(s_img, (0, 0), fx=resize_ratio, fy=resize_ratio)

        draw_with_alpha(l_img, s_img, (x, y, w, h))
    cv2.imwrite("result-emoji.jpg", image)
    cv2.imshow("Faces found", image)
    cv2.waitKey(0)
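Note: every snippet on this page relies on two helpers, find_faces and draw_with_alpha, that come from the surrounding projects and are not shown here. The code below is only a minimal sketch of what they are assumed to do (Haar-cascade face detection plus naive alpha blending); the actual implementations in those projects may differ.

import cv2

# Hypothetical helpers assumed by the snippets on this page.
_cascade = cv2.CascadeClassifier(
    cv2.data.haarcascades + 'haarcascade_frontalface_default.xml')


def find_faces(image, face_size=(350, 350)):
    """Yield (normalized_face, (x, y, w, h)) pairs for every detected face."""
    gray = cv2.cvtColor(image, cv2.COLOR_BGR2GRAY)
    for (x, y, w, h) in _cascade.detectMultiScale(gray, scaleFactor=1.1,
                                                  minNeighbors=5):
        face = cv2.resize(gray[y:y + h, x:x + w], face_size)
        yield face, (x, y, w, h)


def draw_with_alpha(background, overlay, rect):
    """Blend an RGBA overlay (e.g. an emoji PNG) onto background inside rect."""
    x, y, w, h = rect
    overlay = cv2.resize(overlay, (w, h))
    alpha = overlay[:, :, 3:] / 255.0
    roi = background[y:y + h, x:x + w]
    roi[:] = (alpha * overlay[:, :, :3] + (1 - alpha) * roi).astype(roi.dtype)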
Code Example #2
File: get_emotion.py  Project: lannp/face-emotion
def get_faces(path):
    data = []
    frame = cv2.imread(path)
    normalized_faces = find_faces(frame)
    print(normalized_faces)
    for img, (x, y, w, h) in normalized_faces:
        img = cv2.resize(img, (image_size, image_size))  # img is already the cropped face
        img = np.reshape(img, (image_size * image_size))
        data.append(img)
    return data, normalized_faces
Code Example #3
File: webcam.py  Project: xxdkjq/xxdkjq
def show_webcam_and_run(model,
                        emoticons,
                        window_size=None,
                        window_name='webcam',
                        update_time=10):
    """
    Muestra la webcam y detecta los rostros y las emociones en tiempo real para dibujar los emojis.
    :param model: Modelo para reconocer emociones.
    :param emoticons: emojis.
    :param window_size: tamao de la ventana donde estara el stream.
    :param window_name: Nombre de la ventana.
    :param update_time: tiempo para actualizar la imagen.
    """
    cv2.namedWindow(window_name, WINDOW_NORMAL)
    if window_size:
        width, height = window_size
        cv2.resizeWindow(window_name, width, height)

    # Select the camera; 0 is the local webcam
    vc = cv2.VideoCapture(
        0
    )  # http://192.168.0.2:4747/mjpegfeed for a remote Android camera via DroidCam

    # Camera resolution; defaults to the highest resolution of the camera in use

    # vc.set(cv2.CAP_PROP_FRAME_WIDTH, 1920)
    # vc.set(cv2.CAP_PROP_FRAME_HEIGHT, 1080)

    if vc.isOpened():
        read_value, webcam_image = vc.read()
    else:
        print("[ERROR] No se enontro camara.")
        return
    while read_value:
        for normalized_face, (x, y, w, h) in find_faces(webcam_image):
            prediction = model.predict(normalized_face)  # do prediction
            prediction = prediction[0]  # keep the emotion prediction used to draw the emoji
            # load the emoji to draw
            image_to_draw = emoticons[prediction.tolist().index(max(prediction))]
            # draw the emoji
            draw_with_alpha(webcam_image, image_to_draw, (x, y - 100, w, h))
        cv2.setWindowProperty(window_name, cv2.WND_PROP_FULLSCREEN,
                              cv2.WINDOW_FULLSCREEN)
        cv2.imshow(window_name, webcam_image)
        read_value, webcam_image = vc.read()
        key = cv2.waitKey(update_time)
        if key == 27:  # exit on ESC
            break
    cv2.destroyWindow(window_name)
Code Example #4
def extract_faces(emotions):
    print("Extracting faces")
    for emotion in emotions:
        photos = glob.glob('data/sorted_set/%s/*' % emotion)

        for file_number, photo in enumerate(photos):
            frame = cv2.imread(photo)
            normalized_faces = find_faces(frame)
            os.remove(photo)

            for face in normalized_faces:
                try:
                    cv2.imwrite(
                        "data/sorted_set/%s/%s.png" %
                        (emotion, file_number + 1), face[0])
                except Exception:
                    print("error in processing %s" % photo)
Code Example #5
def show_webcam_and_run(model,
                        emoticons,
                        window_size=None,
                        window_name='webcam',
                        update_time=10):
    """
    Shows webcam image, detects faces and its emotions in real time and draw emoticons over those faces.
    :param model: Learnt emotion detection model.
    :param emoticons: List of emotions images.
    :param window_size: Size of webcam image window.
    :param window_name: Name of webcam image window.
    :param update_time: Image update time interval.
    """
    cv2.namedWindow(window_name, WINDOW_NORMAL)
    if window_size:
        width, height = window_size
        cv2.resizeWindow(window_name, width, height)

    vc = cv2.VideoCapture(0)
    if vc.isOpened():
        read_value, webcam_image = vc.read()
    else:
        print("webcam not found")
        return

    while read_value:
        for normalized_face, (x, y, w, h) in find_faces(webcam_image):
            prediction = model.predict(normalized_face)  # do prediction
            if cv2.__version__ != '3.1.0':
                prediction = prediction[0]

            image_to_draw = emoticons[prediction]
            draw_with_alpha(webcam_image, image_to_draw, (x, y, w, h))

        cv2.imshow(window_name, webcam_image)
        read_value, webcam_image = vc.read()
        key = cv2.waitKey(update_time)

        if key == 27:  # exit on ESC
            break

    cv2.destroyWindow(window_name)
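A possible driver for the show_webcam_and_run variants on this page is sketched below. The model type (a Fisherfaces recognizer from opencv-contrib-python), the file paths, and the emotion order are assumptions for illustration, not taken from the original projects.

import cv2

if __name__ == '__main__':
    # Emotion order follows the list used in Code Example #1; adjust to your model.
    emotions = ['neutral', 'anger', 'disgust', 'happy', 'sadness', 'surprise']
    # One RGBA emoji image per emotion (paths are assumptions).
    emoticons = [cv2.imread('./emojis/%s.png' % e, cv2.IMREAD_UNCHANGED)
                 for e in emotions]

    # Hypothetical pre-trained Fisherfaces model; requires opencv-contrib-python.
    model = cv2.face.FisherFaceRecognizer_create()
    model.read('models/emotion_detection_model.xml')

    show_webcam_and_run(model, emoticons, window_size=(800, 600))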
Code Example #6
def extract_faces(emotions):
    """
    Crops faces in emotions images.
    :param emotions: List of emotions names.
    """
    print("Extracting faces")
    for emotion in emotions:
        photos = glob.glob('./data/sorted_set/%s/*' % emotion)

        for file_number, photo in enumerate(photos):
            frame = cv2.imread(photo)
            normalized_faces = find_faces(frame)
            os.remove(photo)

            for face in normalized_faces:
                try:
                    cv2.imwrite("./data/sorted_set/%s/%s.png" %
                                (emotion, file_number + 1),
                                face[0])  # write image
                except Exception:
                    print("error in processing %s" % photo)
Code Example #7
File: webcam.py  Project: lijian8/facemoji
def show_webcam_and_run(model, emoticons, window_size=None, window_name='webcam', update_time=10):
    """
    Shows webcam image, detects faces and its emotions in real time and draw emoticons over those faces.
    :param model: Learnt emotion detection model.
    :param emoticons: List of emotions images.
    :param window_size: Size of webcam image window.
    :param window_name: Name of webcam image window.
    :param update_time: Image update time interval.
    """
    cv2.namedWindow(window_name, WINDOW_NORMAL)
    if window_size:
        width, height = window_size
        cv2.resizeWindow(window_name, width, height)

    vc = cv2.VideoCapture(0)
    if vc.isOpened():
        read_value, webcam_image = vc.read()
    else:
        print("webcam not found")
        return

    while read_value:
        for normalized_face, (x, y, w, h) in find_faces(webcam_image):
            prediction = model.predict(normalized_face)  # do prediction
            if cv2.__version__ != '3.1.0':
                prediction = prediction[0]

            image_to_draw = emoticons[prediction]
            draw_with_alpha(webcam_image, image_to_draw, (x, y, w, h))

        cv2.imshow(window_name, webcam_image)
        read_value, webcam_image = vc.read()
        key = cv2.waitKey(update_time)

        if key == 27:  # exit on ESC
            break

    cv2.destroyWindow(window_name)
Code Example #8
File: webcam.py  Project: Ameniabdelhamid/faceEmoji
def show_webcam_and_run(model,
                        emoticons,
                        window_size=None,
                        window_name='webcam',
                        update_time=10):
    """
    Shows webcam image, detects faces and its emotions in real time and draw emoticons over those faces.
    model: learnt emotion detection model.
    emoticons: list of emotions images.
    window_size: size of webcam image window.
    window_name: name of webcam image window.
    update_time: image update time interval.
    """
    cv2.namedWindow(window_name, WINDOW_NORMAL)
    if window_size:
        width, height = window_size
        cv2.resizeWindow(window_name, width, height)

    vc = cv2.VideoCapture(0)
    if vc.isOpened():
        read_value, webcam_image = vc.read()
    else:
        print("webcam not found")  # read_value would otherwise be undefined below
        return

    while read_value:
        for normalized_face, (x, y, w, h) in find_faces(webcam_image):
            prediction = model.predict(normalized_face)  # do prediction
            prediction = prediction[0]
            emoj_to_draw = emoticons[prediction]
            draw_with_alpha(webcam_image, emoj_to_draw, (x, y, w, h))

        cv2.imshow(window_name, webcam_image)
        read_value, webcam_image = vc.read()
        key = cv2.waitKey(update_time)

        if key == 27:  # exit on ESC
            break

    cv2.destroyWindow(window_name)
Code Example #9
emotions = ['neutral', 'fear', 'happy', 'sadness', 'surprise']
for emotion in emotions:
    paths = glob.glob("data/%s/*" % (emotion))
    normalized_path = "normalized_images/%s/" % (emotion)
    if not os.path.exists(normalized_path):
        os.makedirs(normalized_path)
    test_path = "test/%s/" % (emotion)
    if not os.path.exists(test_path):
        os.makedirs(test_path)
    print("Removing old dataset")
    filelist = glob.glob("normalized_imagest/%s/*" % emotion)
    for f in filelist:
        os.remove(f)
    filelist = glob.glob("test/%s/*" % emotion)
    for f in filelist:
        os.remove(f)

    number_random = int(len(paths) / 10)
    random_paths = random.sample(paths, number_random)

    for path in paths:
        frame = cv2.imread(path)
        normalized_faces = find_faces(frame)
        for face in normalized_faces:
            if path in random_paths:
                cv2.imwrite(test_path + str(os.path.basename(path)), face[0])
            else:
                cv2.imwrite(normalized_path + str(os.path.basename(path)),
                            face[0])
Code Example #10
File: webcam.py  Project: TanushaS/SMART-MUSIC-PLAYER
def show_webcam_and_run(model,
                        emoticons,
                        window_size=None,
                        window_name='webcam',
                        update_time=10):
    """
    Shows webcam image, detects faces and its emotions in real time and draw emoticons over those faces.
    :param model: Learnt emotion detection model.
    :param emoticons: List of emotions images.
    :param window_size: Size of webcam image window.
    :param window_name: Name of webcam image window.
    :param update_time: Image update time interval.
    """
    cv2.namedWindow(window_name, WINDOW_NORMAL)
    if window_size:
        width, height = window_size
        cv2.resizeWindow(window_name, width, height)

    vc = cv2.VideoCapture(0)
    if vc.isOpened():
        read_value, webcam_image = vc.read()
    else:
        print("webcam not found")
        return

    while read_value:
        for normalized_face, (x, y, w, h) in find_faces(webcam_image):
            prediction = model.predict(cv2.resize(normalized_face,
                                                  (350, 350)))  # do prediction
            mixer.init()
            if prediction[0] == 0:
                mixer.music.load('songs/neutral.mp3')
                mixer.music.play(-1, 2.0)
                time.sleep(20)
                mixer.music.stop()
            if prediction[0] == 1:
                mixer.music.load('songs/anger.mp3')
                mixer.music.play(-1, 2.0)
                time.sleep(10)
                mixer.music.stop()
            if prediction[0] == 5:
                mixer.music.load('songs/surprise.mp3')
                mixer.music.play(-1, 2.0)
                time.sleep(10)
                mixer.music.stop()
            if prediction[0] == 3:
                mixer.music.load('songs/happy.mp3')
                mixer.music.play(-1, 2.0)
                time.sleep(10)
                mixer.music.stop()
            if prediction[0] == 4:
                mixer.music.load('songs/sadness.mp3')
                mixer.music.play(-1, 2.0)
                time.sleep(10)
                mixer.music.stop()
            image_to_draw = emoticons[prediction[0]]
            draw_with_alpha(webcam_image, image_to_draw, (x, y, w, h))

        cv2.imshow(window_name, webcam_image)
        read_value, webcam_image = vc.read()
        key = cv2.waitKey(update_time)

        if key == 27:  # exit on ESC
            break

    cv2.destroyWindow(window_name)
Code Example #11
def show_webcam_and_run(model,
                        emoticons,
                        window_size=None,
                        window_name='webcam',
                        update_time=10):
    cv2.namedWindow(window_name, WINDOW_NORMAL)
    if window_size:
        width, height = window_size
        cv2.resizeWindow(window_name, width, height)

    vc = cv2.VideoCapture(0)
    if vc.isOpened():
        read_value, webcam_image = vc.read()
    else:
        print("webcam not found")
        return
    cs, ch, cn, ca = 0, 0, 0, 0
    while read_value:
        for normalized_face, (x, y, w, h) in find_faces(webcam_image):
            prediction = model.predict(normalized_face)
            print(prediction[1])
            if (prediction[1] > 700 and prediction[1] < 860):
                prediction = 0
                cn += 1
                if (cn >= 3):
                    pattern = "songs/neutral/*"
                    filename = random.choice(glob.glob(pattern))
                    print(filename)
                    mixer.init()
                    mixer.music.load(filename)
                    mixer.music.play(-1, 0.0)
                    time.sleep(10)
                    mixer.music.stop()
                    cn = 0
            elif (prediction[1] > 420 and prediction[1] < 650):
                prediction = 2
                ch += 1
                if (ch >= 3):
                    pattern = "songs/happy/*"
                    filename = random.choice(glob.glob(pattern))
                    print(filename)
                    mixer.init()
                    mixer.music.load(filename)
                    mixer.music.play(-1, 0.0)
                    time.sleep(10)
                    mixer.music.stop()
                    ch = 0
            elif (prediction[1] > 860 and prediction[1] < 1300):
                prediction = 3
                cs += 1
                if (cs >= 3):
                    pattern = "songs/sad/*"
                    filename = random.choice(glob.glob(pattern))
                    print(filename)
                    mixer.init()
                    mixer.music.load(filename)
                    mixer.music.play(-1, 0.0)
                    time.sleep(10)
                    mixer.music.stop()
                    cs = 0
            elif (prediction[1] > 1000 and prediction[1] < 3300):
                prediction = 1
                ca += 1
                if (ca >= 3):
                    pattern = "songs/anger/*"
                    filename = random.choice(glob.glob(pattern))
                    print(filename)
                    mixer.init()
                    mixer.music.load(filename)
                    mixer.music.play(-1, 0.0)
                    time.sleep(10)
                    mixer.music.stop()
                    ca = 0
            else:
                prediction = 0
            time.sleep(2)
            image_to_draw = emoticons[prediction]
            font = cv2.FONT_HERSHEY_SIMPLEX
            text = emotions[prediction]
            webcam_image = cv2.putText(webcam_image, text, (10, 50), font, 1,
                                       (0, 255, 255), 2, cv2.LINE_AA)
            draw_with_alpha(webcam_image, image_to_draw, (x, y, w, h))
            time.sleep(2)
        cv2.imshow(window_name, webcam_image)
        read_value, webcam_image = vc.read()
        key = cv2.waitKey(update_time)

        if key == 27:
            break

    cv2.destroyWindow(window_name)
Code Example #12
def show_webcam_and_run(model,
                        emoticons,
                        window_size=None,
                        window_name='webcam',
                        update_time=10):
    """
    Shows webcam image, detects faces and its emotions in real time and draw emoticons over those faces.
    :param model: Learnt emotion detection model.
    :param emoticons: List of emotions images.
    :param window_size: Size of webcam image window.
    :param window_name: Name of webcam image window.
    :param update_time: Image update time interval.
    """
    cv2.namedWindow(window_name, WINDOW_NORMAL)
    if window_size:
        width, height = window_size
        cv2.resizeWindow(window_name, width, height)

    vc = cv2.VideoCapture("testing.mp4")

    if vc.isOpened():
        read_value, webcam_image = vc.read()
    else:
        print("webcam not found")
        return

    while True:

        for normalized_face, (x, y, w, h) in find_faces(webcam_image):
            prediction = model.predict(normalized_face)  # do prediction
            #if cv2.__version__ != '3.1.0':
            prediction = prediction[0]

            image_to_draw = emoticons[prediction]
            draw_with_alpha(webcam_image, image_to_draw, (x, y, w, h))
            print(prediction)  #target variable
        cv2.imshow(window_name, webcam_image)
        read_value, webcam_image = vc.read()
        key = cv2.waitKey(update_time)
        img = webcam_image
        #----------------------
        gray = cv2.cvtColor(img, cv2.COLOR_BGR2GRAY)

        faces = faceCascade.detectMultiScale(
            gray,
            scaleFactor=1.2,
            minNeighbors=5,
            minSize=(int(minW), int(minH)),
        )
        #lola=0
        for (x, y, w, h) in faces:

            cv2.rectangle(img, (x, y), (x + w, y + h), (0, 255, 0), 2)

            id, confidence = recognizer.predict(gray[y:y + h, x:x + w])

            # Check if confidence is less than 100 ==> "0" is perfect match
            if (confidence < 100):
                percent = 100 - confidence
                if (confidence < 60):
                    hit = 1
                    id = names[id]
                    confidence = "  {0}%".format(round(100 - confidence))
                else:
                    id = names[id]
                    confidence = "  {0}%".format(round(100 - confidence))
                    hit = 0
                    global lola
                    if lola == 2:
                        from twilio.rest import Client

                        account_sid = "AC22c8e1723b1046ac741bd563caa0995d"
                        # # our Auth Token from twilio.com/console
                        auth_token = "9422efc06f0c7ab5b637536569d2d818"

                        client = Client(account_sid, auth_token)

                        message = client.messages.create(
                            to="+919944333726",
                            from_="+12407022806",
                            body=
                            "You are travelling with an unauthorized driver. Please check!"
                        )
                    lola = 0
            else:
                id = "unknown"
                confidence = "  {0}%".format(round(100 - confidence))
                hit = 0

            cv2.putText(img, str(id), (x + 5, y - 5), font, 1, (255, 255, 255),
                        2)
            cv2.putText(img, str(confidence), (x + 5, y + h - 5), font, 1,
                        (255, 255, 0), 1)

            def face(hit):
                return hit

            #print(percent)
        cv2.imshow(window_name, img)

        #----------------------
        if key == 27:  # exit on ESC
            break

    cv2.destroyWindow(window_name)