                    color = emotion_probability * np.asarray((255, 0, 0))
                elif emotion == '厌恶':  # disgust
                    color = emotion_probability * np.asarray((0, 0, 255))
                elif emotion == '恐惧':  # fear
                    color = emotion_probability * np.asarray((255, 255, 255))
                elif emotion == '开心':  # happy
                    color = emotion_probability * np.asarray((255, 255, 0))
                elif emotion == '难过':  # sad
                    color = emotion_probability * np.asarray((0, 0, 255))
                elif emotion == '惊喜':  # surprise
                    color = emotion_probability * np.asarray((0, 255, 255))
                else:  # any other label, e.g. calm
                    color = emotion_probability * np.asarray((0, 255, 0))

                print(color)
                color = [int(i) for i in color]  # convert list elements to int
                color = tuple(color)  # the color argument must be a tuple

    ##            color = color.tolist()

    #cv2.rectangle(img, (x + 10, y + 10), (x + h - 10, y + w - 10),color, 2)
    img = chineseText.cv2ImgAddText(img, emotion, 0, 410, color, 70)

    cv2.imshow("Image", img)

    if cv2.waitKey(100) & 0xff == ord('q'):
        break
camera.release()
cv2.waitKey(0)
cv2.destroyAllWindows()
Example #2
}
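# NOTE: this example is truncated; emotion_classifier and emotion_labels are
# defined in the part cut off above. A minimal sketch of that setup, patterned
# after the gender example further down -- the imports, model path, and label
# order here are assumptions, not the original code:
import cv2
import numpy as np
import chineseText
from keras.models import load_model

emotion_classifier = load_model(
    "classifier/emotion_models/simple_CNN.hdf5")  # hypothetical path
emotion_labels = {
    0: '生气',  # angry
    1: '厌恶',  # disgust
    2: '恐惧',  # fear
    3: '开心',  # happy
    4: '难过',  # sad
    5: '惊喜',  # surprise
    6: '平静'   # calm
}  # the index order must match the trained model; this ordering is an assumption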

img = cv2.imread("D:/python/works/emotion.png")

face_classifier = cv2.CascadeClassifier(
    r"D:\pythoncode\Lib\site-packages\cv2\data\haarcascade_frontalface_default.xml"
)

gray = cv2.cvtColor(img, cv2.COLOR_BGR2GRAY)
faces = face_classifier.detectMultiScale(gray,
                                         scaleFactor=1.2,
                                         minNeighbors=3,
                                         minSize=(40, 40))
color = (255, 0, 0)

for (x, y, w, h) in faces:
    gray_face = gray[(y):(y + h), (x):(x + w)]
    gray_face = cv2.resize(gray_face, (48, 48))
    gray_face = gray_face / 255.0
    gray_face = np.expand_dims(gray_face, 0)
    gray_face = np.expand_dims(gray_face, -1)
    emotion_label_arg = np.argmax(emotion_classifier.predict(gray_face))
    emotion = emotion_labels[emotion_label_arg]
    cv2.rectangle(img, (x + 10, y + 10), (x + w - 10, y + h - 10),
                  (255, 255, 255), 2)
    img = chineseText.cv2ImgAddText(img, emotion, x + h * 0.3, y, color, 20)

cv2.imshow("Image", img)
cv2.waitKey(0)
cv2.destroyAllWindows()
Example #3
def discern(img):

    # In theory Python 3 integers are unbounded, so the frame counter could keep growing and waste memory; reset it to zero once it reaches a threshold.
    global num
    num += 1
    if (num == 1000):
        num = 0
        # clear the terminal output
        os.system("clear")

    gray = cv2.cvtColor(img, cv2.COLOR_BGR2GRAY)
    # cap = cv2.CascadeClassifier(cv2.data.haarcascades + "haarcascade_frontalface_default.xml")
    faceRects = cap.detectMultiScale(gray,
                                     scaleFactor=1.2,
                                     minNeighbors=3,
                                     minSize=(50, 50))
    if len(faceRects):
        # draw a square box around each face
        # test
        # for faceRect in faceRects:
        #     x, y, w, h = faceRect
        #     cv2.rectangle(img, (x, y), (x + h, y + w), (0, 255, 0), 3)

        # outline every face in the frame, including the eyes and mouth
        if (num % 2 == 0):
            for faceRect in faceRects:
                x, y, w, h = faceRect
                # outline the face
                cv2.rectangle(img, (x, y), (x + w, y + h), color, 2)
                # left eye
                cv2.circle(img, (x + w // 4, y + h // 4 + 30),
                           min(w // 8, h // 8), color)
                # right eye
                cv2.circle(img, (x + 3 * w // 4, y + h // 4 + 30),
                           min(w // 8, h // 8), color)
                # mouth
                cv2.rectangle(img, (x + 3 * w // 8, y + 3 * h // 4),
                              (x + 5 * w // 8, y + 7 * h // 8), color)

        # gender recognition
        if (num % 5 == 0):
            try:
                for (x, y, w, h) in faceRects:
                    face = img[(y - 60):(y + h + 60), (x - 30):(x + w + 30)]
                    face = cv2.resize(face, (48, 48))
                    face = np.expand_dims(face, 0)
                    face = face / 255.0
                    gender_label_arg = np.argmax(
                        gender_classifier.predict(face))
                    gender = gender_labels[gender_label_arg]
                    cv2.rectangle(img, (x, y), (x + w, y + h), color, 2)
                    img = chineseText.cv2ImgAddText(img, gender, x + h, y,
                                                    (224, 54, 54))
            except:
                print("Error code Oxf-001, known issue: no face detected in the image or the face's gender could not be recognized")
        if (num % 3 == 0):
            # emotion recognition
            for (x, y, w, h) in faceRects:
                gray_face = gray[(y):(y + h), (x):(x + w)]
                gray_face = cv2.resize(gray_face, (48, 48))
                gray_face = gray_face / 255.0
                gray_face = np.expand_dims(gray_face, 0)
                gray_face = np.expand_dims(gray_face, -1)
                emotion_label_arg = np.argmax(
                    emotion_classifier.predict(gray_face))
                emotion = emotion_labels[emotion_label_arg]
                img = chineseText.cv2ImgAddText(img, emotion, x + h * 0.3, y,
                                                (0, 0, 255))

        # overlay a decorative image (e.g. a hat) above each face
        try:
            for faceRect in faceRects:
                # imgCompose = cv2.imread("img/maozi-1.png")
                x, y, w, h = faceRect
                sp = imgCompose.shape
                imgComposeSizeH = int(sp[0] / sp[1] * w)
                if imgComposeSizeH > (y - 20):
                    imgComposeSizeH = (y - 20)
                imgComposeSize = cv2.resize(imgCompose, (w, imgComposeSizeH),
                                            interpolation=cv2.INTER_NEAREST)
                top = (y - imgComposeSizeH - 20)
                if top <= 0:
                    top = 0
                rows, cols, channels = imgComposeSize.shape
                roi = img[top:top + rows, x:x + cols]

                img2gray = cv2.cvtColor(imgComposeSize, cv2.COLOR_RGB2GRAY)
                ret, mask = cv2.threshold(img2gray, 10, 255, cv2.THRESH_BINARY)
                mask_inv = cv2.bitwise_not(mask)

                img1_bg = cv2.bitwise_and(roi, roi, mask=mask_inv)

                img2_fg = cv2.bitwise_and(imgComposeSize,
                                          imgComposeSize,
                                          mask=mask)

                dst = cv2.add(img1_bg, img2_fg)
                img[top:top + rows, x:x + cols] = dst
        except:
            print("Error code Oxf-002, known issue: sRGB profile problem; this error cannot be eliminated unless the image file is changed")

    # show the detection results in a window
    cv2.namedWindow("Face recognition - q exit", 0)
    # cv2.resizeWindow("Face recognition - q exit", 1280 , 720)
    cv2.imshow("Face recognition - q exit", img)
Example #4
import cv2
import numpy as np
from keras.models import load_model
import chineseText

img = cv2.imread("img/gather.png")
face_classifier = cv2.CascadeClassifier(
    "C:\Python36\Lib\site-packages\opencv-master\data\haarcascades\haarcascade_frontalface_default.xml"
)
gray = cv2.cvtColor(img, cv2.COLOR_BGR2GRAY)
faces = face_classifier.detectMultiScale(gray,
                                         scaleFactor=1.2,
                                         minNeighbors=3,
                                         minSize=(140, 140))

gender_classifier = load_model(
    "classifier/gender_models/simple_CNN.81-0.96.hdf5")
gender_labels = {0: '女', 1: '男'}  # 0: female, 1: male
color = (255, 255, 255)

for (x, y, w, h) in faces:
    face = img[(y - 60):(y + h + 60), (x - 30):(x + w + 30)]
    face = cv2.resize(face, (48, 48))
    face = np.expand_dims(face, 0)
    face = face / 255.0
    gender_label_arg = np.argmax(gender_classifier.predict(face))
    gender = gender_labels[gender_label_arg]
    cv2.rectangle(img, (x, y), (x + w, y + h), color, 2)
    img = chineseText.cv2ImgAddText(img, gender, x + h, y, color, 30)

cv2.imshow("Image", img)
cv2.waitKey(0)
cv2.destroyAllWindows()
Example #5
import cv2
from keras.models import load_model
import numpy as np
import chineseText

img = cv2.imread("img/gather.png")
face_classifier = cv2.CascadeClassifier(
    "C:\Python36\Lib\site-packages\opencv-master\data\haarcascades\haarcascade_frontalface_default.xml"
)
gray = cv2.cvtColor(img, cv2.COLOR_BGR2GRAY)
faces = face_classifier.detectMultiScale(
    gray, scaleFactor=1.2, minNeighbors=3, minSize=(140, 140))

gender_classifier = load_model(
    "classifier/gender_models/simple_CNN.81-0.96.hdf5")
gender_labels = {0: '女', 1: '男'}  # 0: female, 1: male
color = (255, 255, 255)

for (x, y, w, h) in faces:
    face = img[(y - 60):(y + h + 60), (x - 30):(x + w + 30)]
    face = cv2.resize(face, (48, 48))
    face = np.expand_dims(face, 0)
    face = face / 255.0
    gender_label_arg = np.argmax(gender_classifier.predict(face))
    gender = gender_labels[gender_label_arg]
    cv2.rectangle(img, (x, y), (x + w, y + h), color, 2)
    img = chineseText.cv2ImgAddText(img, gender, x + h, y, color, 30)

cv2.imshow("Image", img)
cv2.waitKey(0)
cv2.destroyAllWindows()
Example #6
# -*- coding: utf-8 -*-
"""
-------------------------------------------------
   Project_Name :  raspberryPi-develop  
   File Name:     chineseTest.py
   Description :
   Author :       HuHongLin
   date:          2018/8/23
-------------------------------------------------
   Change Activity:
                   2018/8/23 15:36:
-------------------------------------------------
"""

import cv2
import sys
import os.path

# sys.path.append(os.path.join(os.path.dirname(__file__), '..'))
import chineseText as text

img = cv2.imread("bd_icon.png")
img = text.cv2ImgAddText(img, "北斗启航", 50, 320, (51, 51, 51), 48)

cv2.imshow("Image", img)
cv2.waitKey(0)
cv2.destroyAllWindows()
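# Every example on this page renders Chinese labels through
# chineseText.cv2ImgAddText, which plain cv2.putText cannot do. A minimal
# sketch of how such a helper is commonly implemented with Pillow follows;
# the font file ("simsun.ttc") and the exact signature are assumptions, not
# the original module:
import cv2
import numpy as np
from PIL import Image, ImageDraw, ImageFont


def cv2ImgAddText(img, text, left, top, textColor=(0, 255, 0), textSize=20):
    # convert the BGR OpenCV array to a PIL RGB image
    if isinstance(img, np.ndarray):
        img = Image.fromarray(cv2.cvtColor(img, cv2.COLOR_BGR2RGB))
    draw = ImageDraw.Draw(img)
    # a CJK-capable TrueType font is required; the path here is an assumption
    font = ImageFont.truetype("simsun.ttc", textSize, encoding="utf-8")
    draw.text((left, top), text, textColor, font=font)
    # convert back to a BGR numpy array for further OpenCV processing
    return cv2.cvtColor(np.asarray(img), cv2.COLOR_RGB2BGR)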
Example #7
    3: '开心',  # happy
    4: '难过',  # sad
    5: '惊喜',  # surprise
    6: '平静'   # calm
}

img = cv2.imread("img/emotion/emotion.png")
face_classifier = cv2.CascadeClassifier(
    "C:\Python36\Lib\site-packages\opencv-master\data\haarcascades\haarcascade_frontalface_default.xml"
)
gray = cv2.cvtColor(img, cv2.COLOR_BGR2GRAY)
faces = face_classifier.detectMultiScale(
    gray, scaleFactor=1.2, minNeighbors=3, minSize=(40, 40))
color = (255, 0, 0)

for (x, y, w, h) in faces:
    gray_face = gray[(y):(y + h), (x):(x + w)]
    gray_face = cv2.resize(gray_face, (48, 48))
    gray_face = gray_face / 255.0
    gray_face = np.expand_dims(gray_face, 0)
    gray_face = np.expand_dims(gray_face, -1)
    emotion_label_arg = np.argmax(emotion_classifier.predict(gray_face))
    emotion = emotion_labels[emotion_label_arg]
    cv2.rectangle(img, (x + 10, y + 10), (x + w - 10, y + h - 10),
                  (255, 255, 255), 2)
    img = chineseText.cv2ImgAddText(img, emotion, x + h * 0.3, y, color, 20)

cv2.imshow("Image", img)
cv2.waitKey(0)
cv2.destroyAllWindows()