# Assumed imports for this example (the `inference` and `preprocess_input`
# helpers are taken here to come from the face_classification project's
# utils package, which matches the calls below):
import base64
import io
import os

import cv2
import numpy as np
from keras import backend as K
from keras.models import load_model
from PIL import Image

from utils import inference
from utils.preprocessor import preprocess_input


def frame_process(base64_string_list):
    # collected emotion labels, one per detected face
    emotion_texts = []

    ###############################################

    detection_model_path = os.path.join(
        os.getcwd(), 'trained_models/detection_models/haarcascade_frontalface_default.xml')

    emotion_model_path = os.path.join(
        os.getcwd(), 'trained_models/emotion_models/emotion_modelsfer2013_mini_XCEPTION.82-0.80.hdf5')

    emotion_labels = {0: 'confused', 1: 'neutral', 2: 'confident'}

    # hyper-parameter for the bounding-box crop
    emotion_offsets = (20, 40)

    # loading models

    face_detection = inference.load_detection_model(detection_model_path)

    emotion_classifier = load_model(emotion_model_path, compile=False)

    # getting input model shapes for inference
    emotion_target_size = emotion_classifier.input_shape[1:3]
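    # e.g. an input shape of (None, 64, 64, 1) gives a target size of (64, 64)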
    #################################################################
    for base64_string in base64_string_list:
        imgdata = base64.b64decode(base64_string)
        image = Image.open(io.BytesIO(imgdata))

        # PIL decodes to RGB, so convert from RGB (not BGR) to grayscale
        gray_image = cv2.cvtColor(np.array(image), cv2.COLOR_RGB2GRAY)
        faces = inference.detect_faces(face_detection, gray_image)

        for face_coordinates in faces:
            x1, x2, y1, y2 = inference.apply_offsets(face_coordinates,
                                                     emotion_offsets)
            gray_face = gray_image[y1:y2, x1:x2]
            try:
                gray_face = cv2.resize(gray_face, emotion_target_size)
            except cv2.error:
                # skip crops that fall outside the frame
                continue

            gray_face = preprocess_input(gray_face, True)
            gray_face = np.expand_dims(gray_face, 0)
            gray_face = np.expand_dims(gray_face, -1)
            emotion_prediction = emotion_classifier.predict(gray_face)
            emotion_label_arg = np.argmax(emotion_prediction)
            emotion_text = emotion_labels[emotion_label_arg]
            emotion_texts.append(emotion_text)

    K.clear_session()

    return emotion_texts
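

# A minimal usage sketch (assumes 'frame.jpg' is a local test image and that
# the trained_models directory sits under the current working directory):
if __name__ == '__main__':
    with open('frame.jpg', 'rb') as f:
        encoded = base64.b64encode(f.read()).decode('ascii')
    print(frame_process([encoded]))  # e.g. ['neutral']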
Example #2
def predict_gender(name):

    # loading data and images (`source` and `to_remove` are module-level
    # globals defined elsewhere in the original script)
    image_path = source + name
    # everything before the last '-' in the file name is treated as the domain
    m = re.search('.*(?=-)', name)
    if m:
        found = m.group(0)
    else:
        found = name
    result = {'male': 0, 'female': 0, 'domain': found}

    try:
        rgb_image = load_image(image_path, grayscale=False)
    except Exception:
        print("3. Doesn't open")
        if os.path.isfile(image_path):
            os.remove(image_path)
        to_remove.append(name)
        return result
    gray_image = load_image(image_path, grayscale=True)
    gray_image = np.squeeze(gray_image)
    gray_image = gray_image.astype('uint8')

    #face and gender detection
    faces = detect_faces(face_detection, gray_image)
    if len(faces) == 0:
        print('no faces')
        to_remove.append(name)

    for face_coordinates in faces:
        x1, x2, y1, y2 = apply_offsets(face_coordinates, gender_offsets)
        rgb_face = rgb_image[y1:y2, x1:x2]

        try:
            rgb_face = cv2.resize(rgb_face, gender_target_size)
        except cv2.error:
            # skip crops that fall outside the image bounds
            continue

        rgb_face = preprocess_input(rgb_face, False)
        rgb_face = np.expand_dims(rgb_face, 0)
        gender_prediction = gender_classifier.predict(rgb_face)
        gender_label_arg = np.argmax(gender_prediction)
        gender_text = gender_labels[gender_label_arg]

        if gender_text == gender_labels[0]:
            result['female'] += 1
        else:
            result['male'] += 1
    return result
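
# A minimal usage sketch. predict_gender depends on module-level state that is
# initialised elsewhere in the original script (source, to_remove,
# face_detection, gender_classifier, gender_labels, gender_offsets,
# gender_target_size); the file name below is hypothetical:
#
#   counts = predict_gender('example.com-1.jpg')
#   # -> {'male': 1, 'female': 0, 'domain': 'example.com'}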
Example #3
def ExpressionDetection(image):
    detection_model_path = "%s/%s/haarcascade_frontalface_default.xml" % (PATH, "haar_cascade")
    emotion_model_path = '%s/vision/models/fer2013_mini_XCEPTION.102-0.66.hdf5' % PATH
    emotion_labels = get_labels('fer2013')

    # loading models
    face_detection = load_detection_model(detection_model_path)

    emotion_classifier = load_model(emotion_model_path, compile=False)

    # getting input model shapes for inference
    emotion_target_size = emotion_classifier.input_shape[1:3]
    emotion_offsets = (20, 40)
    # read the input image straight to grayscale
    gray_image = cv2.imread(image, 0)
    faces = detect_faces(face_detection, gray_image)

    for face_coordinates in faces:
        x1, x2, y1, y2 = apply_offsets(face_coordinates, emotion_offsets)
        gray_face = gray_image[y1:y2, x1:x2]
        try:
            gray_face = cv2.resize(gray_face, emotion_target_size)
        except cv2.error:
            continue

        gray_face = preprocess_input(gray_face, True)
        gray_face = np.expand_dims(gray_face, 0)
        gray_face = np.expand_dims(gray_face, -1)
        emotion_prediction = emotion_classifier.predict(gray_face)
        emotion_label_arg = np.argmax(emotion_prediction)
        emotion_text = emotion_labels[emotion_label_arg]
        # return the label of the first detected face
        return emotion_text
    return None  # no face was detected
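
# A minimal usage sketch (PATH must point at the project root; the file name
# below is hypothetical):
#
#   label = ExpressionDetection('face.jpg')
#   print(label)  # e.g. 'happy', or None when no face is found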
Example #4

    # (the enclosing function definition was lost when this example was
    # extracted; `camera` is assumed to be an initialised picamera.PiCamera)
    camera.resolution = (640, 480)
    camera.framerate = 24
    time.sleep(1)
    stream = io.BytesIO()
    for _ in camera.capture_continuous(stream,
                                       format='bgr',
                                       use_video_port=True):
        bgr_image = np.frombuffer(stream.getvalue(), dtype=np.uint8)

        # one 640x480 BGR frame is 480 * 640 * 3 bytes
        bgr_image = bgr_image.reshape((480, 640, 3))
        gray_image = cv2.cvtColor(bgr_image, cv2.COLOR_BGR2GRAY)
        rgb_image = cv2.cvtColor(bgr_image, cv2.COLOR_BGR2RGB)
        faces = detect_faces(face_detection, gray_image)

        for face_coordinates in faces:
            count = count + 1  # `count` is assumed to be initialised in the lost part of this example
            x1, x2, y1, y2 = apply_offsets(face_coordinates, emotion_offsets)
            gray_face = gray_image[y1:y2, x1:x2]
            try:
                gray_face = cv2.resize(gray_face, emotion_target_size)
            except cv2.error:
                continue
            gray_face = preprocess_input(gray_face, True)  # roughly a standardisation step, shape (64, 64)
            gray_face = np.expand_dims(gray_face, 0)   # (1, 64, 64)
            gray_face = np.expand_dims(gray_face, -1)  # (1, 64, 64, 1)
            emotion_prediction = emotion_classifier.predict(gray_face)
            emotion_probability = np.max(emotion_prediction)
            emotion_label_arg = np.argmax(emotion_prediction)
Example #5
def emotion():
    # parameters for loading data and images
    emotion_text = ""
    detection_model_path = '../trained_models/detection_models/haarcascade_frontalface_default.xml'
    emotion_model_path = '../trained_models/emotion_models/fer2013_mini_XCEPTION.102-0.66.hdf5'
    emotion_labels = get_labels('fer2013')

    # hyper-parameters for bounding boxes shape
    frame_window = 10
    emotion_offsets = (20, 40)

    # loading models
    face_detection = load_detection_model(detection_model_path)
    emotion_classifier = load_model(emotion_model_path, compile=False)

    # getting input model shapes for inference
    emotion_target_size = emotion_classifier.input_shape[1:3]

    # starting lists for calculating modes
    emotion_window = []

    # starting video streaming
    cv2.namedWindow('window_frame')
    video_capture = cv2.VideoCapture(0)
    while True:
        bgr_image = video_capture.read()[1]
        gray_image = cv2.cvtColor(bgr_image, cv2.COLOR_BGR2GRAY)
        rgb_image = cv2.cvtColor(bgr_image, cv2.COLOR_BGR2RGB)
        faces = detect_faces(face_detection, gray_image)

        for face_coordinates in faces:

            x1, x2, y1, y2 = apply_offsets(face_coordinates, emotion_offsets)
            gray_face = gray_image[y1:y2, x1:x2]
            try:
                gray_face = cv2.resize(gray_face, emotion_target_size)
            except cv2.error:
                continue

            gray_face = preprocess_input(gray_face, True)
            gray_face = np.expand_dims(gray_face, 0)
            gray_face = np.expand_dims(gray_face, -1)
            emotion_prediction = emotion_classifier.predict(gray_face)
            emotion_probability = np.max(emotion_prediction)
            emotion_label_arg = np.argmax(emotion_prediction)
            emotion_text = emotion_labels[emotion_label_arg]
            emotion_window.append(emotion_text)

            if len(emotion_window) > frame_window:
                emotion_window.pop(0)
            try:
                emotion_mode = mode(emotion_window)
            except Exception:
                # statistics.mode raises when there is no unique most common value
                continue

            if emotion_text == 'angry':
                color = emotion_probability * np.asarray((255, 0, 0))
            elif emotion_text == 'sad':
                color = emotion_probability * np.asarray((0, 0, 255))
            elif emotion_text == 'happy':
                color = emotion_probability * np.asarray((255, 255, 0))
            elif emotion_text == 'surprise':
                color = emotion_probability * np.asarray((0, 255, 255))
            else:
                color = emotion_probability * np.asarray((0, 255, 0))

            color = color.astype(int)
            color = color.tolist()

            draw_bounding_box(face_coordinates, rgb_image, color)
            draw_text(face_coordinates, rgb_image, emotion_mode, color, 0, -45,
                      1, 1)

        if emotion_text:
            # release the camera before returning the first detected emotion
            video_capture.release()
            cv2.destroyAllWindows()
            return emotion_text

        bgr_image = cv2.cvtColor(rgb_image, cv2.COLOR_RGB2BGR)
        cv2.imshow('window_frame', bgr_image)
        if cv2.waitKey(1) & 0xFF == ord('q'):
            break

    video_capture.release()
    cv2.destroyAllWindows()
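
# A minimal usage sketch (requires a working webcam; blocks until a face is
# detected or 'q' is pressed in the preview window):
#
#   if __name__ == '__main__':
#       print(emotion())  # e.g. 'happy'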