Example #1
    def predict(self,img, faces):
        analysis = []

        # getting input model shapes for inference
        emotion_target_size = self.emotion_classifier.input_shape[1:3]
        # rgb_image = load_image(img, grayscale=False)
        rgb_image = cv2.cvtColor(img.copy(), cv2.COLOR_BGR2RGB)
        # gray_image = load_image(img, grayscale=True)
        gray_image = cv2.cvtColor(img.copy(), cv2.COLOR_BGR2GRAY)
        gray_image = np.squeeze(gray_image)
        gray_image = gray_image.astype('uint8')

        for face_coordinates in faces:
            x1, x2, y1, y2 = apply_offsets(face_coordinates, self.emotion_offsets)
            gray_face = gray_image[y1:y2, x1:x2]

            try:
                gray_face = cv2.resize(gray_face, emotion_target_size)
            except cv2.error:
                # an empty or out-of-bounds crop cannot be resized
                continue

            gray_face = preprocess_input(gray_face, True)
            gray_face = np.expand_dims(gray_face, 0)
            gray_face = np.expand_dims(gray_face, -1)
            emotion_label_arg = np.argmax(self.emotion_classifier.predict(gray_face))
            emotion_text = emotion_labels[emotion_label_arg]

            color = (0, 0, 255)
            draw_bounding_box(face_coordinates, rgb_image, color)
            draw_text(face_coordinates, rgb_image, emotion_text, color, 0, -50, 1, 2)
            # output: the caller decides where this goes
            analysis.append([face_coordinates, emotion_text])
        bgr_image = cv2.cvtColor(rgb_image, cv2.COLOR_RGB2BGR)
        return analysis, bgr_image  # list of detected faces with their emotion labels
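A minimal usage sketch for the predict() method above; the EmotionAnalyzer name, the face_detection model, and the detect_faces/load_detection_model helpers are assumptions borrowed from the other examples on this page:

import cv2

analyzer = EmotionAnalyzer()  # hypothetical class exposing the predict() method above
bgr = cv2.imread('group_photo.jpg')  # OpenCV loads images in BGR order
gray = cv2.cvtColor(bgr, cv2.COLOR_BGR2GRAY)
faces = detect_faces(face_detection, gray)  # face rectangles, as expected by apply_offsets
analysis, annotated = analyzer.predict(bgr, faces)
for face_coordinates, emotion_text in analysis:
    print(face_coordinates, emotion_text)
cv2.imwrite('annotated.jpg', annotated)  # the annotated frame comes back in BGR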
Example #2
    log_file_path = base_path + dataset_name + '_emotion_training.log'
    csv_logger = CSVLogger(log_file_path, append=False)
    early_stop = EarlyStopping('val_loss', patience=patience)
    reduce_lr = ReduceLROnPlateau('val_loss',
                                  factor=0.1,
                                  patience=int(patience / 4),
                                  verbose=1)
    train_models_path = base_path + dataset_name + '_mini_XCEPTION'
    model_names = train_models_path + '.{epoch:02d}-{val_acc:.2f}.hdf5'
    model_checkpoint = ModelCheckpoint(model_names,
                                       'val_loss',
                                       verbose=1,
                                       save_best_only=True)

    callbacks = [model_checkpoint, csv_logger, early_stop, reduce_lr]

    # load_dataset:
    data_loader = DataManager(dataset_name, image_size=input_shape[:2])
    faces, emotions = data_loader.get_data()
    faces = preprocess_input(faces)
    num_samples, num_classes = emotions.shape
    train_data, val_data = split_data(faces, emotions, validation_split)
    train_faces, train_emotions = train_data
    model.fit_generator(data_generator.flow(train_faces, train_emotions,
                                            batch_size),
                        steps_per_epoch=len(train_faces) // batch_size,
                        epochs=num_epochs,
                        verbose=1,
                        callbacks=callbacks,
                        validation_data=val_data)
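The fit_generator call above relies on a data_generator that is not shown; a sketch of a typical Keras ImageDataGenerator setup consistent with that call (the augmentation values are assumptions, only .flow(x, y, batch_size) is actually required):

from keras.preprocessing.image import ImageDataGenerator

# assumed augmentation config for face crops
data_generator = ImageDataGenerator(rotation_range=10,
                                    width_shift_range=0.1,
                                    height_shift_range=0.1,
                                    zoom_range=0.1,
                                    horizontal_flip=True)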
def process_image(image, filename, model_path):

    try:
        # parameters for loading data and images
        detection_model_path = './trained_models/detection_models/haarcascade_frontalface_default.xml'
        emotion_model_path = model_path + '/emotion_models/fer2013_mini_XCEPTION.102-0.66.hdf5'
        gender_model_path = model_path + '/gender_models/simple_CNN.81-0.96.hdf5'
        emotion_labels = get_labels('fer2013')
        gender_labels = get_labels('imdb')
        font = cv2.FONT_HERSHEY_SIMPLEX

        # hyper-parameters for bounding box offsets
        # (the snippet originally set (30, 60)/(20, 40) and immediately overrode them)
        gender_offsets = (10, 10)
        emotion_offsets = (0, 0)

        # loading models
        face_detection = load_detection_model(detection_model_path)
        emotion_classifier = load_model(emotion_model_path, compile=False)
        gender_classifier = load_model(gender_model_path, compile=False)

        # getting input model shapes for inference
        emotion_target_size = emotion_classifier.input_shape[1:3]
        gender_target_size = gender_classifier.input_shape[1:3]

        # loading images
        image_array = np.frombuffer(image, np.uint8)
        unchanged_image = cv2.imdecode(image_array, cv2.IMREAD_UNCHANGED)

        rgb_image = cv2.cvtColor(unchanged_image, cv2.COLOR_BGR2RGB)
        gray_image = cv2.cvtColor(unchanged_image, cv2.COLOR_BGR2GRAY)

        faces = detect_faces(face_detection, gray_image)
        for face_coordinates in faces:
            x1, x2, y1, y2 = apply_offsets(face_coordinates, gender_offsets)
            rgb_face = rgb_image[y1:y2, x1:x2]

            x1, x2, y1, y2 = apply_offsets(face_coordinates, emotion_offsets)
            gray_face = gray_image[y1:y2, x1:x2]

            try:
                rgb_face = cv2.resize(rgb_face, gender_target_size)
                gray_face = cv2.resize(gray_face, emotion_target_size)
            except cv2.error:
                continue

            rgb_face = preprocess_input(rgb_face, False)
            rgb_face = np.expand_dims(rgb_face, 0)
            gender_prediction = gender_classifier.predict(rgb_face)
            gender_label_arg = np.argmax(gender_prediction)
            gender_text = gender_labels[gender_label_arg]

            gray_face = preprocess_input(gray_face, True)
            gray_face = np.expand_dims(gray_face, 0)
            gray_face = np.expand_dims(gray_face, -1)
            emotion_label_arg = np.argmax(emotion_classifier.predict(gray_face))
            emotion_text = emotion_labels[emotion_label_arg]

            if gender_text == gender_labels[0]:
                color = (0, 0, 255)
            else:
                color = (255, 0, 0)

            draw_bounding_box(face_coordinates, rgb_image, color)
            draw_text(face_coordinates, rgb_image, gender_text, color, 0, -20, 1, 2)
            draw_text(face_coordinates, rgb_image, emotion_text, color, 0, -50, 1, 2)
    except Exception as err:
        logging.error('Error in emotion gender processor: "{0}"'.format(err))
        return  # rgb_image may be undefined if the failure happened early

    bgr_image = cv2.cvtColor(rgb_image, cv2.COLOR_RGB2BGR)

    dirname = 'result'
    if not os.path.exists(dirname):
        os.mkdir(dirname)

    cv2.imwrite(os.path.join(dirname, filename), bgr_image)
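Because process_image decodes a raw byte buffer with np.frombuffer and cv2.imdecode, it can be fed file contents directly; a minimal sketch (the file names are placeholders):

with open('input.jpg', 'rb') as f:
    image_bytes = f.read()
process_image(image_bytes, 'output.jpg', './trained_models')  # writes result/output.jpg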
Example #4
    gray_image = cv2.cvtColor(bgr_image, cv2.COLOR_BGR2GRAY)
    rgb_image = cv2.cvtColor(bgr_image, cv2.COLOR_BGR2RGB)
    faces = detect_faces(face_detection, gray_image)
    for face_coordinates in faces:
        x1, x2, y1, y2 = apply_offsets(face_coordinates, emotion_offsets)
        gray_face = gray_image[y1:y2, x1:x2]
        # print(gray_face.shape)

        try:
            gray_face = cv2.resize(gray_face, emotion_target_size)
        except cv2.error:
            continue
        gray_face = preprocess_input(gray_face, True)  # normalization-like preprocessing, shape (64, 64)
        gray_face = np.expand_dims(gray_face, 0)   # (1, 64, 64)
        gray_face = np.expand_dims(gray_face, -1)  # (1, 64, 64, 1)
        # emotion_prediction = emotion_classifier.predict(gray_face)
        emotion_prediction = sess.run(out_softmax, feed_dict={input_x: gray_face})
        emotion_probability = np.max(emotion_prediction)
        emotion_label_arg = np.argmax(emotion_prediction)
        emotion_text = emotion_labels[emotion_label_arg]
        emotion_window.append(emotion_text)

        if len(emotion_window) > frame_window:
            emotion_window.pop(0)
        try:
            emotion_mode = mode(emotion_window)
        except Exception:
            # mode() raises when there is no unique mode in the window
            continue
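The emotion_window/frame_window pair above is a rolling majority vote over recent frames. An equivalent standalone sketch using collections.deque, which drops old entries automatically (names are illustrative):

from collections import Counter, deque

frame_window = 10
emotion_window = deque(maxlen=frame_window)  # bounded history of recent labels

def smoothed_emotion(new_label):
    emotion_window.append(new_label)
    # most_common(1) -> [(label, count)] for the majority label in the window
    return Counter(emotion_window).most_common(1)[0][0]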
    def detect_face(self, img):
        # workaround for CV2 bug
        img = copy.deepcopy(img)

        # for face detection
        detector = dlib.get_frontal_face_detector()

        input_img = cv2.cvtColor(img, cv2.COLOR_BGR2RGB)
        gray_image = cv2.cvtColor(img, cv2.COLOR_BGR2GRAY)
        img_h, img_w, _ = np.shape(input_img)

        # detect faces using dlib detector
        if RECOGNIZE_FACES:
            face_bbs, identities = self.face_recognizer.identify_image_faces(img)
        else:
            face_bbs = detector(input_img, 1)
        expanded_face_imgs = np.empty(
            (len(face_bbs), self.face_size, self.face_size, 3))
        emotion2_results = []

        # Get face images
        for i, bb in enumerate(face_bbs):
            x1, y1, x2, y2, w, h = (bb.left(), bb.top(), bb.right() + 1,
                                    bb.bottom() + 1, bb.width(), bb.height())
            expanded_face_imgs[i, :, :, :] = self.get_expanded_face(img, bb)
            reg_face = self.get_regular_face(img, bb)
            gray_face = gray_image[y1:y2, x1:x2]

            try:
                gray_face = cv2.resize(gray_face, emotion_target_size)
            except cv2.error:
                continue
            #reg_face = copy.deepcopy(reg_face)
            gray_face = preprocess_input(gray_face, True)
            gray_face = np.expand_dims(gray_face, 0)
            gray_face = np.expand_dims(gray_face, -1)
            emotion_prediction = emotion_classifier.predict(gray_face)
            emotion_probability = np.max(emotion_prediction)
            emotion_label_arg = np.argmax(emotion_prediction)
            emotion_text = emotion_labels[emotion_label_arg]
            emotion2_results.append(emotion_text)
        #  emotion2_results.append(emotion.emotionof(self.emotion_model, reg_face)[0])

        if len(expanded_face_imgs) > 0:
            # predict ages and genders of the detected faces
            results = self.model.predict(expanded_face_imgs)
            predicted_genders = results[0]
            ages = np.arange(0, 101).reshape(101, 1)
            predicted_ages = results[1].dot(ages).flatten()

        # draw results
        for i, bb in enumerate(face_bbs):

            if RECOGNIZE_FACES:
                # Display name

                label12 = "{}".format(identities[i])
                label1 = "{}, {}".format(
                    int(predicted_ages[i]),
                    "F" if predicted_genders[i][0] > 0.5 else "M")

                self.draw_label_bottom(img, (bb.left(), bb.bottom()),
                                       label1,
                                       row_index=2)
                self.draw_label_bottom(img, (bb.left(), bb.bottom() + 1),
                                       label12,
                                       row_index=0)
                ## Display emotion (fall back to 'normal' if none was computed)
                try:
                    label2 = "{}".format(emotion2_results[i])
                except IndexError:
                    label2 = 'normal'
                self.draw_label_bottom(img, (bb.left(), bb.bottom() + 1),
                                       label2,
                                       row_index=1)
            else:
                ## Display age, gender and emotion
                label2 = "{}".format(emotion2_results[i])
                self.draw_label_bottom(img, (bb.left(), bb.bottom()),
                                       label2,
                                       row_index=0)

        # draw face rectangles
        for i, bb in enumerate(face_bbs):
            x1, y1, x2, y2, w, h = (bb.left(), bb.top(), bb.right() + 1,
                                    bb.bottom() + 1, bb.width(), bb.height())
            cv2.rectangle(img, (x1, y1), (x2, y2), (255, 0, 0), 2)

        return img
    def detect_face_info(self, file_path):

        img = cv2.imread(file_path)
        # workaround for CV2 bug
        img = copy.deepcopy(img)

        # for face detection
        detector = dlib.get_frontal_face_detector()

        input_img = cv2.cvtColor(img, cv2.COLOR_BGR2RGB)
        gray_image = cv2.cvtColor(img, cv2.COLOR_BGR2GRAY)
        img_h, img_w, _ = np.shape(input_img)

        # detect faces using dlib detector
        if RECOGNIZE_FACES:
            face_bbs, identities = self.face_recognizer.identify_image_faces(img)
        else:
            face_bbs = detector(input_img, 1)
        expanded_face_imgs = np.empty(
            (len(face_bbs), self.face_size, self.face_size, 3))
        emotion2_results = []

        # Get face images
        for i, bb in enumerate(face_bbs):
            x1, y1, x2, y2, w, h = (bb.left(), bb.top(), bb.right() + 1,
                                    bb.bottom() + 1, bb.width(), bb.height())
            expanded_face_imgs[i, :, :, :] = self.get_expanded_face(img, bb)
            reg_face = self.get_regular_face(img, bb)
            gray_face = gray_image[y1:y2, x1:x2]

            try:
                gray_face = cv2.resize(gray_face, emotion_target_size)
            except cv2.error:
                continue
            #reg_face = copy.deepcopy(reg_face)
            gray_face = preprocess_input(gray_face, True)
            gray_face = np.expand_dims(gray_face, 0)
            gray_face = np.expand_dims(gray_face, -1)
            emotion_prediction = emotion_classifier.predict(gray_face)
            emotion_probability = np.max(emotion_prediction)
            emotion_label_arg = np.argmax(emotion_prediction)
            emotion_text = emotion_labels[emotion_label_arg]
            emotion2_results.append(emotion_text)

        #  emotion2_results.append(emotion.emotionof(self.emotion_model, reg_face)[0])

        if len(expanded_face_imgs) > 0:
            # predict ages and genders of the detected faces
            results = self.model.predict(expanded_face_imgs)
            predicted_genders = results[0]
            ages = np.arange(0, 101).reshape(101, 1)
            predicted_ages = results[1].dot(ages).flatten()

        all_faces_info = []
        # draw results
        for i, bb in enumerate(face_bbs):
            face_info = {
                'Name': identities[i],  # requires RECOGNIZE_FACES; otherwise identities is undefined
                'Age': int(predicted_ages[i]),
                'Gender': "F" if predicted_genders[i][0] > 0.5 else "M",
                'Emotion': emotion2_results[i]
            }
            all_faces_info.append(face_info)

        return all_faces_info
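A hedged usage sketch for detect_face_info; the FaceCV owner class is an assumption, and the Name field is only meaningful when RECOGNIZE_FACES is enabled:

app = FaceCV()  # hypothetical class that owns detect_face_info and the loaded models
for info in app.detect_face_info('visitors.jpg'):
    print('{Name}: {Gender}, age {Age}, {Emotion}'.format(**info))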
Example #7
while True:
    bgr_image = video_capture.read()[1]
    gray_image = cv2.cvtColor(bgr_image, cv2.COLOR_BGR2GRAY)  # grayscale frame
    rgb_image = cv2.cvtColor(bgr_image, cv2.COLOR_BGR2RGB)  # RGB frame
    # detect faces
    faces = detect_faces(face_detection, gray_image)

    for face_coordinates in faces:
        x1, x2, y1, y2 = apply_offsets(face_coordinates, emotion_offsets)
        gray_face = gray_image[y1:y2, x1:x2]
        try:
            gray_face = cv2.resize(gray_face, emotion_target_size)
        except cv2.error:
            continue

        gray_face = preprocess_input(gray_face, True)
        gray_face = np.expand_dims(gray_face, 0)
        gray_face = np.expand_dims(gray_face, -1)

        # emotion prediction -- grayscale input -> a score for each of the 7 labels
        emotion_prediction = emotion_classifier.predict(gray_face)
        # print('emotion_predicted:', emotion_prediction)

        emotion_probability = np.max(emotion_prediction)
        emotion_label_arg = np.argmax(emotion_prediction)

        emotion_text = emotion_labels[emotion_label_arg]
        emotion_window.append(emotion_text)

        if len(emotion_window) > frame_window:
            emotion_window.pop(0)
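The capture loop above assumes roughly this setup, following the model paths used elsewhere on this page (exact paths and window size are assumptions):

video_capture = cv2.VideoCapture(0)  # default webcam
face_detection = load_detection_model('trained_models/detection_models/haarcascade_frontalface_default.xml')
emotion_classifier = load_model('trained_models/emotion_models/fer2013_mini_XCEPTION.102-0.66.hdf5', compile=False)
emotion_target_size = emotion_classifier.input_shape[1:3]
emotion_labels = get_labels('fer2013')
emotion_offsets = (0, 0)
frame_window = 10    # smoothing window length
emotion_window = []  # recent emotion labels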
def image_emotion_gender(image):
    bgr_image = image
    rgb_image = cv2.cvtColor(bgr_image, cv2.COLOR_BGR2RGB)
    gray_image = cv2.cvtColor(bgr_image, cv2.COLOR_BGR2GRAY)


    detection_model_path = 'trained_models/detection_models/haarcascade_frontalface_default.xml'
    emotion_model_path = 'trained_models/emotion_models/fer2013_mini_XCEPTION.102-0.66.hdf5'
    gender_model_path = 'trained_models/gender_models/simple_CNN.81-0.96.hdf5'
    emotion_labels = get_labels('fer2013')
    gender_labels = get_labels('imdb')
    font = cv2.FONT_HERSHEY_SIMPLEX

    # hyper-parameters for bounding box offsets
    # (the snippet originally set (30, 60)/(20, 40) and immediately overrode them)
    gender_offsets = (10, 10)
    emotion_offsets = (0, 0)

    # loading models
    face_detection = load_detection_model(detection_model_path)
    emotion_classifier = load_model(emotion_model_path, compile=False)
    gender_classifier = load_model(gender_model_path, compile=False)

    # getting input model shapes for inference
    emotion_target_size = emotion_classifier.input_shape[1:3]
    gender_target_size = gender_classifier.input_shape[1:3]
    faces = detect_faces(face_detection, gray_image)
    for face_coordinates in faces:
        x1, x2, y1, y2 = apply_offsets(face_coordinates, gender_offsets)
        rgb_face = rgb_image[y1:y2, x1:x2]

        x1, x2, y1, y2 = apply_offsets(face_coordinates, emotion_offsets)
        gray_face = gray_image[y1:y2, x1:x2]

        try:
            rgb_face = cv2.resize(rgb_face, gender_target_size)
            gray_face = cv2.resize(gray_face, emotion_target_size)
        except cv2.error:
            continue

        rgb_face = preprocess_input(rgb_face, False)
        rgb_face = np.expand_dims(rgb_face, 0)
        gender_prediction = gender_classifier.predict(rgb_face)
        gender_label_arg = np.argmax(gender_prediction)
        gender_text = gender_labels[gender_label_arg]

        gray_face = preprocess_input(gray_face, True)
        gray_face = np.expand_dims(gray_face, 0)
        gray_face = np.expand_dims(gray_face, -1)
        emotion_label_arg = np.argmax(emotion_classifier.predict(gray_face))
        emotion_text = emotion_labels[emotion_label_arg]

        if gender_text == gender_labels[0]:
            color = (0, 0, 255)
        else:
            color = (255, 0, 0)

        draw_bounding_box(face_coordinates, rgb_image, color)
        draw_text(face_coordinates, rgb_image, gender_text, color, 0, -20, 1, 2)
        draw_text(face_coordinates, rgb_image, emotion_text, color, 0, -50, 1, 2)

    bgr_image = cv2.cvtColor(rgb_image, cv2.COLOR_RGB2BGR)
    return bgr_image
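Note that image_emotion_gender reloads all three models on every call; for repeated use the load_model calls would normally be hoisted out of the function. A minimal invocation sketch (file names are placeholders):

annotated = image_emotion_gender(cv2.imread('party.jpg'))
cv2.imwrite('party_annotated.jpg', annotated)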
Example #9
    for face_coordinates in faces:
        # for each face, compute the gender and emotion crop coordinates
        x1, x2, y1, y2 = apply_offsets(face_coordinates, gender_offsets)
        rgb_face = rgb_image[y1:y2, x1:x2]

        x1, x2, y1, y2 = apply_offsets(face_coordinates, emotion_offsets)
        gray_face = gray_image[y1:y2, x1:x2]

        # resize to the model input sizes
        try:
            rgb_face = cv2.resize(rgb_face, gender_target_size)
            gray_face = cv2.resize(gray_face, emotion_target_size)
        except cv2.error:
            continue
        ##########  emotion ##########
        gray_face = preprocess_input(gray_face, False)  # preprocessing, shape (64, 64)
        # print('gray_face0:', gray_face.shape)
        gray_face = np.expand_dims(gray_face, 0)   # (1, 64, 64)
        # print('gray_face1:', gray_face.shape)
        gray_face = np.expand_dims(gray_face, -1)  # (1, 64, 64, 1)
        # print('gray_face2:', gray_face.shape)

        # emotion_label
        # predict: a score for each label
        emotion_prediction = emotion_classifier.predict(gray_face)
        # print('pre_label:', emotion_prediction)
        # argmax picks the index of the highest score
        emotion_label_arg = np.argmax(emotion_prediction)  # index of the max value
        # print('emotion_label_arg', emotion_label_arg)
        emotion_text = emotion_labels[emotion_label_arg]  # map the index back to the label string
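The snippets on this page lean on a preprocess_input helper; a sketch of the normalization it is assumed to perform in this codebase (stated as an assumption, not the definitive implementation):

def preprocess_input(x, v2=True):
    # scale pixel values to [0, 1]; with v2, shift and stretch to [-1, 1]
    x = x.astype('float32') / 255.0
    if v2:
        x = (x - 0.5) * 2.0
    return x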
def process_image(image):
    K.clear_session()

    # parameters for loading data and images
    if sys.path[1] == '/app':
        # load model paths for heroku
        detection_model_path = sys.path[1] + '/trained_models/detection_models/haarcascade_frontalface_default.xml'
        emotion_model_path = sys.path[1] + '/trained_models/emotion_models/fer2013_mini_XCEPTION.102-0.66.hdf5'
    else:
        detection_model_path = sys.path[-1] + '/trained_models/detection_models/haarcascade_frontalface_default.xml'
        emotion_model_path = sys.path[-1] + '/trained_models/emotion_models/fer2013_mini_XCEPTION.102-0.66.hdf5'

    emotion_labels = get_labels('fer2013')
    font = cv2.FONT_HERSHEY_SIMPLEX

    # hyper-parameters for bounding box offsets (the (20, 40) default is immediately overridden)
    emotion_offsets = (0, 0)

    # loading models
    face_detection = load_detection_model(detection_model_path)
    emotion_classifier = load_model(emotion_model_path, compile=False)

    # getting input model shapes for inference
    emotion_target_size = emotion_classifier.input_shape[1:3]

    # loading images
    image_array = np.frombuffer(image, np.uint8)
    unchanged_image = cv2.imdecode(image_array, cv2.IMREAD_UNCHANGED)
    rgb_image = cv2.cvtColor(unchanged_image, cv2.COLOR_BGR2RGB)
    gray_image = cv2.cvtColor(unchanged_image, cv2.COLOR_BGR2GRAY)
    gray_image = np.squeeze(gray_image)
    gray_image = gray_image.astype('uint8')

    faces = detect_faces(face_detection, gray_image)
    emotion_text_arr = []

    for face_coordinates in faces:
        x1, x2, y1, y2 = apply_offsets(face_coordinates, emotion_offsets)
        gray_face = gray_image[y1:y2, x1:x2]

        try:
            gray_face = cv2.resize(gray_face, emotion_target_size)
        except cv2.error:
            continue

        gray_face = preprocess_input(gray_face, True)
        gray_face = np.expand_dims(gray_face, 0)
        gray_face = np.expand_dims(gray_face, -1)
        emotion_label_arg = np.argmax(emotion_classifier.predict(gray_face))
        emotion_text = emotion_labels[emotion_label_arg]
        emotion_text_arr.append(emotion_text)

        color = (255, 0, 0)

        draw_bounding_box(face_coordinates, rgb_image, color)
        draw_text(face_coordinates, rgb_image, emotion_text, color, 0, -50, 1,
                  2)

    bgr_image = cv2.cvtColor(rgb_image, cv2.COLOR_RGB2BGR)
    K.clear_session()

    return (bgr_image, emotion_text_arr)
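A sketch of calling this variant, which returns both the annotated frame and the list of emotion strings:

with open('selfie.jpg', 'rb') as f:
    annotated, emotions = process_image(f.read())
print(emotions)  # e.g. ['happy', 'neutral']
cv2.imwrite('selfie_annotated.jpg', annotated)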
def race_emotion(image_path, faces, save_path=None, task='save'):

    detection_model_path = '../trained_models/detection_models/haarcascade_frontalface_default.xml'
    emotion_model_path = '../trained_models/emotion_models/fer2013_mini_XCEPTION.102-0.66.hdf5'
    gender_model_path = '../trained_models/gender_models/simple_CNN.81-0.96.hdf5'
    emotion_labels = get_labels('fer2013')
    gender_labels = get_labels('imdb')
    font = cv2.FONT_HERSHEY_SIMPLEX

    base = os.path.basename(image_path)
    name = os.path.splitext(base)[0]
    # hyper-parameters for bounding box offsets
    # (the snippet originally set (30, 60)/(20, 40) and immediately overrode them)
    gender_offsets = (10, 10)
    emotion_offsets = (0, 0)

    # loading models
    face_detection = load_detection_model(detection_model_path)
    emotion_classifier = load_model(emotion_model_path, compile=False)
    gender_classifier = load_model(gender_model_path, compile=False)

    # getting input model shapes for inference
    emotion_target_size = emotion_classifier.input_shape[1:3]
    gender_target_size = gender_classifier.input_shape[1:3]

    # loading images
    rgb_image = load_image(image_path, grayscale=False)

    gray_image = load_image(image_path, grayscale=True)
    gray_image = np.squeeze(gray_image)
    gray_image = gray_image.astype('uint8')

    # faces = detect_faces(face_detection, gray_image)  # face rectangles are passed in by the caller
    i = 0
    for face_coordinates in faces:
        x1, x2, y1, y2 = apply_offsets(face_coordinates, gender_offsets)
        rgb_face = rgb_image[y1:y2, x1:x2]

        x1, x2, y1, y2 = apply_offsets(face_coordinates, emotion_offsets)
        gray_face = gray_image[y1:y2, x1:x2]

        try:
            rgb_face = cv2.resize(rgb_face, gender_target_size)
            gray_face = cv2.resize(gray_face, emotion_target_size)
        except cv2.error:
            print('=' * 10 + 'exception in resize')
            continue

        rgb_face = preprocess_input(rgb_face, False)
        rgb_face = np.expand_dims(rgb_face, 0)
        gender_prediction = gender_classifier.predict(rgb_face)
        gender_label_arg = np.argmax(gender_prediction)
        gender_text = gender_labels[gender_label_arg]

        gray_face = preprocess_input(gray_face, True)
        gray_face = np.expand_dims(gray_face, 0)
        gray_face = np.expand_dims(gray_face, -1)
        emotion_label_arg = np.argmax(emotion_classifier.predict(gray_face))
        emotion_text = emotion_labels[emotion_label_arg]

        if gender_text == gender_labels[0]:
            color = (0, 0, 255)
        else:
            color = (255, 0, 0)

        crop = rgb_image[y1:y2, x1:x2]
        crop = cv2.cvtColor(crop, cv2.COLOR_RGB2BGR)
        # race = find_race(crop, gender_text)
        print('emotion is:', emotion_text)  # `race` is undefined while find_race is commented out
        # cv2.imwrite('images/' + str(i) + '.png', crop)
        draw_bounding_box(face_coordinates, rgb_image, color)
        draw_text(face_coordinates, rgb_image, gender_text, color, 0, -20, 1, 2)
        draw_text(face_coordinates, rgb_image, emotion_text, color, 0, -50, 1, 2)
        i += 1

    if task == 'save':
        bgr_image = cv2.cvtColor(rgb_image, cv2.COLOR_RGB2BGR)
        cv2.imwrite('../OUT/' + name + '.png', bgr_image)
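race_emotion expects the caller to supply the face rectangles; a sketch that produces them the same way the other examples on this page do (the image path is a placeholder):

gray = np.squeeze(load_image('../test_images/crowd.jpg', grayscale=True)).astype('uint8')
face_detection = load_detection_model('../trained_models/detection_models/haarcascade_frontalface_default.xml')
faces = detect_faces(face_detection, gray)
race_emotion('../test_images/crowd.jpg', faces)  # writes ../OUT/crowd.png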
Example #12
def main():
    cv2.namedWindow('window_frame')
    video_capture = cv2.VideoCapture(0)
    while True:
        bgr_image = video_capture.read()[1]
        gray_image = cv2.cvtColor(bgr_image, cv2.COLOR_BGR2GRAY)
        rgb_image = cv2.cvtColor(bgr_image, cv2.COLOR_BGR2RGB)
        faces = detect_faces(face_detection, gray_image)

        for face_coordinates in faces:

            x1, x2, y1, y2 = apply_offsets(face_coordinates, emotion_offsets)
            gray_face = gray_image[y1:y2, x1:x2]
            try:
                gray_face = cv2.resize(gray_face, emotion_target_size)
            except cv2.error:
                continue

            gray_face = preprocess_input(gray_face, True)
            gray_face = np.expand_dims(gray_face, 0)
            gray_face = np.expand_dims(gray_face, -1)
            emotion_prediction = emotion_classifier.predict(gray_face)
            emotion_probability = np.max(emotion_prediction)
            emotion_label_arg = np.argmax(emotion_prediction)
            emotion_text = emotion_labels[emotion_label_arg]
            emotion_window.append(emotion_text)

            if len(emotion_window) > frame_window:
                emotion_window.pop(0)
            try:
                emotion_mode = mode(emotion_window)
            except Exception:
                # mode() raises when there is no unique mode in the window
                continue

            if emotion_text == 'angry':
                color = emotion_probability * np.asarray((255, 0, 0))
                print(emoji.emojize(emoji.demojize("😠")))
            elif emotion_text == 'sad':
                color = emotion_probability * np.asarray((0, 0, 255))
                print(emoji.emojize(emoji.demojize("😰")))
            elif emotion_text == 'happy':
                color = emotion_probability * np.asarray((255, 255, 0))
                print(emoji.emojize(emoji.demojize("😃")))
            elif emotion_text == 'surprise':
                color = emotion_probability * np.asarray((0, 255, 255))
                print(emoji.emojize(emoji.demojize("😯")))
            elif emotion_text == 'disgust':
                color = emotion_probability * np.asarray((0, 255, 255))
                print(emoji.emojize(emoji.demojize("😣")))
            elif emotion_text == 'fear':
                color = emotion_probability * np.asarray((0, 255, 255))
                print(emoji.emojize(emoji.demojize("😟")))
            else:
                color = emotion_probability * np.asarray((0, 255, 0))
                print(emotion_text)

            color = color.astype(int)
            color = color.tolist()

            draw_bounding_box(face_coordinates, rgb_image, color)
            draw_text(face_coordinates, rgb_image, emotion_mode, color, 0, -45,
                      1, 1)

        bgr_image = cv2.cvtColor(rgb_image, cv2.COLOR_RGB2BGR)
        cv2.imshow('window_frame', bgr_image)
        if cv2.waitKey(1) & 0xFF == ord('q'):
            break
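main() never closes its preview window on exit; a hedged entry point that adds the usual cleanup:

if __name__ == '__main__':
    try:
        main()
    finally:
        cv2.destroyAllWindows()  # close the preview window even if main() raises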
Example #13
    faces = detect_faces(face_detection, gray_image)
    for face_coordinates in faces:
        x1, x2, y1, y2 = apply_offsets(face_coordinates, gender_offsets)
        rgb_face = rgb_image[y1:y2, x1:x2]

        x1, x2, y1, y2 = apply_offsets(face_coordinates, emotion_offsets)
        gray_face = gray_image[y1:y2, x1:x2]

        try:
            rgb_face = cv2.resize(rgb_face, gender_target_size)
            gray_face = cv2.resize(gray_face, emotion_target_size)
        except cv2.error:
            continue

        rgb_face = preprocess_input(rgb_face, False)
        rgb_face = np.expand_dims(rgb_face, 0)
        gender_prediction = gender_classifier.predict(rgb_face)
        gender_label_arg = np.argmax(gender_prediction)
        gender_text = gender_labels[gender_label_arg]

        gray_face = preprocess_input(gray_face, True)
        gray_face = np.expand_dims(gray_face, 0)
        gray_face = np.expand_dims(gray_face, -1)
        emotion_label_arg = np.argmax(emotion_classifier.predict(gray_face))
        emotion_text = emotion_labels[emotion_label_arg]

        if gender_text == gender_labels[0]:
            color = (0, 0, 255)
        else:
            color = (255, 0, 0)