Example #1
def process_image(image):

    try:
        # parameters for loading data and images
        detection_model_path = './trained_models/detection_models/haarcascade_frontalface_default.xml'
        emotion_model_path = './trained_models/emotion_models/fer2013_35a93d1a30cf52b8722837f3e096b7b1006949325de8d00f26595d5a418241d3.hdf5'
        emotion_labels = get_labels('fer2013')
        font = cv2.FONT_HERSHEY_SIMPLEX

        # hyper-parameters for bounding boxes shape
        emotion_offsets = (0, 0)

        # loading models
        face_detection = load_detection_model(detection_model_path)
        emotion_classifier = load_model(emotion_model_path, compile=False)

        # getting input model shapes for inference
        emotion_target_size = emotion_classifier.input_shape[1:3]

        # loading images
        image_array = np.frombuffer(image, np.uint8)  # np.fromstring is deprecated
        unchanged_image = cv2.imdecode(image_array, cv2.IMREAD_UNCHANGED)

        rgb_image = cv2.cvtColor(unchanged_image, cv2.COLOR_BGR2RGB)
        gray_image = cv2.cvtColor(unchanged_image, cv2.COLOR_BGR2GRAY)

        faces = detect_faces(face_detection, gray_image)
        for face_coordinates in faces:
            x1, x2, y1, y2 = apply_offsets(face_coordinates, emotion_offsets)
            gray_face = gray_image[y1:y2, x1:x2]

            try:
                gray_face = cv2.resize(gray_face, (emotion_target_size))
            except cv2.error:
                continue

            gray_face = preprocess_input(gray_face, True)
            gray_face = np.expand_dims(gray_face, 0)
            gray_face = np.expand_dims(gray_face, -1)
            emotion_label_arg = np.argmax(emotion_classifier.predict(gray_face))
            emotion_text = emotion_labels[emotion_label_arg]

            color = (255, 0, 0)

            draw_bounding_box(face_coordinates, rgb_image, color)
            draw_text(face_coordinates, rgb_image, emotion_text, color, 0, -50, 1, 2)
    except Exception as err:
        logging.error('Error in emotion gender processor: "{0}"'.format(err))

    bgr_image = cv2.cvtColor(rgb_image, cv2.COLOR_RGB2BGR)

    dirname = 'result'
    if not os.path.exists(dirname):
        os.mkdir(dirname)

    cv2.imwrite(os.path.join(dirname, 'predicted_image.png'), bgr_image)
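process_image() takes the raw encoded bytes of an image and decodes them itself with cv2.imdecode, so callers pass file contents rather than a path. A minimal invocation sketch, assuming the module is importable and ./test.jpg is a hypothetical input file:

if __name__ == '__main__':
    # pass the encoded bytes; process_image() decodes them internally
    with open('./test.jpg', 'rb') as f:
        process_image(f.read())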
Example #2
    def get_emotion(self, image_path_, face_detection, emotion_classifier,
                    gender_classifier):
        #print(face_detection)

        emotion_labels = get_labels('fer2013')
        gender_labels = get_labels('imdb')
        font = cv2.FONT_HERSHEY_SIMPLEX

        # hyper-parameters for bounding boxes shape
        # change for variance
        gender_offsets = (10, 10)
        emotion_offsets = (0, 0)

        # getting input shapes from the pre-loaded models for inference
        emotion_target_size = emotion_classifier.input_shape[1:3]
        gender_target_size = gender_classifier.input_shape[1:3]

        # defaults returned when no face is detected
        emotion_text = "happy"
        gender_text = "Unknown"

        # loading images
        rgb_image = load_image(image_path_, grayscale=False)
        gray_image = load_image(image_path_, grayscale=True)
        gray_image = np.squeeze(gray_image)
        gray_image = gray_image.astype('uint8')

        faces = detect_faces(face_detection, gray_image)
        for face_coordinates in faces:
            x1, x2, y1, y2 = apply_offsets(face_coordinates, gender_offsets)
            rgb_face = rgb_image[y1:y2, x1:x2]

            x1, x2, y1, y2 = apply_offsets(face_coordinates, emotion_offsets)
            gray_face = gray_image[y1:y2, x1:x2]

            try:
                rgb_face = cv2.resize(rgb_face, (gender_target_size))
                gray_face = cv2.resize(gray_face, (emotion_target_size))
            except cv2.error:
                continue

            rgb_face = preprocess_input(rgb_face, False)
            rgb_face = np.expand_dims(rgb_face, 0)
            gender_prediction = gender_classifier.predict(rgb_face)
            gender_label_arg = np.argmax(gender_prediction)
            gender_text = gender_labels[gender_label_arg]

            gray_face = preprocess_input(gray_face, True)
            gray_face = np.expand_dims(gray_face, 0)
            gray_face = np.expand_dims(gray_face, -1)
            emotion_label_arg = np.argmax(
                emotion_classifier.predict(gray_face))
            emotion_text = emotion_labels[emotion_label_arg]

        return emotion_text, gender_text
Example #3
    def detect(self, image_path):
        # loading images
        rgb_image = load_image(image_path, grayscale=False)
        gray_image = load_image(image_path, grayscale=True)
        gray_image = np.squeeze(gray_image)
        gray_image = gray_image.astype('uint8')

        res = []

        faces = detect_faces(self.face_detection, gray_image)
        for face_coordinates in faces:
            x1, x2, y1, y2 = apply_offsets(face_coordinates,
                                           self.gender_offsets)
            rgb_face = rgb_image[y1:y2, x1:x2]

            x1, x2, y1, y2 = apply_offsets(face_coordinates,
                                           self.emotion_offsets)
            gray_face = gray_image[y1:y2, x1:x2]

            try:
                rgb_face = cv2.resize(rgb_face, (self.gender_target_size))
                gray_face = cv2.resize(gray_face, (self.emotion_target_size))
            except cv2.error:
                continue

            rgb_face = preprocess_input(rgb_face, False)
            rgb_face = np.expand_dims(rgb_face, 0)
            gender_prediction = self.gender_classifier.predict(rgb_face)
            gender_label_arg = np.argmax(gender_prediction)
            gender_text = self.gender_labels[gender_label_arg]

            gray_face = preprocess_input(gray_face, True)
            gray_face = np.expand_dims(gray_face, 0)
            gray_face = np.expand_dims(gray_face, -1)
            emotion_label_arg = np.argmax(
                self.emotion_classifier.predict(gray_face))
            emotion_text = self.emotion_labels[emotion_label_arg]

            if gender_text == self.gender_labels[0]:
                color = (0, 0, 255)
            else:
                color = (255, 0, 0)

            res.append({
                'left': x1,
                'top': y1,
                'right': x2,
                'bottom': y2,
                'male': gender_text,  # key name kept from the source; value is the gender label
                'emotion': emotion_text
            })

            # print(gender_text, emotion_text)
            # draw = ImageDraw.Draw(img)
            # draw.rectangle(((x1, y1), (x2,y2)), outline='red')
            # draw.text((0, 0), "something123")
        return res
Example #4
def classify_frame(bgr_image):
    # bgr_image = video_capture.read()[1]
    gray_image = cv2.cvtColor(bgr_image, cv2.COLOR_BGR2GRAY)
    rgb = cv2.cvtColor(bgr_image, cv2.COLOR_BGR2RGB)

    max_size = 512 

    # scale the longest side down to max_size, preserving aspect ratio
    scaling_factor = max(rgb.shape[0], rgb.shape[1]) / max_size

    new_width = int(rgb.shape[1] / scaling_factor)
    new_height = int(rgb.shape[0] / scaling_factor)

    rgb_image = cv2.resize(rgb, (new_width, new_height))
    gray_image = cv2.resize(gray_image, (new_width, new_height))  # keep gray in step so face crops align

    faces = detect_faces(face_detection, rgb_image)

    for face_coordinates in faces:

        x1, x2, y1, y2 = apply_offsets(face_coordinates, gender_offsets)
        rgb_face = rgb_image[y1:y2, x1:x2]

        try:
            rgb_face = cv2.resize(rgb_face, gender_target_size)
        except cv2.error:
            continue

        rgb_face = np.expand_dims(rgb_face, 0)
        rgb_face = preprocess_input(rgb_face, False)
        gender_prediction = gender_classifier.predict(rgb_face)
        gender_label_arg = np.argmax(gender_prediction)
        gender_text = gender_labels[gender_label_arg]
        gender_window.append(gender_text)

        if len(gender_window) > frame_window:
            gender_window.pop(0)
        try:
            gender_mode = mode(gender_window)
        except:
            continue

        if gender_text == gender_labels[0]:
            color = (0, 0, 255)
        else:
            color = (255, 0, 0)

        draw_bounding_box(face_coordinates, rgb_image, color)
        draw_text(face_coordinates, rgb_image, gender_mode,
                  color, 0, -20, 1, 1)
    
    bgr_image = cv2.cvtColor(rgb_image, cv2.COLOR_RGB2BGR)
    cv2.imshow('window_frame', bgr_image)
Example #5
def run_classify(image_path):
    emotion_text = ''
    gender_text = ''
    # loading images
    rgb_image = load_image(image_path, grayscale=False)
    gray_image = load_image(image_path, grayscale=True)
    gray_image = np.squeeze(gray_image)
    gray_image = gray_image.astype('uint8')

    faces = detect_faces(face_detection, gray_image)
    for face_coordinates in faces:
        x1, x2, y1, y2 = apply_offsets(face_coordinates, gender_offsets)
        rgb_face = rgb_image[y1:y2, x1:x2]

        x1, x2, y1, y2 = apply_offsets(face_coordinates, emotion_offsets)
        gray_face = gray_image[y1:y2, x1:x2]

        try:
            rgb_face = cv2.resize(rgb_face, (gender_target_size))
            gray_face = cv2.resize(gray_face, (emotion_target_size))
        except cv2.error:
            continue

        rgb_face = preprocess_input(rgb_face, False)
        rgb_face = np.expand_dims(rgb_face, 0)
        gender_prediction = gender_classifier.predict(rgb_face)
        gender_label_arg = np.argmax(gender_prediction)
        gender_text = gender_labels[gender_label_arg]

        gray_face = preprocess_input(gray_face, True)
        gray_face = np.expand_dims(gray_face, 0)
        gray_face = np.expand_dims(gray_face, -1)
        emotion_label_arg = np.argmax(emotion_classifier.predict(gray_face))
        emotion_text = emotion_labels[emotion_label_arg]

        if gender_text == gender_labels[0]:
            color = (0, 0, 255)
        else:
            color = (255, 0, 0)

        draw_bounding_box(face_coordinates, rgb_image, color)
        draw_text(face_coordinates, rgb_image, gender_text, color, 0, -20, 1,
                  2)
        draw_text(face_coordinates, rgb_image, emotion_text, color, 0, -50, 1,
                  2)

    bgr_image = cv2.cvtColor(rgb_image, cv2.COLOR_RGB2BGR)
    # 'dir' is assumed to be a module-level output directory string (it shadows the builtin)
    cv2.imwrite(os.path.join(dir, '../images/predicted_test_image.png'),
                bgr_image)

    return [gender_text, emotion_text]
Example #6
def make_label(image_path):
    # loading models
    face_detection = load_detection_model(detection_model_path)
    emotion_classifier = load_model(emotion_model_path, compile=False)
    gender_classifier = load_model(gender_model_path, compile=False)

    # getting input model shapes for inference
    emotion_target_size = emotion_classifier.input_shape[1:3]
    gender_target_size = gender_classifier.input_shape[1:3]

    # loading images
    rgb_image = load_image(image_path, grayscale=False)
    gray_image = load_image(image_path, grayscale=True)
    gray_image = np.squeeze(gray_image)
    gray_image = gray_image.astype('uint8')

    emotion_text = None  # default when no face is detected
    faces = detect_faces(face_detection, gray_image)
    for face_coordinates in faces:
        x1, x2, y1, y2 = apply_offsets(face_coordinates, gender_offsets)
        rgb_face = rgb_image[y1:y2, x1:x2]

        x1, x2, y1, y2 = apply_offsets(face_coordinates, emotion_offsets)
        gray_face = gray_image[y1:y2, x1:x2]

        try:
            rgb_face = cv2.resize(rgb_face, (gender_target_size))
            gray_face = cv2.resize(gray_face, (emotion_target_size))
        except cv2.error:
            continue

        rgb_face = preprocess_input(rgb_face, False)
        rgb_face = np.expand_dims(rgb_face, 0)
        gender_prediction = gender_classifier.predict(rgb_face)
        gender_label_arg = np.argmax(gender_prediction)
        gender_text = gender_labels[gender_label_arg]

        gray_face = preprocess_input(gray_face, True)
        gray_face = np.expand_dims(gray_face, 0)
        gray_face = np.expand_dims(gray_face, -1)
        emotion_label_arg = np.argmax(emotion_classifier.predict(gray_face))
        emotion_text = emotion_labels[emotion_label_arg]

        if gender_text == gender_labels[0]:
            color = (0, 0, 255)
        else:
            color = (255, 0, 0)

    return emotion_text
Example #7
def predict_emotion_gender(rgb_face, gray_face):

    rgb_face = preprocess_input(rgb_face, False)
    rgb_face = np.expand_dims(rgb_face, 0)
    gender_prediction = gender_classifier.predict(rgb_face)
    gender_label_arg = np.argmax(gender_prediction)
    gender_text = gender_labels[gender_label_arg]

    gray_face = preprocess_input(gray_face, True)
    gray_face = np.expand_dims(gray_face, 0)
    gray_face = np.expand_dims(gray_face, -1)
    emotion_label_arg = np.argmax(emotion_classifier.predict(gray_face))
    emotion_text = emotion_labels[emotion_label_arg]

    return (emotion_text, gender_text)
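predict_emotion_gender() relies on module-level classifiers and label maps. A minimal sketch of the globals it assumes, using the loading helpers that appear in the other examples; the model paths and the utils import layout are assumptions:

from keras.models import load_model
from utils.datasets import get_labels  # assumed project layout

# hypothetical paths; substitute the project's .hdf5 files
emotion_classifier = load_model('emotion_model.hdf5', compile=False)
gender_classifier = load_model('gender_model.hdf5', compile=False)
emotion_labels = get_labels('fer2013')
gender_labels = get_labels('imdb')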
Example #8
    def det_emo_gen_reco(self, frame):
        self.res = []
        self.emotion = []
        self.gender = []
        bgr_image = frame
        gray_image = cv2.cvtColor(bgr_image, cv2.COLOR_BGR2GRAY)
        rgb_image = cv2.cvtColor(bgr_image, cv2.COLOR_BGR2RGB)
        bounding_boxes = detect_faces(face_detection, gray_image)

        for face_coordinates in bounding_boxes:
            emo_gen = []
            res_dic = {}
            x1, x2, y1, y2 = apply_offsets(face_coordinates, gender_offsets)
            rgb_face = rgb_image[y1:y2, x1:x2]

            x1, x2, y1, y2 = apply_offsets(face_coordinates, emotion_offsets)
            gray_face = gray_image[y1:y2, x1:x2]
            try:
                rgb_face = cv2.resize(rgb_face, (gender_target_size))
                gray_face = cv2.resize(gray_face, (emotion_target_size))
            except cv2.error:
                continue
            gray_face = preprocess_input(gray_face, True)  # True matches the emotion preprocessing in the sibling examples
            gray_face = np.expand_dims(gray_face, 0)
            gray_face = np.expand_dims(gray_face, -1)
            emotion_label_arg = np.argmax(
                emotion_classifier.predict(gray_face))
            emotion_text = emotion_labels[emotion_label_arg]
            self.emotion.append(emotion_text)
            emo_gen.append(emotion_text)

            rgb_face = np.expand_dims(rgb_face, 0)
            rgb_face = preprocess_input(rgb_face, False)
            gender_prediction = gender_classifier.predict(rgb_face)
            gender_label_arg = np.argmax(gender_prediction)
            gender_text = gender_labels[gender_label_arg]
            self.gender.append(gender_text)
            emo_gen.append(gender_text)
            #print (face_coordinates)
            #res_dic[emotion_text] = face_coordinates
            res_dic[emotion_text + ',' + gender_text] = (int(x1), int(y1),
                                                         int(x2), int(y2))
            #res_dic[emotion_text+','+gender_text] = (int(face_coordinates[0]),int(face_coordinates[1]),int(face_coordinates[2]),int(face_coordinates[3]))
            self.res.append(res_dic)
        print(self.res)
        print(type(self.res))

        return self.res
Example #9
def emotion_recogination(image_path):
    frame = cv2.imread(image_path)

    gray_image = cv2.cvtColor(frame, cv2.COLOR_BGR2GRAY)
    rgb_image = cv2.cvtColor(frame, cv2.COLOR_BGR2RGB)

    faces = detector(rgb_image)

    for face_coordinates in faces:
        x1, x2, y1, y2 = apply_offsets(face_utils.rect_to_bb(face_coordinates),
                                       emotion_offsets)
        gray_face = gray_image[y1:y2, x1:x2]
        try:
            gray_face = cv2.resize(gray_face, (emotion_target_size))
        except cv2.error:
            continue
        gray_face = preprocess_input(gray_face, True)
        gray_face = np.expand_dims(gray_face, 0)
        gray_face = np.expand_dims(gray_face, -1)
        emotion_prediction = emotion_classifier.predict(gray_face)
        emotion_probability = np.max(emotion_prediction)
        emotion_label_arg = np.argmax(emotion_prediction)
        emotion_text = emotion_labels[emotion_label_arg]
        print(emotion_text)
        return emotion_text
Example #10
    def predict(self,image):
        gray_image = cv2.cvtColor(image, cv2.COLOR_BGR2GRAY)
        rgb_image = cv2.cvtColor(image, cv2.COLOR_BGR2RGB)
        faces = self.face_cascade.detectMultiScale(gray_image, scaleFactor=1.1,
                                                   minNeighbors=5, minSize=(30, 30),
                                                   flags=cv2.CASCADE_SCALE_IMAGE)
        emotion_window = []
        for face_coordinates in faces:
            x1, x2, y1, y2 = apply_offsets(face_coordinates, self.emotion_offsets)
            gray_face = gray_image[y1:y2, x1:x2]
            try:
                gray_face = cv2.resize(gray_face, (self.emotion_target_size))
            except cv2.error:
                continue
                
            gray_face = preprocess_input(gray_face, True)
            gray_face = np.expand_dims(gray_face, 0)
            gray_face = np.expand_dims(gray_face, -1)
            emotion_prediction = self.emotion_classifier.predict(gray_face)
            emotion_probability = np.max(emotion_prediction)
            emotion_label_arg = np.argmax(emotion_prediction)
            emotion_text = self.emotion_labels[emotion_label_arg]

            return emotion_text, emotion_label_arg

        return 'null', 6
Example #11
def emotion_face(bgr_image_base64):
    bgr_image = data_uri_to_cv2_img(bgr_image_base64)
    gray_image = cv2.cvtColor(bgr_image, cv2.COLOR_BGR2GRAY)
    rgb_image = cv2.cvtColor(bgr_image, cv2.COLOR_BGR2RGB)
    faces = face_cascade.detectMultiScale(gray_image,
                                          scaleFactor=1.1,
                                          minNeighbors=5,
                                          minSize=(30, 30),
                                          flags=cv2.CASCADE_SCALE_IMAGE)

    for face_coordinates in faces:
        x1, x2, y1, y2 = apply_offsets(face_coordinates, emotion_offsets)
        gray_face = gray_image[y1:y2, x1:x2]
        try:
            gray_face = cv2.resize(gray_face, (emotion_target_size))
        except cv2.error:
            continue
        gray_face = preprocess_input(gray_face, True)
        gray_face = np.expand_dims(gray_face, 0)
        gray_face = np.expand_dims(gray_face, -1)
        with graph.as_default():
            emotion_prediction = emotion_classifier.predict(gray_face)
        emotion_probability = np.max(emotion_prediction)
        emotion_label_arg = np.argmax(emotion_prediction)
        emotion_text = emotion_labels[emotion_label_arg]
        print("emotion_text", emotion_text)
        return emotion_text
Example #12
def main():
    true = 0
    emotion_model_path = '../trained_models/emotion_models/RAF/RAF_mini_XCEPTION.76-0.82.hdf5'
    emotion_classifier = load_model(emotion_model_path, compile=False)
    np.set_printoptions(precision=2)
    # loading dataset
    data_loader = DataManager(dataset_name, image_size=input_shape[:2])
    train_faces, train_emotions, test_faces, test_emotions = data_loader.get_data()
    test_faces = preprocess_input(test_faces)
    num_samples, num_classes = test_emotions.shape
    predictions = emotion_classifier.predict(test_faces)
    predictions = np.argmax(predictions, axis=1)
    real_emotion = np.argmax(test_emotions, axis=1)
    # for i in range(num_samples):
    #     if predictions[i] == real_emotion[i]:
    #         true += 1
    # acc = 100. * true / num_samples
    conf_mat = confusion_matrix(real_emotion, predictions)
    plt.figure(figsize=(5, 4))
    plot_confusion_matrix(
        conf_mat,
        classes=['Sur', 'Fea', 'Dis', 'Hap', 'Sad', 'Ang', 'Neu'],
        normalize=True,
    )
    # title='Normalized Confusion Matrix'
    plt.savefig(os.path.join(save_path,
                             dataset_name + 'Confusion Matrix8-2.pdf'),
                bbox_inches='tight',
                dpi=600)
    plt.show()
    plt.close()
Example #13
    def detect_face(self):
        face_cascade = cv2.CascadeClassifier(self.CASE_PATH)

        # 0 means the default video capture device in OS
        video_capture = cv2.VideoCapture(0)
        # infinite loop, break by key ESC
        while True:
            if not video_capture.isOpened():
                sleep(5)
                continue  # retry until the camera is available
            # Capture frame-by-frame
            ret, frame = video_capture.read()
            gray = cv2.cvtColor(frame, cv2.COLOR_BGR2GRAY)
            faces = face_cascade.detectMultiScale(gray,
                                                  scaleFactor=1.2,
                                                  minNeighbors=10,
                                                  minSize=(self.face_size,
                                                           self.face_size))
            # placeholder for cropped faces
            face_imgs = np.empty(
                (len(faces), self.face_size, self.face_size, 3))
            for i, face in enumerate(faces):
                face_img, cropped = self.crop_face(frame,
                                                   face,
                                                   margin=40,
                                                   size=self.face_size)
                (x, y, w, h) = cropped
                cv2.rectangle(frame, (x, y), (x + w, y + h), (0, 128, 0), 2)
                face_imgs[i, :, :, :] = face_img
                x1, x2, y1, y2 = apply_offsets(face, emotion_offsets)
                gray_face = gray[y1:y2, x1:x2]
                try:
                    gray_face = cv2.resize(gray_face, (emotion_target_size))
                except cv2.error:
                    continue
                #cv2.imshow("gray-face",gray_face)
            if len(face_imgs) > 0:
                results = self.model.predict(face_imgs)
                predicted_genders = results[0]
                ages = np.arange(0, 101).reshape(101, 1)
                predicted_ages = results[1].dot(ages).flatten()
                # emotion is computed from the last detected face's crop
                gray_face = preprocess_input(gray_face, False)
                gray_face = np.expand_dims(gray_face, 0)
                gray_face = np.expand_dims(gray_face, -1)
                emotion_label_arg = np.argmax(
                    emotion_classifier.predict(gray_face))
                emotion_text = emotion_labels[emotion_label_arg]
            # draw results
            for i, face in enumerate(faces):
                label = "{}, {}".format(
                    int(predicted_ages[i]),
                    "F" if predicted_genders[i][0] > 0.5 else "M")
                label = label + ", " + emotion_text.upper()
                self.draw_label(frame, (face[0], face[1]), label)

            cv2.imshow('Detection', frame)
            if cv2.waitKey(5) == 27:  # ESC key press
                break
        # When everything is done, release the capture
        video_capture.release()
        cv2.destroyAllWindows()
Example #14
def main():
    emotion_model_path = '../trained_models/emotion_models/fer2013/fer2013_mini_XCEPTION.102-0.66.hdf5'
    emotion_classifier = load_model(emotion_model_path, compile=False)
    np.set_printoptions(precision=2)
    # loading dataset
    data_loader = DataManager(dataset_name, image_size=input_shape[:2])
    faces, emotions = data_loader.get_data()
    faces = preprocess_input(faces)
    num_samples, num_classes = emotions.shape
    train_data, val_data = split_data(faces, emotions, validation_split)
    val_faces, val_emotions = val_data
    predictions = emotion_classifier.predict(val_faces)
    predictions = np.argmax(predictions, axis=1)
    real_emotions = np.argmax(val_emotions, axis=1)

    conf_mat = confusion_matrix(real_emotions, predictions)
    plt.figure(figsize=(10, 8))
    plot_confusion_matrix(
        conf_mat,
        classes=['Ang', 'Dis', 'Fea', 'Hap', 'Sad', 'Sur', 'Neu'],
        normalize=True)

    # plot_confusion_matrix(conf_mat, classes=['AN', 'DI', 'AF',
    #                                          'HA', 'SA', 'SU', 'NE'],
    #                       normalize=True)
    plt.show()
Example #15
def getFaceDetails(image_np):
    gray_image = cv2.cvtColor(image_np, cv2.COLOR_RGB2GRAY) # the model requires a grayscale image

    faces = detect_faces(face_detection_model, gray_image) # pass in the grayscale image

    if len(faces) > 0: # make sure a face was found
        draw_bounding_box(faces[0], image_np) # draws a square around the face, just for testing purposes
        
        # doing preprocessing on grayscale image, found from the model creator's example
        x1, x2, y1, y2 = apply_offsets(faces[0], emotion_offsets) 
        gray_face = gray_image[y1:y2, x1:x2]
        gray_face = cv2.resize(gray_face, (emotion_target_size))
        gray_face = preprocess_input(gray_face, True)
        gray_face = np.expand_dims(gray_face, 0)
        gray_face = np.expand_dims(gray_face, -1)
        
        with graph.as_default(): # to prevent graph not found error
            emotion_prediction = emotion_classifier.predict(gray_face) # predict the emotion

        #emotion_probability = np.max(emotion_prediction) # uncomment this line to see the accuracy of the emotion
        
        emotion_label_arg = np.argmax(emotion_prediction)
    
        face_details = [emotion_labels[emotion_label_arg],
                        int(faces[0][0]), int(faces[0][1]),
                        int(faces[0][2]), int(faces[0][3])]  # structure to return emotion and face coords
    else:
        face_details = [None, None, None, None, None] # in case no face was found

    thread_storage['face'] = face_details # instead of returning, put it into dictionary
Example #16
def detection(image_path):

    # parameters for loading data and images
    detection_model_path = 'C:\\Users\\l1f15bscs0049\\Desktop\\haarcascade_frontalface_default.xml'

    font = cv2.FONT_HERSHEY_SIMPLEX

    # hyper-parameters for bounding boxes shape
    gender_offsets = (10, 10)

    # loading models
    face_detection = load_detection_model(detection_model_path)

    # loading images
    rgb_image = load_image(image_path, grayscale=False)
    gray_image = load_image(image_path, grayscale=True)
    gray_image = np.squeeze(gray_image)
    gray_image = gray_image.astype('uint8')
    count1 = 0
    faces = detect_faces(face_detection, gray_image)
    for face_coordinates in faces:
        x1, x2, y1, y2 = apply_offsets(face_coordinates, gender_offsets)
        rgb_face = rgb_image[y1:y2, x1:x2]

        color = (0, 0, 255)
        draw_bounding_box(face_coordinates, rgb_image, color)
        count1 += 1

    bgr_image = cv2.cvtColor(rgb_image, cv2.COLOR_RGB2BGR)
    #cv2.imwrite('C:\\Users\\l1f15bscs0049\\Desktop\\test_cases\\testt2.png', bgr_image)
    return count1
Example #17
def process_image(image):
    results = []
    image_array = np.frombuffer(image, np.uint8)  # np.fromstring is deprecated
    unchanged_image = cv2.imdecode(image_array, cv2.IMREAD_UNCHANGED)
    rgb_image = cv2.cvtColor(unchanged_image, cv2.COLOR_BGR2RGB)
    gray_image = cv2.cvtColor(unchanged_image, cv2.COLOR_BGR2GRAY)

    faces = detect_faces(face_detection, gray_image)
    for face_coordinates in faces:
        x1, x2, y1, y2 = apply_offsets(face_coordinates, emotion_offsets)
        gray_face = gray_image[y1:y2, x1:x2]
        try:
            gray_face = cv2.resize(gray_face, (emotion_target_size))
        except cv2.error:
            continue

        gray_face = preprocess_input(gray_face, True)
        gray_face = np.expand_dims(gray_face, 0)
        gray_face = np.expand_dims(gray_face, -1)
        with graph.as_default():
            emotion_label_arg = np.argmax(
                emotion_classifier.predict(gray_face))
            emotion_text = emotion_labels[emotion_label_arg]
        results.append(emotion_text)
    dic_result = {"emotions": results, "number face": len(faces)}
    return dic_result
Example #18
def process_image(image):
    results = []
    try:
        # loading images
        image_array = np.frombuffer(image, np.uint8)  # np.fromstring is deprecated
        unchanged_image = cv2.imdecode(image_array, cv2.IMREAD_UNCHANGED)

        rgb_image = cv2.cvtColor(unchanged_image, cv2.COLOR_BGR2RGB)
        gray_image = cv2.cvtColor(unchanged_image, cv2.COLOR_BGR2GRAY)

        faces = detect_faces(face_detection, gray_image)
        for face_coordinates in faces:
            x1, x2, y1, y2 = apply_offsets(face_coordinates, gender_offsets)
            rgb_face = rgb_image[y1:y2, x1:x2]

            x1, x2, y1, y2 = apply_offsets(face_coordinates, emotion_offsets)
            gray_face = gray_image[y1:y2, x1:x2]

            try:
                rgb_face = cv2.resize(rgb_face, (gender_target_size))
                gray_face = cv2.resize(gray_face, (emotion_target_size))
            except cv2.error:
                continue

            rgb_face = preprocess_input(rgb_face, False)
            rgb_face = np.expand_dims(rgb_face, 0)
            gender_prediction = gender_classifier.predict(rgb_face)
            gender_label_arg = np.argmax(gender_prediction)
            gender_text = gender_labels[gender_label_arg]

            gray_face = preprocess_input(gray_face, True)
            gray_face = np.expand_dims(gray_face, 0)
            gray_face = np.expand_dims(gray_face, -1)
            emotion_label_arg = np.argmax(
                emotion_classifier.predict(gray_face))
            emotion_text = emotion_labels[emotion_label_arg]

            results.append({
                "coords": face_coordinates.tolist(),
                "gender": gender_text,
                "emotion": emotion_text
            })
    except Exception as err:
        logging.error('Error in emotion gender processor: "{0}"'.format(err))

    return results
Example #19
def predict(image_path):
    # loading images
    start = time.time()
    rgb_image = load_image(image_path, grayscale=False)
    gray_image = load_image(image_path, grayscale=True)
    gray_image = np.squeeze(gray_image)
    gray_image = gray_image.astype('uint8')
    with graph.as_default():
        faces = detect_faces(face_detection, gray_image)
        for face_coordinates in faces:
            x1, x2, y1, y2 = apply_offsets(face_coordinates, gender_offsets)
            rgb_face = rgb_image[y1:y2, x1:x2]
            x1, x2, y1, y2 = apply_offsets(face_coordinates, emotion_offsets)
            gray_face = gray_image[y1:y2, x1:x2]
            try:
                rgb_face = cv2.resize(rgb_face, (gender_target_size))
                gray_face = cv2.resize(gray_face, (emotion_target_size))
            except cv2.error:
                continue
            rgb_face = preprocess_input(rgb_face, False)
            rgb_face = np.expand_dims(rgb_face, 0)
            gender_prediction = gender_classifier.predict(rgb_face)
            gender_label_arg = np.argmax(gender_prediction)
            gender_text = gender_labels[gender_label_arg]
            gray_face = preprocess_input(gray_face, True)
            gray_face = np.expand_dims(gray_face, 0)
            gray_face = np.expand_dims(gray_face, -1)
            emotion_label_arg = np.argmax(emotion_classifier.predict(gray_face))
            emotion_text = emotion_labels[emotion_label_arg]

            if gender_text == gender_labels[0]:
                color = (0, 0, 255)
            else:
                color = (255, 0, 0)

            draw_bounding_box(face_coordinates, rgb_image, color)
            draw_text(face_coordinates, rgb_image, gender_text, color, 0, -10, 1, 2)
            draw_text(face_coordinates, rgb_image, emotion_text, color, 0, -40, 1, 2)

        bgr_image = cv2.cvtColor(rgb_image, cv2.COLOR_RGB2BGR)
        resized_image = cv2.resize(bgr_image, (1024, 769))
    file_path = 'result/' + image_path.split('/')[-1]
    cv2.imwrite(file_path, resized_image)
    end = time.time()
    logging.info('[Time] for prediction(): {}'.format(end - start))
    return file_path
Example #20
    def get_gender(self, image_path_, face_detection, emotion_classifier,
                   gender_classifier):

        gender_labels = get_labels('imdb')

        # hyper-parameters for bounding boxes shape
        # change for variance
        gender_offsets = (10, 10)
        emotion_offsets = (0, 0)

        # getting input model shapes for inference
        gender_target_size = gender_classifier.input_shape[1:3]
        emotion_target_size = emotion_classifier.input_shape[1:3]

        # loading images
        rgb_image = load_image(image_path_, grayscale=False)
        gray_image = load_image(image_path_, grayscale=True)
        gray_image = np.squeeze(gray_image)
        gray_image = gray_image.astype('uint8')

        faces = detect_faces(face_detection, gray_image)
        for face_coordinates in faces:
            x1, x2, y1, y2 = apply_offsets(face_coordinates, gender_offsets)
            rgb_face = rgb_image[y1:y2, x1:x2]

            x1, x2, y1, y2 = apply_offsets(face_coordinates, emotion_offsets)
            gray_face = gray_image[y1:y2, x1:x2]

            try:
                rgb_face = cv2.resize(rgb_face, (gender_target_size))
                gray_face = cv2.resize(gray_face, (emotion_target_size))
            except cv2.error:
                continue

            rgb_face = preprocess_input(rgb_face, False)
            rgb_face = np.expand_dims(rgb_face, 0)
            gender_prediction = gender_classifier.predict(rgb_face)
            gender_label_arg = np.argmax(gender_prediction)
            gender_text = gender_labels[gender_label_arg]

            return gender_text
Example #21
def emotion_classificator(image):
    detected_peoples = []
    json_info = {}
    try:
        # parameters for loading data and images
        emotion_labels = get_labels('fer2013')

        # hyper-parameters for bounding boxes shape
        emotion_offsets = (0, 0)

        # getting input model shapes for inference
        emotion_target_size = _EMOTION_CLASSIFIER.input_shape[1:3]

        start_time_preprocess = datetime.now()
        gray_image = preprocess_image(image, grascale=True)

        faces = detect_faces(_FACE_DETECTION, gray_image)

        delta_preprocess = datetime.now() - start_time_preprocess
        print("delta for preprocess {0}".format(
            delta_preprocess.total_seconds() * 1000.0))
        for face_coordinates in faces:
            x1, x2, y1, y2 = apply_offsets(face_coordinates, emotion_offsets)
            gray_face = gray_image[y1:y2, x1:x2]

            try:
                gray_face = cv2.resize(gray_face, (emotion_target_size))
            except cv2.error:
                continue

            start_time = datetime.now()

            with _GRAPH.as_default():
                gray_face = preprocess_input(gray_face, True)
                gray_face = np.expand_dims(gray_face, 0)
                gray_face = np.expand_dims(gray_face, -1)
                emotion_prediction = _EMOTION_CLASSIFIER.predict(gray_face)
                emotion_label_arg = np.argmax(emotion_prediction)
                emotion_text = emotion_labels[emotion_label_arg]

            delta = datetime.now() - start_time
            print("Delta for emotion classificator {0}".format(
                delta.total_seconds() * 1000.0))

            json_info['emotion'] = "{0}:{1}".format(
                emotion_text, emotion_prediction.flat[emotion_label_arg])

            json_info['face_bound'] = [
                str(v) for v in face_coordinates.astype(int).flat]  # np.int is deprecated

            detected_peoples.append(json_info.copy())
    except Exception as err:
        logging.error('Error in emotion processor: "{0}"'.format(err))

    return detected_peoples
Example #22
    def emotionRecog(self, record=False):
        self.emotionRecord = np.zeros(7)
        time_start = time.time()
        time_cost = 0
        while True:
            ret, frame = self.camera.read()
            grayFrame = cv2.cvtColor(frame, cv2.COLOR_BGR2GRAY)
            # first detect faces; detectMultiScale returns a rectangle around each face
            faces = self.faceCascade.detectMultiScale(
                grayFrame,
                scaleFactor=1.1,
                minNeighbors=5,
                minSize=(30, 30),
                flags=cv2.CASCADE_SCALE_IMAGE
            )

            # for each detected face, extract the face region
            for (x, y, w, h) in faces:
                grayFace = grayFrame[y:y+h, x:x+w]

                try:
                    grayFace = cv2.resize(grayFace, (self.emotionTargetSize))
                except cv2.error:
                    continue

                grayFace = preprocess_input(grayFace, True)
                grayFace = np.expand_dims(grayFace, 0)
                grayFace = np.expand_dims(grayFace, -1)
                emotionPrediction = self.emotionClassifier.predict(grayFace)
                emotionProbability = np.max(emotionPrediction)
                emotionLabelArg = np.argmax(emotionPrediction)
                emotionText = self.emotionLabels[emotionLabelArg]

                if emotionText == 'angry':
                    color = emotionProbability * np.asarray((255, 0, 0))
                elif emotionText == 'sad':
                    color = emotionProbability * np.asarray((0, 0, 255))
                elif emotionText == 'happy':
                    color = emotionProbability * np.asarray((255, 255, 0))
                elif emotionText == 'surprise':
                    color = emotionProbability * np.asarray((0, 255, 255))
                else:
                    color = emotionProbability * np.asarray((0, 255, 0))

                color = color.astype(int).tolist()  # OpenCV drawing expects plain int color values

                # label the emotion and draw a box around the face
                cv2.rectangle(frame, (x, y), (x+w, y+h), color, 2)
                if record:
                    self.emotionRecord[emotionLabelArg] += 1
                    time_cost = time.time() - time_start
                    cv2.putText(frame, emotionText+' %ds' % (5-time_cost+1), (x, y - 7), 3, 1.2, color, 2, cv2.LINE_AA)
                else:
                    cv2.putText(frame, emotionText, (x, y - 7), 3, 1.2, color, 2, cv2.LINE_AA)
            if (cv2.waitKey(85) & 0xFF == ord('q')) or self.runCommand != self.RUN_EMOTION_RECOG:
                break
            if time_cost > 5:
                break
            self.display(frame)
Example #23
def recognition(f_2_s):

    # parameters for loading data and images
    #image_path = path_r
    image_path = image_handler(f_2_s)
    detection_model_path = '../trained_models/detection_models/haarcascade_frontalface_default.xml'
    emotion_model_path = '../trained_models/emotion_models/fer2013_mini_XCEPTION.102-0.66.hdf5'
    emotion_labels = get_labels('fer2013')
    font = cv2.FONT_HERSHEY_SIMPLEX

    # hyper-parameters for bounding boxes shape
    emotion_offsets = (0, 0)

    # loading models
    face_detection = load_detection_model(detection_model_path)
    emotion_classifier = load_model(emotion_model_path, compile=False)

    # getting input model shapes for inference
    emotion_target_size = emotion_classifier.input_shape[1:3]

    # loading images
    rgb_image = load_image(image_path, grayscale=False)
    gray_image = load_image(image_path, grayscale=True)
    gray_image = np.squeeze(gray_image)
    gray_image = gray_image.astype('uint8')

    faces = detect_faces(face_detection, gray_image)
    for face_coordinates in faces:

        x1, x2, y1, y2 = apply_offsets(face_coordinates, emotion_offsets)
        gray_face = gray_image[y1:y2, x1:x2]

        try:
            gray_face = cv2.resize(gray_face, (emotion_target_size))
        except cv2.error:
            continue

        gray_face = preprocess_input(gray_face, True)
        gray_face = np.expand_dims(gray_face, 0)
        gray_face = np.expand_dims(gray_face, -1)
        emotion_label_arg = np.argmax(emotion_classifier.predict(gray_face))
        emotion_text = emotion_labels[emotion_label_arg]

        color = (255, 0, 0)  # red for both the emotion text and the bounding box

        draw_bounding_box(face_coordinates, rgb_image, color)
        draw_text(face_coordinates, rgb_image, emotion_text, color, 0, -30,
                  1.5, 2)
    #if(emotion_text == ""):
    #recognition(f_2_s)

    bgr_image = cv2.cvtColor(rgb_image, cv2.COLOR_RGB2BGR)
    cv2.imwrite('../images/predicted_test_image.png', bgr_image)  # the path could be made a variable
    check_recoged_img('../images/predicted_test_image.png')
Example #24
def final_ml_predict(bgr_image):
    gray_image = cv2.cvtColor(bgr_image, cv2.COLOR_BGR2GRAY)
    rgb_image = cv2.cvtColor(bgr_image, cv2.COLOR_BGR2RGB)

    faces = face_cascade.detectMultiScale(gray_image,
                                          scaleFactor=1.1,
                                          minNeighbors=5,
                                          minSize=(30, 30),
                                          flags=cv2.CASCADE_SCALE_IMAGE)

    for face_coordinates in faces:

        x1, x2, y1, y2 = apply_offsets(face_coordinates, emotion_offsets)
        gray_face = gray_image[y1:y2, x1:x2]
        try:
            gray_face = cv2.resize(gray_face, (emotion_target_size))
        except cv2.error:
            continue

        gray_face = preprocess_input(gray_face, True)
        gray_face = np.expand_dims(gray_face, 0)
        gray_face = np.expand_dims(gray_face, -1)
        with graph.as_default():
            emotion_prediction = emotion_classifier.predict(gray_face)
        emotion_probability = np.max(emotion_prediction)
        emotion_label_arg = np.argmax(emotion_prediction)
        emotion_text = emotion_labels[emotion_label_arg]
        emotion_window.append(emotion_text)

        if len(emotion_window) > frame_window:
            emotion_window.pop(0)
        try:
            emotion_mode = mode(emotion_window)
        except:
            continue

        if emotion_text == 'angry':
            color = emotion_probability * np.asarray((255, 0, 0))
        elif emotion_text == 'sad':
            color = emotion_probability * np.asarray((0, 0, 255))
        elif emotion_text == 'happy':
            color = emotion_probability * np.asarray((255, 255, 0))
        elif emotion_text == 'surprise':
            color = emotion_probability * np.asarray((0, 255, 255))
        else:
            color = emotion_probability * np.asarray((0, 255, 0))

        color = color.astype(int)
        color = color.tolist()

        draw_bounding_box(face_coordinates, rgb_image, color)
        draw_text(face_coordinates, rgb_image, emotion_mode, color, 0, -45, 1,
                  1)

    bgr_image = cv2.cvtColor(rgb_image, cv2.COLOR_RGB2BGR)
    return bgr_image
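final_ml_predict() annotates a BGR frame and returns it, so it drops straight into a capture loop. A hypothetical driver, assuming the module-level detector, classifier, and emotion_window are initialized as in the other examples:

cap = cv2.VideoCapture(0)
while True:
    ok, frame = cap.read()
    if not ok:
        break
    cv2.imshow('annotated', final_ml_predict(frame))
    if cv2.waitKey(1) & 0xFF == ord('q'):
        break
cap.release()
cv2.destroyAllWindows()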
Example #25
def emotionimg(img):
    # bgr_image = video_capture.read()[1]
    gray_face = cv2.cvtColor(img, cv2.COLOR_BGR2GRAY)
    gray_face = cv2.resize(gray_face, (emotion_target_size))
    gray_face = preprocess_input(gray_face, True)
    gray_face = np.expand_dims(gray_face, 0)
    gray_face = np.expand_dims(gray_face, -1)
    emotion_prediction = emotion_classifier.predict(gray_face)
    emotion_label_arg = np.argmax(emotion_prediction)
    emotion_text = emotion_labels[emotion_label_arg]
    return emotion_text, emotion_prediction
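emotionimg() assumes the whole input image is already a cropped face; there is no detection step. A hypothetical call, assuming emotion_classifier, emotion_labels, and emotion_target_size are loaded at module level as in the earlier examples:

face_img = cv2.imread('face_crop.jpg')  # hypothetical pre-cropped face image
label, scores = emotionimg(face_img)
print(label, scores)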
Example #26
def detect_emotion(frame, model, emotion_classifier, emotion_target_size,
                   face_cascade, frame_window, emotion_offsets, emotion_labels):
    gray_image = cv2.cvtColor(frame, cv2.COLOR_BGR2GRAY)
    faces = face_cascade.detectMultiScale(gray_image, scaleFactor=1.1,
                                          minNeighbors=5, minSize=(30, 30),
                                          flags=cv2.CASCADE_SCALE_IMAGE)

    for face_coordinates in faces:

        x1, x2, y1, y2 = apply_offsets(face_coordinates, emotion_offsets)
        # gray_face = gray_image[y1:y2, x1:x2]
        # gray_c = cv2.cvtColor(gray_image[y1:y2, x1:x2], cv2.COLOR_BGR2GRAY)
        gray_c = gray_image[y1:y2, x1:x2]

        try:
            gray_face = cv2.resize(gray_c, (emotion_target_size))
        except cv2.error:
            continue

        gray_face = preprocess_input(gray_face, True)
        gray_face = np.expand_dims(gray_face, 0)
        gray_face = np.expand_dims(gray_face, -1)
        # print(gray_face)
        emotion_prediction = emotion_classifier.predict(gray_face)

        emotion_probability = round(np.max(emotion_prediction), 2)
        emotion_label_arg = np.argmax(emotion_prediction)
        emotion_text = emotion_labels[emotion_label_arg]
        emotion_window.append(emotion_text)
        # print(emotion_text)
        # print(emotion_window)

        if len(emotion_window) > frame_window:
            emotion_window.pop(0)
        try:
            emotion_mode = mode(emotion_window)
        except:
            continue

        # if emotion_text == 'angry':
        #     color = emotion_probability * np.asarray((255, 0, 0))
        # elif emotion_text == 'sad':
        #     color = emotion_probability * np.asarray((0, 0, 255))
        # elif emotion_text == 'happy':
        #     # print (emotion_probability)
        #     color = emotion_probability * np.asarray((255, 255, 0))
        # elif emotion_text == 'surprise':
        #     color = emotion_probability * np.asarray((0, 255, 255))
        # else:
        #     color = emotion_probability * np.asarray((0, 255, 0))

        # color = color.astype(int)
        # color = color.tolist()
        color = (255, 255, 255)
        draw_text(emotion_probability, face_coordinates, frame, emotion_mode,
                  color, 0, -20, 0.5, 1)
Example #27
def getEmotion(x1, x2, y1, y2, gray):
    gray_image = gray.copy()
    gray_face = gray_image[y1:y2, x1:x2]
    gray_face = cv2.resize(gray_face, (emotion_target_size))
    gray_face = preprocess_input(gray_face, True)
    gray_face = np.expand_dims(gray_face, 0)
    gray_face = np.expand_dims(gray_face, -1)
    emotion_prediction = emotion_classifier.predict(gray_face)
    emotion_probability = np.max(emotion_prediction)
    emotion_label_arg = np.argmax(emotion_prediction)
    emotion_text = emotion_labels[emotion_label_arg]
    return emotion_text
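getEmotion() expects face-box coordinates that have already been offset. A hypothetical caller wiring it to the detection helpers used above:

faces = detect_faces(face_detection, gray)
for face_coordinates in faces:
    x1, x2, y1, y2 = apply_offsets(face_coordinates, emotion_offsets)
    print(getEmotion(x1, x2, y1, y2, gray))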
Example #28
def emotion():
    detection_model_path = '../trained_models/detection_models/haarcascade_frontalface_default.xml'
    emotion_model_path = '../trained_models/emotion_models/fer2013_big_XCEPTION.54-0.66.hdf5'
    emotion_labels = get_labels('fer2013')

    # hyper-parameters for bounding boxes shape
    frame_window = 10
    emotion_offsets = (20, 40)

    # loading models
    face_detection = load_detection_model(detection_model_path)
    emotion_classifier = load_model(emotion_model_path, compile=False)

    # getting input model shapes for inference
    emotion_target_size = emotion_classifier.input_shape[1:3]

    # starting lists for calculating modes
    emotion_window = []
    emotion_count_array = [0] * 7

    # starting video streaming
    #cv2.namedWindow('window_frame')
    video_capture = cv2.VideoCapture(0)
    duration = 10
    start = time.time()
    while (time.time() - start) < duration:
        bgr_image = video_capture.read()[1]
        gray_image = cv2.cvtColor(bgr_image, cv2.COLOR_BGR2GRAY)
        rgb_image = cv2.cvtColor(bgr_image, cv2.COLOR_BGR2RGB)
        faces = detect_faces(face_detection, gray_image)

        for face_coordinates in faces:

            x1, x2, y1, y2 = apply_offsets(face_coordinates, emotion_offsets)
            gray_face = gray_image[y1:y2, x1:x2]
            try:
                gray_face = cv2.resize(gray_face, (emotion_target_size))
            except cv2.error:
                continue

            gray_face = preprocess_input(gray_face, True)
            gray_face = np.expand_dims(gray_face, 0)
            gray_face = np.expand_dims(gray_face, -1)
            emotion_prediction = emotion_classifier.predict(gray_face)
            emotion_probability = np.max(emotion_prediction)
            emotion_label_arg = np.argmax(emotion_prediction)
            emotion_text = emotion_labels[emotion_label_arg]

            emotion_count_array[emotion_label_arg] += 1

    video_capture.release()
    chosen = np.argmax(emotion_count_array)

    return emotion_labels[chosen]
Example #29
def analyse_image(image_path):

    print(image_path)
    print(emotion_labels)

    image = cv2.imread(image_path)

    gray_image = cv2.cvtColor(image, cv2.COLOR_BGR2GRAY)
    rgb_image = cv2.cvtColor(image, cv2.COLOR_BGR2RGB)

    faces = face_cascade.detectMultiScale(gray_image,
                                          scaleFactor=1.3,
                                          minNeighbors=5,
                                          minSize=(30, 30),
                                          flags=cv2.CASCADE_SCALE_IMAGE)

    if len(faces) == 0:
        return {'error': 'No faces found!'}

    print('faces found:' + str(len(faces)))

    for face_coordinates in faces:
        try:
            x1, x2, y1, y2 = apply_offsets(face_coordinates, emotion_offsets)
            gray_face = gray_image[y1:y2, x1:x2]

            try:
                gray_face = cv2.resize(gray_face, (emotion_target_size))
            except cv2.error:
                continue

            gray_face = preprocess_input(gray_face, True)
            gray_face = np.expand_dims(gray_face, 0)
            gray_face = np.expand_dims(gray_face, -1)
            emotion_prediction = emotion_classifier.predict(gray_face)

            # custom_prediction = emotion_prediction[0]
            # prediction_dict = {}
            # for i in range(0, len(custom_prediction)):
            #   prediction_dict[emotion_labels[i]] = custom_prediction[i]

            # emotion_probability = np.max(emotion_prediction)
            # emotion_label_arg = np.argmax(emotion_prediction)
            # emotion_text = emotion_labels[emotion_label_arg]

            if len(emotion_prediction) > 0:
                return {'result': ','.join(map(str, emotion_prediction[0]))}
            else:
                return {'error': 'no predictions!'}
        except Exception as err:
            # danger stuff
            return {'error': 'unknown error: {0}'.format(err)}
Example #30
def gender_classificator(image):
    detected_peoples = []
    json_info = {}
    try:
        # parameters for loading data and images
        gender_labels = get_labels('imdb')

        # hyper-parameters for bounding boxes shape
        gender_offsets = (10, 10)

        # getting input model shapes for inference
        gender_target_size = _GENDER_CLASSIFIER.input_shape[1:3]

        rgb_image = preprocess_image(image)
        gray_image = preprocess_image(image, grascale=True)

        faces = detect_faces(_FACE_DETECTION, gray_image)
        for face_coordinates in faces:
            x1, x2, y1, y2 = apply_offsets(face_coordinates, gender_offsets)
            rgb_face = rgb_image[y1:y2, x1:x2]

            try:
                rgb_face = cv2.resize(rgb_face, (gender_target_size))
            except cv2.error:
                continue

            start_time = datetime.now()

            with _GRAPH.as_default():
                rgb_face = preprocess_input(rgb_face, False)
                rgb_face = np.expand_dims(rgb_face, 0)
                gender_prediction = _GENDER_CLASSIFIER.predict(rgb_face)
                gender_label_arg = np.argmax(gender_prediction)
                gender_text = gender_labels[gender_label_arg]

            delta = datetime.now() - start_time
            print("Delta for gender classificator {0}".format(
                delta.total_seconds() * 1000.0))

            json_info['gender'] = "{0}:{1}".format(
                gender_text, gender_prediction.flat[gender_label_arg])

            json_info['face_bound'] = [
                str(v) for v in face_coordinates.astype(int).flat]  # np.int is deprecated

            detected_peoples.append(json_info.copy())
    except Exception as err:
        logging.error('Error in gender processor: "{0}"'.format(err))

    return detected_peoples
Example #31
def process_image(image):

    try:
        # parameters for loading data and images
        detection_model_path = './trained_models/detection_models/haarcascade_frontalface_default.xml'
        emotion_model_path = './trained_models/emotion_models/fer2013_mini_XCEPTION.102-0.66.hdf5'
        emotion_labels = get_labels('fer2013')

        # hyper-parameters for bounding boxes shape
        emotion_offsets = (0, 0)

        # loading models
        face_detection = load_detection_model(detection_model_path)
        emotion_classifier = load_model(emotion_model_path, compile=False)

        # getting input model shapes for inference
        emotion_target_size = emotion_classifier.input_shape[1:3]

        # loading images
        image_array = np.frombuffer(image, np.uint8)  # np.fromstring is deprecated
        unchanged_image = cv2.imdecode(image_array, cv2.IMREAD_UNCHANGED)
        gray_image = cv2.cvtColor(unchanged_image, cv2.COLOR_BGR2GRAY)

        faces = detect_faces(face_detection, gray_image)
        detected_emotions = []
        for face_coordinates in faces:
            x1, x2, y1, y2 = apply_offsets(face_coordinates, emotion_offsets)
            gray_face = gray_image[y1:y2, x1:x2]

            try:
                gray_face = cv2.resize(gray_face, emotion_target_size)
            except cv2.error:
                continue

            gray_face = preprocess_input(gray_face, True)
            gray_face = np.expand_dims(gray_face, 0)
            gray_face = np.expand_dims(gray_face, -1)
            emotion_label_arg = np.argmax(emotion_classifier.predict(gray_face))
            detected_emotions.append({
                "coordinates": [str(y1), str(x2), str(y2), str(x1)],
                "emotion": emotion_labels[emotion_label_arg]})

        return detected_emotions

    except Exception as err:
        logging.error('Error in emotion gender processor: "{0}"'.format(err))
        return []

Example #32
datasets = ['fer2013']
for dataset_name in datasets:
    print('Training dataset:', dataset_name)

    # callbacks
    log_file_path = base_path + dataset_name + '_emotion_training.log'
    csv_logger = CSVLogger(log_file_path, append=False)
    early_stop = EarlyStopping('val_loss', patience=patience)
    reduce_lr = ReduceLROnPlateau('val_loss', factor=0.1,
                                  patience=int(patience / 4), verbose=1)
    trained_models_path = base_path + dataset_name + '_mini_XCEPTION'
    model_names = trained_models_path + '.{epoch:02d}-{val_acc:.2f}.hdf5'
    model_checkpoint = ModelCheckpoint(model_names, 'val_loss', verbose=1,
                                       save_best_only=True)
    callbacks = [model_checkpoint, csv_logger, early_stop, reduce_lr]

    # loading dataset
    data_loader = DataManager(dataset_name, image_size=input_shape[:2])
    faces, emotions = data_loader.get_data()
    faces = preprocess_input(faces)
    num_samples, num_classes = emotions.shape
    train_data, val_data = split_data(faces, emotions, validation_split)
    train_faces, train_emotions = train_data
    model.fit_generator(data_generator.flow(train_faces, train_emotions,
                                            batch_size),
                        steps_per_epoch=len(train_faces) / batch_size,
                        epochs=num_epochs, verbose=1, callbacks=callbacks,
                        validation_data=val_data)
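This excerpt assumes several names defined earlier in the training script: batch_size, num_epochs, patience, base_path, input_shape, validation_split, data_generator, and model. A plausible setup sketch modeled on the mini_XCEPTION training code this appears to come from; treat the concrete values and the models.cnn import path as assumptions:

from keras.preprocessing.image import ImageDataGenerator
from models.cnn import mini_XCEPTION  # assumed project layout

batch_size = 32
num_epochs = 10000        # early stopping usually ends training much sooner
input_shape = (64, 64, 1)
validation_split = 0.2
patience = 50
base_path = '../trained_models/emotion_models/'

# on-the-fly augmentation for the training faces
data_generator = ImageDataGenerator(rotation_range=10,
                                    width_shift_range=0.1,
                                    height_shift_range=0.1,
                                    zoom_range=0.1,
                                    horizontal_flip=True)

model = mini_XCEPTION(input_shape, num_classes=7)
model.compile(optimizer='adam', loss='categorical_crossentropy',
              metrics=['accuracy'])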
        def callback(self,data):
                    #/////////////////////////////////////////////
                    #cv_image = bridge.imgmsg_to_cv2(image_msg, desired_encoding="passthrough")
                    #cv_image = cv2.resize(cv_image, target_size)  # resize image
                    #np_image = np.asarray(cv_image)               # read as np array
                    #np_image = np.expand_dims(np_image, axis=0)   # Add another dimension for tensorflow
                    #np_image = np_image.astype(float)  # preprocess needs float64 and img is uint8
                    #np_image = preprocess_input(np_image)         # Regularize the data
                    #/////////////////////////////////////////////
                    if(not USE_LOCAL_CAMERA):                                                    
                        try:
                                frame = self.bridge.imgmsg_to_cv2(data, desired_encoding="passthrough")
                        except CvBridgeError as e:
                                print(e)

                    # Capture frame-by-frame
                    if(USE_LOCAL_CAMERA):
                        ret, frame1 = self.video_capture.read()
                    gray_image = cv2.cvtColor(frame, cv2.COLOR_BGR2GRAY)
                    rgb_image = cv2.cvtColor(frame, cv2.COLOR_BGR2RGB)
                    faces = detect_faces(self.face_detection, gray_image)

                    for face_coordinates in faces:
                            print("inside the for loop!")
                            print(face_coordinates)

                            x1, x2, y1, y2 = apply_offsets(face_coordinates, self.emotion_offsets)
                            gray_face = gray_image[y1:y2, x1:x2]

                            try:
                                    gray_face = cv2.resize(gray_face, self.emotion_target_size)
                            except cv2.error:
                                    continue

                    
                            gray_face = preprocess_input(gray_face, True)
                            gray_face = np.expand_dims(gray_face, 0)
                            gray_face = np.expand_dims(gray_face, -1)
                            
                            #print"************"
                            #print('gray_face dtype      : {}'.format(gray_face.dtype))
                            #print('gray_face shape      : {}'.format(gray_face.shape))
                            #print('gray_face size      : {}'.format(gray_face.size))
                            #print"************"


                            ## This is a workaround for asynchronous execution using TF and ROS:
                            ## Keras predictions must run inside the graph the model was loaded into.
                            # https://github.com/keras-team/keras/issues/2397
                            # http://projectsfromtech.blogspot.com/2017/10/visual-object-recognition-in-ros-using.html
                                emotion_prediction = self.emotion_classifier.predict(gray_face)
                                emotion_probability = np.max(emotion_prediction)
                                emotion_label_arg = np.argmax(emotion_prediction)
                                emotion_text = self.emotion_labels[emotion_label_arg]
                                print(emotion_text)
                                print(emotion_probability)
                                print('%')
                                self.emotion_window.append(emotion_text)

                

                                if len(self.emotion_window) > self.frame_window:
                                    self.emotion_window.pop(0)
                                try:
                                    emotion_mode = mode(self.emotion_window)
                                except:
                                    continue

                                if emotion_text == 'angry':
                                    color = emotion_probability * np.asarray((255, 0, 0))
                                elif emotion_text == 'sad':
                                    color = emotion_probability * np.asarray((0, 0, 255))
                                elif emotion_text == 'happy':
                                    color = emotion_probability * np.asarray((255, 255, 0))
                                elif emotion_text == 'surprise':
                                    color = emotion_probability * np.asarray((0, 255, 255))
                                else:
                                    color = emotion_probability * np.asarray((0, 255, 0))

                                color = color.astype(int)
                                color = color.tolist()

                                draw_bounding_box(face_coordinates, rgb_image, color)
                                draw_text(face_coordinates, rgb_image, emotion_mode,
                                          color, 0, -45, 1, 1)

                    try:
                        # convert back to BGR so the "bgr8" encoding is accurate
                        bgr_out = cv2.cvtColor(rgb_image, cv2.COLOR_RGB2BGR)
                        self.image_pub.publish(self.bridge.cv2_to_imgmsg(bgr_out, "bgr8"))
                    except CvBridgeError as e:
                        print(e)
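The with self.graph.as_default(): block above only works if the node captured the TensorFlow graph when the model was loaded. A minimal sketch of that initialization, assuming Keras 2.x on the TF 1.x backend; the class and attribute names follow the callback above:

import tensorflow as tf
from keras.models import load_model

class EmotionNode(object):
    def __init__(self, emotion_model_path):
        # load once, then remember the graph the weights live in
        self.emotion_classifier = load_model(emotion_model_path, compile=False)
        self.emotion_target_size = self.emotion_classifier.input_shape[1:3]
        # ROS callbacks run on a different thread, which sees a fresh default
        # graph; predict() must therefore run inside the graph captured here
        self.graph = tf.get_default_graph()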
def process_image(image):

    try:
        # parameters for loading data and images
        detection_model_path = './trained_models/detection_models/haarcascade_frontalface_default.xml'
        emotion_model_path = './trained_models/emotion_models/fer2013_mini_XCEPTION.102-0.66.hdf5'
        gender_model_path = './trained_models/gender_models/simple_CNN.81-0.96.hdf5'
        emotion_labels = get_labels('fer2013')
        gender_labels = get_labels('imdb')
        font = cv2.FONT_HERSHEY_SIMPLEX

        # hyper-parameters for bounding boxes shape
        gender_offsets = (10, 10)    # (30, 60) would give the crop more context
        emotion_offsets = (0, 0)     # (20, 40) would give the crop more context

        # loading models
        face_detection = load_detection_model(detection_model_path)
        emotion_classifier = load_model(emotion_model_path, compile=False)
        gender_classifier = load_model(gender_model_path, compile=False)

        # getting input model shapes for inference
        emotion_target_size = emotion_classifier.input_shape[1:3]
        gender_target_size = gender_classifier.input_shape[1:3]

        # loading images
        image_array = np.frombuffer(image, np.uint8)
        unchanged_image = cv2.imdecode(image_array, cv2.IMREAD_UNCHANGED)

        rgb_image = cv2.cvtColor(unchanged_image, cv2.COLOR_BGR2RGB)
        gray_image = cv2.cvtColor(unchanged_image, cv2.COLOR_BGR2GRAY)

        faces = detect_faces(face_detection, gray_image)
        for face_coordinates in faces:
            x1, x2, y1, y2 = apply_offsets(face_coordinates, gender_offsets)
            rgb_face = rgb_image[y1:y2, x1:x2]

            x1, x2, y1, y2 = apply_offsets(face_coordinates, emotion_offsets)
            gray_face = gray_image[y1:y2, x1:x2]

            try:
                rgb_face = cv2.resize(rgb_face, gender_target_size)
                gray_face = cv2.resize(gray_face, emotion_target_size)
            except cv2.error:
                continue

            rgb_face = preprocess_input(rgb_face, False)
            rgb_face = np.expand_dims(rgb_face, 0)
            gender_prediction = gender_classifier.predict(rgb_face)
            gender_label_arg = np.argmax(gender_prediction)
            gender_text = gender_labels[gender_label_arg]

            gray_face = preprocess_input(gray_face, True)
            gray_face = np.expand_dims(gray_face, 0)
            gray_face = np.expand_dims(gray_face, -1)
            emotion_label_arg = np.argmax(emotion_classifier.predict(gray_face))
            emotion_text = emotion_labels[emotion_label_arg]

            if gender_text == gender_labels[0]:
                color = (0, 0, 255)
            else:
                color = (255, 0, 0)

            draw_bounding_box(face_coordinates, rgb_image, color)
            draw_text(face_coordinates, rgb_image, gender_text, color, 0, -20, 1, 2)
            draw_text(face_coordinates, rgb_image, emotion_text, color, 0, -50, 1, 2)
    except Exception as err:
        logging.error('Error in emotion gender processor: "{0}"'.format(err))
        return  # rgb_image may be undefined past this point

    bgr_image = cv2.cvtColor(rgb_image, cv2.COLOR_RGB2BGR)

    dirname = 'result'
    if not os.path.exists(dirname):
        os.mkdir(dirname)

    cv2.imwrite(os.path.join(dirname, 'predicted_image.png'), bgr_image)
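process_image expects the raw bytes of an encoded image, which it decodes with cv2.imdecode. A hedged driver sketch; the file name is a placeholder:

# hypothetical caller; 'test.jpg' is a placeholder path
with open('test.jpg', 'rb') as f:
    process_image(f.read())
# on success the annotated image lands in result/predicted_image.png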
  def callback(self, data):
    try:
      bgr_image = self.bridge.imgmsg_to_cv2(data, "bgr8")
      cv2.imshow("Image window", bgr_image)
    except CvBridgeError as e:
      print(e)
      return  # conversion failed, nothing to process



######################### Start the image processing
    gray_image = cv2.cvtColor(bgr_image, cv2.COLOR_BGR2GRAY)
    rgb_image = cv2.cvtColor(bgr_image, cv2.COLOR_BGR2RGB)
    faces = detect_faces(self.face_detection, gray_image)

    for face_coordinates in faces:

        x1, x2, y1, y2 = apply_offsets(face_coordinates, self.emotion_offsets)
        gray_face = gray_image[y1:y2, x1:x2]

        try:
            gray_face = cv2.resize(gray_face, self.emotion_target_size)
        except cv2.error:
            continue

        gray_face = preprocess_input(gray_face, True)
        gray_face = np.expand_dims(gray_face, 0)
        gray_face = np.expand_dims(gray_face, -1)
        print "start"
        print len(gray_face)
        
        print gray_face
        print "end"

        emotion_prediction = self.emotion_classifier.predict(gray_face)
        emotion_probability = np.max(emotion_prediction)
        emotion_label_arg = np.argmax(emotion_prediction)
        emotion_text = emotion_labels[emotion_label_arg]
        print(emotion_text)
        print(emotion_probability)
        print('%')
        self.emotion_window.append(emotion_text)

        if len(self.emotion_window) > self.frame_window:
            self.emotion_window.pop(0)
        try:
            emotion_mode = mode(self.emotion_window)
        except:
            continue

        if emotion_text == 'angry':
            color = emotion_probability * np.asarray((255, 0, 0))
        elif emotion_text == 'sad':
            color = emotion_probability * np.asarray((0, 0, 255))
        elif emotion_text == 'happy':
            color = emotion_probability * np.asarray((255, 255, 0))
        elif emotion_text == 'surprise':
            color = emotion_probability * np.asarray((0, 255, 255))
        else:
            color = emotion_probability * np.asarray((0, 255, 0))

        color = color.astype(int)
        color = color.tolist()

        draw_bounding_box(face_coordinates, rgb_image, color)
        draw_text(face_coordinates, rgb_image, emotion_mode,
                  color, 0, -45, 1, 1)

    bgr_image = cv2.cvtColor(rgb_image, cv2.COLOR_RGB2BGR)
    
######################### End the image processing	



    cv2.imshow("Image window", bgr_image)
    cv2.waitKey(3)

    try:
      self.image_pub.publish(self.bridge.cv2_to_imgmsg(bgr_image, "bgr8"))
    except CvBridgeError as e:
      print(e)
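Both ROS callbacks smooth the per-frame label by taking the mode over a sliding window; the mode import is not shown in these snippets, and from statistics import mode is the likely source. A self-contained sketch of the smoothing, with the names and window length as assumptions:

from statistics import mode

frame_window = 10       # assumed window length, in frames
emotion_window = []

def smooth_label(emotion_text):
    # keep only the most recent frame_window labels
    emotion_window.append(emotion_text)
    if len(emotion_window) > frame_window:
        emotion_window.pop(0)
    try:
        return mode(emotion_window)    # most frequent recent label
    except Exception:
        # no unique mode yet; fall back to the raw per-frame label
        return emotion_text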
def emotion_identify(img_url):
    # parameters for loading data and images

    detection_model_path = 'C:/Users/Admin/PycharmProjects/Emotion_Detection/trained_models/detection_models/haarcascade_frontalface_default.xml'
    emotion_model_path = 'C:/Users/Admin/PycharmProjects/Emotion_Detection/trained_models/emotion_models/fer2013_mini_XCEPTION.102-0.66.hdf5'
    gender_model_path = 'C:/Users/Admin/PycharmProjects/Emotion_Detection/trained_models/gender_models/simple_CNN.81-0.96.hdf5'
    emotion_labels = get_labels('fer2013')
    gender_labels = get_labels('imdb')
    font = cv2.FONT_HERSHEY_SIMPLEX

    # hyper-parameters for bounding boxes shape
    gender_offsets = (10, 10)    # (30, 60) would give the crop more context
    emotion_offsets = (0, 0)     # (20, 40) would give the crop more context

    # loading models
    face_detection = load_detection_model(detection_model_path)
    emotion_classifier = load_model(emotion_model_path, compile=False)
    gender_classifier = load_model(gender_model_path, compile=False)

    # getting input model shapes for inference
    emotion_target_size = emotion_classifier.input_shape[1:3]
    gender_target_size = gender_classifier.input_shape[1:3]
    # loading images
    image_path = img_url
    rgb_image = load_image(image_path, grayscale=False)
    gray_image = load_image(image_path, grayscale=True)
    gray_image = np.squeeze(gray_image)
    gray_image = gray_image.astype('uint8')

    faces = detect_faces(face_detection, gray_image)
    if len(faces) == 0:
        print("No face")
        K.clear_session()
        return False

    emotions = collections.defaultdict(int)
    for face_coordinates in faces:
        x1, x2, y1, y2 = apply_offsets(face_coordinates, gender_offsets)
        rgb_face = rgb_image[y1:y2, x1:x2]

        x1, x2, y1, y2 = apply_offsets(face_coordinates, emotion_offsets)
        gray_face = gray_image[y1:y2, x1:x2]

        try:
            rgb_face = cv2.resize(rgb_face, gender_target_size)
            gray_face = cv2.resize(gray_face, emotion_target_size)
        except cv2.error:
            continue

        rgb_face = preprocess_input(rgb_face, False)
        rgb_face = np.expand_dims(rgb_face, 0)
        gender_prediction = gender_classifier.predict(rgb_face)
        gender_label_arg = np.argmax(gender_prediction)
        gender_text = gender_labels[gender_label_arg]

        gray_face = preprocess_input(gray_face, True)
        gray_face = np.expand_dims(gray_face, 0)
        gray_face = np.expand_dims(gray_face, -1)
        emotion_label_arg = np.argmax(emotion_classifier.predict(gray_face))
        emotion_text = emotion_labels[emotion_label_arg]
        emotions[emotion_text] += 1
        if gender_text == gender_labels[0]:
            color = (0, 0, 255)
        else:
            color = (255, 0, 0)

        draw_bounding_box(face_coordinates, rgb_image, color)
        draw_text(face_coordinates, rgb_image, gender_text, color, 0, -20, 1, 2)
        draw_text(face_coordinates, rgb_image, emotion_text, color, 0, -50, 1, 2)
    # pick the most frequent emotion across all detected faces
    max_emotion = max(emotions, key=emotions.get) if emotions else None
    print("The emotion of this picture is: ", max_emotion)
    bgr_image = cv2.cvtColor(rgb_image, cv2.COLOR_RGB2BGR)
    cv2.imwrite('./result_images/predicted_test_image.png', bgr_image)
    K.clear_session()
    return max_emotion
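A usage sketch for emotion_identify; the image path is hypothetical. Since the function calls K.clear_session() before returning, every call reloads all three models, so batch callers may want to hoist the model loading out of the function:

# hypothetical driver; the image path is a placeholder
result = emotion_identify('test_images/sample.jpg')
if result:
    print("Dominant emotion:", result)
else:
    print("No face found.")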
while True:
    bgr_image = video_capture.read()[1]
    gray_image = cv2.cvtColor(bgr_image, cv2.COLOR_BGR2GRAY)
    rgb_image = cv2.cvtColor(bgr_image, cv2.COLOR_BGR2RGB)
    faces = detect_faces(face_detection, gray_image)

    for face_coordinates in faces:

        x1, x2, y1, y2 = apply_offsets(face_coordinates, offsets)
        gray_face = gray_image[y1:y2, x1:x2]
        try:
            gray_face = cv2.resize(gray_face, target_size)
        except cv2.error:
            continue

        gray_face = preprocess_input(gray_face, True)
        gray_face = np.expand_dims(gray_face, 0)
        gray_face = np.expand_dims(gray_face, -1)
        guided_gradCAM = calculate_guided_gradient_CAM(gray_face,
                            gradient_function, saliency_function)
        guided_gradCAM = cv2.resize(guided_gradCAM, (x2-x1, y2-y1))
        try:
            rgb_guided_gradCAM = np.repeat(guided_gradCAM[:, :, np.newaxis],
                                                                3, axis=2)
            rgb_image[y1:y2, x1:x2, :] = rgb_guided_gradCAM
        except:
            continue
        draw_bounding_box((x1, y1, x2 - x1, y2 - y1), rgb_image, color)
    bgr_image = cv2.cvtColor(rgb_image, cv2.COLOR_RGB2BGR)
    try:
        cv2.imshow('window_frame', bgr_image)
    except cv2.error:
        continue
    # waitKey lets imshow render; press 'q' to quit
    if cv2.waitKey(1) & 0xFF == ord('q'):
        break
faces = detect_faces(face_detection, gray_image)
for face_coordinates in faces:
    x1, x2, y1, y2 = apply_offsets(face_coordinates, gender_offsets)
    rgb_face = rgb_image[y1:y2, x1:x2]

    x1, x2, y1, y2 = apply_offsets(face_coordinates, emotion_offsets)
    gray_face = gray_image[y1:y2, x1:x2]

    try:
        rgb_face = cv2.resize(rgb_face, gender_target_size)
        gray_face = cv2.resize(gray_face, emotion_target_size)
    except cv2.error:
        continue

    rgb_face = preprocess_input(rgb_face, False)
    rgb_face = np.expand_dims(rgb_face, 0)
    gender_prediction = gender_classifier.predict(rgb_face)
    gender_label_arg = np.argmax(gender_prediction)
    gender_text = gender_labels[gender_label_arg]

    gray_face = preprocess_input(gray_face, True)
    gray_face = np.expand_dims(gray_face, 0)
    gray_face = np.expand_dims(gray_face, -1)
    emotion_label_arg = np.argmax(emotion_classifier.predict(gray_face))
    emotion_text = emotion_labels[emotion_label_arg]

    if gender_text == gender_labels[0]:
        color = (0, 0, 255)
    else:
        color = (255, 0, 0)