Example #1
0
    def analysisEmotion():
        """Handle an emotion-analysis request.

        Reads the 'image' field from the request's JSON body, classifies it
        via prediction_path, and returns the result as JSON.
        """
        logger.info('Emotion Request')
        try:
            imageData = request.get_json()['image']
            emotion = prediction_path(imageData)
            emotion_request_json = jsonify({'analysis_result': emotion})

            logger.info('Result : ' + emotion)
            return emotion_request_json
        except Exception as e:
            # Bug fix: the original logged the error and fell through,
            # implicitly returning None (Flask rejects a None response with a
            # bare 500). Log the full traceback and return a JSON error body.
            logger.exception(e)
            return jsonify({'error': str(e)}), 500
Example #2
0
def video_emotions(path):
    """Run per-frame emotion prediction on a video source and display it.

    Each captured frame is written to disk, passed through prediction_path
    (which presumably returns an annotated frame — confirm), and shown in an
    OpenCV window until the stream ends or 'q' is pressed.

    :param path: video file path or camera index for cv2.VideoCapture
    """
    # load keras model
    # NOTE(review): face_detection and emotion_classifier are loaded here but
    # never used in this function — possibly prediction_path relies on its own
    # copies; confirm whether these loads can be dropped.
    emotion_model_path = 'models/model.hdf5'
    detection_model_path = 'haarcascades/haarcascade_frontalface_default.xml'
    face_detection = cv2.CascadeClassifier(detection_model_path)
    emotion_classifier = load_model(emotion_model_path, compile=False)
    print('Model loaded')

    # save location for the frame handed to prediction_path
    save_loc = 'save_loc/1.jpg'
    # list of given emotions
    EMOTIONS = [
        'Angry', 'Disgusted', 'Fearful', 'Happy', 'Sad', 'Surprised', 'Neutral'
    ]

    # store the emoji corresponding to different emotions
    # NOTE(review): emoji_faces is unused below — kept because the reads have
    # observable side effects (file access); confirm before removing.
    emoji_faces = [
        cv2.imread('emojis/' + emotion.lower() + '.png', -1)
        for emotion in EMOTIONS
    ]

    # set video capture device, webcam in this case
    video_capture = cv2.VideoCapture(path)
    video_capture.set(3, 640)  # WIDTH
    video_capture.set(4, 480)  # HEIGHT

    # start feed
    while True:
        # Capture frame-by-frame
        ret, frame = video_capture.read()
        # Bug fix: the original ignored `ret`, so at end-of-stream (or on a
        # failed camera grab) `frame` is None and cv2.imwrite raises.
        if not ret:
            break
        cv2.imwrite(save_loc, frame)
        frame = prediction_path(save_loc)
        # Display the resulting frame
        cv2.imshow('Video', frame)

        if cv2.waitKey(1) & 0xFF == ord('q'):
            break

    # When everything is done, release the capture
    video_capture.release()
    cv2.destroyAllWindows()
Example #3
0
    def analysisEmotionAndReturnRecommends():
        """Handle a recommendation request.

        Reads 'image' and 'path' from the JSON body, classifies the emotion,
        builds a recommendation list for it, and returns both as JSON.
        """
        logger.info('Recommendations Request')
        try:
            # Parse the body once instead of calling request.get_json() twice.
            payload = request.get_json()
            imageData = payload['image']
            path = payload['path']
            emotion = prediction_path(imageData)
            recommended_list = recommend_by_emotion(emotion, path)
            emotion_request_json = jsonify({
                'header': 'EMOTIONAL_STATE',
                'analysis_result': emotion,
                'recommended_list': recommended_list
            })

            logger.info('Result : %s', emotion)
            # Bug fix: the original did 'Recommends : ' + recommended_list,
            # which raises TypeError for any non-str value (it is serialized
            # as a JSON list above), silently aborting every successful
            # request into the except branch. Lazy %s formatting handles any
            # type.
            logger.info('Recommends : %s', recommended_list)
            return emotion_request_json
        except Exception as e:
            # Log the traceback and return a JSON error instead of implicitly
            # returning None.
            logger.exception(e)
            return jsonify({'error': str(e)}), 500
Example #4
0
File: __init__.py  Project: Synez7/TER_S6
def run_detection_path(path_img, path_text):
    """Run emotion detection on the image at *path_img*.

    Delegates to prediction_path; presumably the result is written to the
    text file at *path_text* — confirm against prediction_path's contract.
    The return value of prediction_path is discarded.
    """
    prediction_path(path_img, path_text)
Example #5
0
File: __init__.py  Project: Synez7/TER_S6
from prediction_utils import prediction_path
import sys


# for running emotion detection
def read_emotion(path):
    """Run emotion detection on *path* via get_current_emotion.

    NOTE(review): get_current_emotion is neither defined nor imported in
    this file's visible scope — calling this raises NameError unless it is
    provided elsewhere; confirm the missing import.
    """
    get_current_emotion(path)


# to run emotion detection on image saved on disk
def run_detection_path(path_img, path_text):
    """Run emotion detection on the image at *path_img*.

    Delegates to prediction_path; per the CLI usage text below, the result
    is presumably stored in the text file at *path_text* — TODO confirm.
    The return value of prediction_path is discarded.
    """
    prediction_path(path_img, path_text)


if __name__ == '__main__':
    # CLI entry point: expects an image path and an output text path, with
    # an optional third argument naming the model to use.
    cli_args = sys.argv[1:]
    if len(cli_args) == 2:
        prediction_path(cli_args[0], cli_args[1])
    elif len(cli_args) == 3:
        prediction_path(cli_args[0], cli_args[1], cli_args[2])
    else:
        # Wrong argument count: show usage help.
        print(
            "--------------------\nEmotion Recognition:\n--------------------")
        print("python ./__init__.py [path_img] [path_text]")
        print(
            "[path_img]: the path of the image of which emotion need be detected"
        )
        print(
            "[path_text]: the path of the text file in which result will be stored\n"
        )
        print("[name_model]: (optional) the name of the model to use\n")
Example #6
0
File: main.py  Project: Hushout/TER_S6
def run_detection_path(path):
    """Run emotion detection on the image at *path*, discarding the result.

    NOTE(review): this definition is immediately shadowed by the
    re-definition of run_detection_path directly below, making it dead code
    — confirm which version is intended and delete the other.
    """
    prediction_path(path)
def run_detection_path(path):
    """Run emotion detection on the image at *path* and display the result.

    prediction_path presumably returns an annotated image array — confirm.
    NOTE(review): this re-definition shadows the plain version defined
    immediately above it.
    """
    img = prediction_path(path)
    # The unused `imgplot` binding from the original is dropped; imshow's
    # return value is not needed to render the figure.
    plt.imshow(img)
    plt.show()