Example #1
def dashboard():
    if 'email' not in session:
        return redirect(
            url_for('login'))  # this page requires a logged-in account
    resume = True
    photo = True
    emotions = False
    photo_feedback = False
    resume_feedback = False
    score = False
    error = False
    if request.method == 'POST':
        user_info = users.find_one({'email': session['email']})
        if 'resume' in request.form:
            if user_info['resume'] != '':
                resume_data = user_info['resume']
                parsed_output = json.loads(resume_data)
                with open("sentiment_results.txt", 'w') as fh:
                    fh.write(
                        json.dumps(parsed_output["DocumentElement"]["Resume"]
                                   ["Experience"],
                                   indent=4,
                                   sort_keys=True))
                results = sentiment.analyze("sentiment_results.txt")
                score = results[0]
                resume_feedback = analyzeResume(score)
            else:
                resume = False
        if 'photo' in request.form:
            if fs.exists(user_info['image']):
                image_data = fs.get(user_info['image'])
                # user_photo is assumed to be defined at module level
                with open(UPLOAD_FOLDER + user_photo, 'wb') as f:
                    f.write(image_data.read())
                emotions = detect_face.detect_faces(UPLOAD_FOLDER + user_photo)
                photo_feedback = analyzeRelease(
                    emotions["anger"], emotions["joy"], emotions["surprise"],
                    emotions["blurred"],
                    emotions["headwear"]) if emotions else False
                error = not photo_feedback
            else:
                photo = False
    return render_template("dashboard.html",
                           emotions=emotions,
                           photo=photo,
                           score=score,
                           resume=resume,
                           photo_feedback=photo_feedback,
                           resume_feedback=resume_feedback,
                           error=error)
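A minimal sketch, not from the source, of the resume JSON shape the dashboard route above assumes when it indexes DocumentElement -> Resume -> Experience; the fields inside each Experience entry are hypothetical:

import json

# Hypothetical payload matching the keys the route indexes.
resume_data = json.dumps({
    "DocumentElement": {
        "Resume": {
            "Experience": [
                {"Employer": "Acme Corp", "Role": "Engineer"}  # invented fields
            ]
        }
    }
})

parsed_output = json.loads(resume_data)
print(json.dumps(parsed_output["DocumentElement"]["Resume"]["Experience"],
                 indent=4, sort_keys=True))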
Example #2
def extractface():
    responseArray = {}  # maps each saved filename to its face-detection result
    if request.method == 'POST':
        images = request.files.to_dict()
        for image in images:  # each dict key names one uploaded file
            print(images[image])  # log the FileStorage value for this key
            file_name = images[image].filename
            extension = os.path.splitext(file_name)[1]
            f_name = str(uuid.uuid4()) + extension
            images[image].save(os.path.join('./source', f_name))
            #responseArray.append(detect_face.detect_faces(f_name))
            responseArray[f_name] = detect_face.detect_faces(
                os.path.join('./source', f_name))

        return jsonify(responseArray)
    else:
        return jsonify(msg='error')
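A hedged usage sketch for the handler above, assuming it is mounted at /extractface (the route decorator is not shown in the snippet); the form-field key can be anything, since the handler iterates over every uploaded file:

import requests

# Hypothetical image file and route URL; adjust to the running app.
with open('group.jpg', 'rb') as fh:
    resp = requests.post('http://localhost:5000/extractface',
                         files={'photo': fh})
print(resp.json())  # maps each saved '<uuid><ext>' name to its face data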
Example #3
def vid2frames(path=datadir + "savee/AudioVisualClip/DC/a1.avi",
               subject="DC",
               vid_label="a1"):
    # noinspection PyArgumentList
    cap = cv2.VideoCapture(path)
    # print(cap)
    n = 0
    # load the face cascade once instead of re-creating it for every frame
    face_cascade = cv2.CascadeClassifier('lbpcascade_frontalface.xml')
    while cap.isOpened():
        # Capture frame-by-frame
        ret, frame = cap.read()
        if ret:
            # crop to the largest detected face in the frame
            frame = get_largest_face(frame, detect_faces(face_cascade, frame))
            try:
                frame = cv2.resize(frame, (48, 48),
                                   interpolation=cv2.INTER_CUBIC)
            except cv2.error:
                # fall back to a blank 48x48 frame if no face could be cropped
                frame = np.zeros((48, 48), dtype=np.uint8)
            cv2.imwrite(
                datadir + 'frames/' + subject + '_' + vid_label + '_' +
                str(n).zfill(5) + '.png', frame)
            n = n + 1
            # print(n)
            # Press Q on the keyboard to exit
            if cv2.waitKey(25) & 0xFF == ord('q'):
                break

        # Break the loop
        else:
            break

    # When everything done, release the video capture object
    cap.release()

    # Closes all the frames
    cv2.destroyAllWindows()
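A hedged usage sketch that follows the SAVEE path pattern from the default arguments; the subject and clip lists below are illustrative, not taken from the source:

for subject in ["DC", "JE", "JK", "KL"]:
    for vid_label in ["a1", "a2", "a3"]:
        path = (datadir + "savee/AudioVisualClip/" + subject + "/" +
                vid_label + ".avi")
        vid2frames(path=path, subject=subject, vid_label=vid_label)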
Example #4
def show_webcam_and_run(model,
                        emoticons,
                        window_size=None,
                        window_name='webcam',
                        update_time=10):
    """
    Shows the webcam image, detects faces and their emotions in real time, and draws emoticons over those faces.
    :param model: Learnt emotion detection model.
    :param emoticons: List of emotions images.
    :param window_size: Size of webcam image window.
    :param window_name: Name of webcam image window.
    :param update_time: Image update time interval.
    """
    cv2.namedWindow(window_name, WINDOW_NORMAL)
    if window_size:
        width, height = window_size
        cv2.resizeWindow(window_name, width, height)

    vc = cv2.VideoCapture(0)
    if vc.isOpened():
        read_value, webcam_image = vc.read()
    else:
        print("webcam not found")
        return

    while read_value:
        for normalized_face, (x, y, w, h) in detect_faces(webcam_image):
            prediction = model.predict(normalized_face)  # do prediction
            if cv2.__version__ != '3.1.0':
                prediction = prediction[0]

            image_to_draw = emoticons[prediction]
            draw_with_alpha(webcam_image, image_to_draw, (x, y, w, h))

        cv2.imshow(window_name, webcam_image)
        read_value, webcam_image = vc.read()
        key = cv2.waitKey(update_time)

        if key == 27:  # exit on ESC
            break

    cv2.destroyWindow(window_name)
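A hedged usage sketch: show_webcam_and_run only requires that model.predict(face) return a label index into emoticons, so the FisherFace recognizer, file paths, and emotion list below are assumptions:

import cv2

emotions = ['neutral', 'anger', 'disgust', 'happy', 'sadness', 'surprise']
model = cv2.face.FisherFaceRecognizer_create()  # requires opencv-contrib
model.read('models/emotion_detection_model.xml')  # hypothetical model path
emoticons = [cv2.imread('graphics/%s.png' % e, -1) for e in emotions]
show_webcam_and_run(model, emoticons, window_size=(800, 600))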
Example #5
def extract_faces(emotions):
    """
    Crops faces in emotions images.
    :param emotions: List of emotions names.
    """
    print("Extracting faces")
    for emotion in emotions:

        photos = glob.glob('data/sorted_set/%s/*' % emotion)

        for file_number, photo in enumerate(photos):
            frame = cv2.imread(photo)
            normalized_faces = detect_faces(frame)
            os.remove(photo)

            for face in normalized_faces:
                try:
                    # write the cropped face back into the emotion folder
                    cv2.imwrite("data/sorted_set/%s/%s.png"
                                % (emotion, file_number + 1), face[0])
                except Exception:
                    print("error in processing %s" % photo)
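A hedged usage sketch; the label list is illustrative and must match the subdirectory names under data/sorted_set/:

extract_faces(['neutral', 'anger', 'disgust', 'happy', 'sadness', 'surprise'])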
Example #6
def video_capture(image_based=False):
    cv2.namedWindow("exit on ESC")
    # to capture video from cv2
    # fourcc = cv2.VideoWriter_fourcc(*'XVID')
    # out = cv2.VideoWriter('output.avi', fourcc, (640,
    vc = cv2.VideoCapture(0)
    frame = 0
    video = []
    temp_video = []
    fill = np.zeros(shape)  # `shape` is assumed to be defined at module level

    if vc.isOpened():  # try to get the first frame
        rval, frame = vc.read()
    else:
        print("webcam not found")
        return
    f = cv2.flip(frame, 1)
    frames_passed = 0
    frame_rate = 2
    plot = frame[-120:, 0:120]
    angry = disgust = fear = happy = neutral = sad = surprise = 0
    while rval:
        cv2.imshow("exit on ESC", f)
        rval, frame = vc.read()
        # tilt optimization req
        # temp = crop_rot_images(frame, lbp_face_cascade,draw_face=True)
        # if frames_passed % frame_rate == 0:
        temp = get_largest_face(
            frame,
            detect_faces(cv2.CascadeClassifier('lbpcascade_frontalface.xml'),
                         frame),
            draw_face=True)
        # temp = draw_faces(frame, detect_faces(lbp_face_cascade, frame))

        if temp.shape != (0, 0, 3):

            frame = cv2.flip(frame, 1)
            if image_based:
                angry, disgust, fear, happy, neutral, sad, surprise = predict_emotion_image(
                    temp)
            else:
                # implemented for only transfer learning part
                if frames_passed % frame_rate != 3:
                    temp = cv2.cvtColor(temp, cv2.COLOR_BGR2GRAY)
                    temp = cv2.resize(temp, (48, 48),
                                      interpolation=cv2.INTER_AREA)
                    resized = (np.moveaxis(temp, -1, 0)).reshape(
                        (1, 1, 48, 48))
                    # print(resized.shape)
                    vector = intermediate_layer_model.predict(resized)
                    # print(vector.shape)

                    video.append(vector.reshape(4608))
                    if len(video) >= 60:
                        # keep a sliding window of the 60 most recent frames
                        video = video[-60:]
                        temp_video = video
                    else:
                        # zero-pad the buffer up to 60 frames until it fills
                        temp_video = np.concatenate((video, fill),
                                                    axis=0)[0:60]
                    # out.write(frame)
                angry, disgust, fear, happy, neutral, sad, surprise = predict_emotion_video(
                    temp_video)
                frames_passed = frames_passed + 1
            print(
                put_emoji(angry, disgust, fear, happy, neutral, sad, surprise))
            # cv2.putText(frame, status, bottomLeftCornerOfText, font, fontScale, fontColor, lineType)

            # with open('emotion.txt', 'a') as fp:
            #     fp.write('{},{},{},{},{},{},{}\n'.format(time.time(), angry, fear, happy, sad, surprise, neutral))

            f = frame
        else:
            f = cv2.flip(frame, 1)

        key = cv2.waitKey(20)
        if key == 27:  # exit on ESC
            break
    vc.release()
    cv2.destroyWindow("exit on ESC")
Example #7
    def facing(self):
        detect_face.detect_faces('faces.png')