Example #1
def prediction_path(path):
    # load keras model
    model = define_model()
    model = model_weights(model)

    # list of given emotions
    EMOTIONS = [
        'Angry', 'Disgusted', 'Fearful', 'Happy', 'Sad', 'Surprised', 'Neutral'
    ]

    if os.path.exists(path):
        # read the image
        img = cv2.imread(path, 0)
        # check if image is valid or not
        if img is None:
            print('Invalid image!')
            return
    else:
        print('Image not found')
        return

    # resize image for the model
    img = cv2.resize(img, (48, 48))
    img = np.reshape(img, (1, 48, 48, 1))
    # do prediction
    result = model.predict(img)

    print('Detected emotion: ' + str(EMOTIONS[np.argmax(result[0])]))

    return
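
All of the examples below call define_model() and model_weights(), which live elsewhere in the project and are not shown; they also assume module-level imports such as cv2, numpy as np, os, time, and Decimal from decimal. A minimal sketch of what the two helpers are assumed to do, namely build a small Keras CNN over 48x48 grayscale face crops and load pre-trained weights (the layer sizes and the weights filename here are placeholders, not the project's actual values):

from tensorflow.keras.models import Sequential
from tensorflow.keras.layers import Conv2D, MaxPooling2D, Flatten, Dense

def define_model(num_classes=7):
    # small placeholder CNN for 48x48x1 inputs; the real architecture may differ
    model = Sequential([
        Conv2D(32, (3, 3), activation='relu', input_shape=(48, 48, 1)),
        MaxPooling2D((2, 2)),
        Conv2D(64, (3, 3), activation='relu'),
        MaxPooling2D((2, 2)),
        Flatten(),
        Dense(128, activation='relu'),
        Dense(num_classes, activation='softmax'),
    ])
    return model

def model_weights(model, weights_path='model_weights.h5'):
    # load pre-trained weights produced by the training script (the path is an assumption)
    model.load_weights(weights_path)
    return model
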
Example #2
def prepare_realtime_emotions():
    global model, faceCascade, emoji_faces
    # load keras model
    model = define_model()
    model = model_weights(model)
    print('Model loaded')
    # load haar cascade for face
    faceCascade = cv2.CascadeClassifier(r'haarcascades/haarcascade_frontalface_default.xml')
    # list of given emotions (EMOTIONS is assumed to be defined at module level,
    # e.g. ['Angry', 'Disgusted', 'Fearful', 'Happy', 'Sad', 'Surprised', 'Neutral'])
    # store the emoji corresponding to different emotions
    emoji_faces = []
    for index, emotion in enumerate(EMOTIONS):
        emoji_faces.append(cv2.imread('emojis/' + emotion.lower() + '.png', -1))
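
Note that prepare_realtime_emotions() only populates the module-level globals (model, faceCascade, emoji_faces); presumably it is called once at startup, before any per-frame prediction code such as the functions below runs.
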
Example #3
def prediction_path(img_str):
    # disable OpenCL in OpenCV
    cv2.ocl.setUseOpenCL(False)
    # load keras model
    model = model_weights(define_model())

    path = './logs/' + time.strftime("%Y%m%d-%H%M%S") + '.png'
    with open(path, "wb") as fh:
        fh.write(base64.b64decode(img_str))
    logger.info('Image saved at ' + path)

    img = cv2.imread(path)
    gray = cv2.cvtColor(img, cv2.COLOR_BGR2GRAY)

    img_rot = cv2.rotate(img, cv2.ROTATE_180)
    gray_rot = cv2.cvtColor(img_rot, cv2.COLOR_BGR2GRAY)

    face_cascade = cv2.CascadeClassifier('./haarcascade_frontalface_alt2.xml')

    # Detect faces
    faces = face_cascade.detectMultiScale(
        gray,
        scaleFactor=1.2,
        minNeighbors=5,
        minSize=(20, 20),
    )

    faces_rot = face_cascade.detectMultiScale(
        gray_rot,
        scaleFactor=1.2,
        minNeighbors=5,
        minSize=(20, 20),
    )

    if len(faces) >= 1:
        logger.info('Image not rotated')
        return do_prediction(faces, img, model)
    elif len(faces_rot) >= 1:
        logger.info('Image rotated')
        return do_prediction(faces_rot, img_rot, model)
    else:
        return cs.SPECIAL_EMOTIONS[1]
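
Unlike the other examples, this prediction_path() takes a base64-encoded image string (e.g. as received from a web client) rather than a file path, and it expects a ./logs/ directory to exist for the intermediate save. A minimal way to exercise it locally, with an illustrative test image filename:

import base64

with open('test_face.jpg', 'rb') as f:  # any image containing a face; the filename is an assumption
    img_str = base64.b64encode(f.read()).decode('ascii')

emotion = prediction_path(img_str)
print(emotion)
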
Example #4
def prediction_path(path):
    model = define_model()
    model = model_weights(model)
    images = cv2.imread(path)
    gray_img = cv2.cvtColor(images, cv2.COLOR_BGR2GRAY)
    face_haar_cascade = cv2.CascadeClassifier(
        'haarcascade_frontalface_default.xml')
    faces_detected = face_haar_cascade.detectMultiScale(
        gray_img,
        scaleFactor=1.1,
        minNeighbors=5,
        minSize=(30, 30),
        flags=cv2.CASCADE_SCALE_IMAGE)
    print(faces_detected)
    print(type(faces_detected))
    for (x, y, w, h) in faces_detected:
        cv2.rectangle(images, (x, y), (x + w, y + h), (255, 0, 0))
        # crop the region of interest, i.e. the face area, from the image
        # (rows are indexed by y/height, columns by x/width)
        roi_gray = gray_img[y:y + h, x:x + w]

        roi_gray = cv2.resize(roi_gray, (48, 48))
        roi_gray = np.reshape(roi_gray, (1, 48, 48, 1))
        mean = np.mean(roi_gray)
        std = np.std(roi_gray)
        roi_gray = (roi_gray - mean) / (std + 1e-7)

        predictions = model.predict(roi_gray)
        print(predictions)
        # find the index of the highest-scoring emotion
        max_index = np.argmax(predictions[0])
        emotions = ('anger', 'happy', 'neutral', 'sad')
        predicted_emotion = emotions[max_index]
        cv2.putText(images, predicted_emotion, (int(x), int(y)),
                    cv2.FONT_HERSHEY_SIMPLEX, 1, (0, 0, 255), 2)

    #resized_img = cv2.resize(images, (int(images.shape[0]/2), int(images.shape[1]/2)))
    cv2.imshow("result", images)
    cv2.waitKey(0)
    cv2.destroyAllWindows()
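
Note the preprocessing difference: this example (and the final realtime_emotions() example) standardizes each face crop before prediction, roi_gray = (roi_gray - mean) / (std + 1e-7), so the model sees roughly zero-mean, unit-variance inputs, with the 1e-7 term guarding against division by zero on a flat crop. The other examples feed raw 0-255 grayscale values, which suggests the two groups of snippets target models trained with different preprocessing (they also use different label sets: seven emotions versus four).
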
Example #5
def main():
    model = define_model()
    model = model_weights(model)
    print('Model loaded')

    # placeholder for the prediction result (1 x 7 emotion scores)
    result = np.zeros((1, 7))
    faceCascade = cv2.CascadeClassifier(r'haarcascades/haarcascade_frontalface_default.xml')
    # list of given emotions
    EMOTIONS = ['Angry', 'Disgusted', 'Fearful', 'Happy', 'Sad', 'Surprised', 'Neutral']

    # store the emoji corresponding to different emotions
    emoji_faces = []
    save_loc = 'save_loc/1.jpg'
    for index, emotion in enumerate(EMOTIONS):
        emoji_faces.append(cv2.imread('emojis/' + emotion.lower() + '.png', -1))
    # set video capture device , webcam in this case
    video_capture = cv2.VideoCapture(0)
    video_capture.set(3, 640)  # WIDTH
    video_capture.set(4, 480)  # HEIGHT

    prev_time = time.time()
    once = False

    while True:
        ret, frame = video_capture.read()
        # mirror the frame
        frame = cv2.flip(frame, 1, 0)
        gray = cv2.cvtColor(frame, cv2.COLOR_BGR2GRAY)

        # find face in the frame
        faces = faceCascade.detectMultiScale(
                    gray,
                    scaleFactor=1.1,
                    minNeighbors=5,
                    minSize=(30, 30),
                    flags=cv2.CASCADE_SCALE_IMAGE
                )
        for (x, y, w, h) in faces:

            cv2.rectangle(frame, (x-10, y-70), (x+w+20, y+h+40), (15, 175, 61), 4)

            # roi_gray = gray[y:y+h, x:x+w]
            color_img = frame[y-90:y+h+70, x-50:x+w+50]
            curr_time = time.time()
            cv2.imwrite(save_loc, color_img)
            img = cv2.imread(save_loc, 0)
            
            if img is not None:
                if curr_time - prev_time >= 1:

                    img = cv2.resize(img, (48, 48))
                    img = np.reshape(img, (1, 48, 48, 1))
                    # do prediction
                    result = model.predict(img)
                    prev_time = time.time()
                    total_sum = np.sum(result[0])
                    once = True
                    # select the emoji face with highest confidence
                    emoji_face = emoji_faces[np.argmax(result[0])]
            if once:
                for index, emotion in enumerate(EMOTIONS):
                    text = str(round(Decimal(result[0][index] / total_sum * 100), 2)) + "%"
                    # for drawing progress bar
                    cv2.rectangle(frame, (100, index * 20 + 10),
                                  (100 + int(result[0][index] * 100), (index + 1) * 20 + 4),
                                  (255, 0, 0), -1)
                    # for putting emotion labels
                    cv2.putText(frame, emotion, (10, index * 20 + 20),
                                cv2.FONT_HERSHEY_SIMPLEX, 0.5, (7, 109, 16), 2)
                    # for putting percentage confidence
                    cv2.putText(frame, text, (105 + int(result[0][index] * 100), index * 20 + 20),
                                cv2.FONT_HERSHEY_SIMPLEX, 0.5, (255, 0, 0), 1)
            
                for c in range(0, 3):
                    # for doing overlay we need to assign weights to both foreground and background
                    foreground = emoji_face[:, :, c] * (emoji_face[:, :, 3] / 255.0)
                    background = frame[350:470, 10:130, c] * (1.0 - emoji_face[:, :, 3] / 255.0)
                    frame[350:470, 10:130, c] = foreground + background
            break
        # Display the resulting frame
        cv2.imshow('Video', frame)

        if cv2.waitKey(1) & 0xFF == ord('q'):
            break

    # When everything is done, release the capture
    video_capture.release()
    cv2.destroyAllWindows()
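
The emoji overlay used by the realtime examples is standard alpha compositing: the PNGs are read with cv2.imread(..., -1) so their alpha channel is kept, and each colour channel of the frame region is blended as foreground * alpha + background * (1 - alpha). The per-channel loop can be written equivalently in one vectorized step; a sketch assuming the emoji images are 120x120 RGBA, matching the hard-coded frame[350:470, 10:130] region:

alpha = emoji_face[:, :, 3:4] / 255.0                     # (120, 120, 1), values in [0, 1]
background = frame[350:470, 10:130].astype(np.float64)    # BGR region of the frame
blended = alpha * emoji_face[:, :, :3] + (1.0 - alpha) * background
frame[350:470, 10:130] = blended.astype(np.uint8)
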
Example #6
def realtime_emotions():

    model = define_model()
    model = model_weights(model)

    save_loc = 'save_loc/1.jpg'

    result = np.zeros((1, 7))

    once = False

    faceCascade = cv2.CascadeClassifier(
        r'haarcascades/haarcascade_frontalface_default.xml')

    EMOTIONS = [
        'Angry', 'Disgusted', 'Fearful', 'Happy', 'Sad', 'Surprised', 'Neutral'
    ]

    emoji_faces = []
    for index, emotion in enumerate(EMOTIONS):
        emoji_faces.append(cv2.imread('emojis/' + emotion + '.png', -1))

    video_capture = cv2.VideoCapture(0)
    video_capture.set(3, 640)
    video_capture.set(4, 480)

    prev_time = time.time()

    # start webcam feed
    while True:
        #
        ret, frame = video_capture.read()

        frame = cv2.flip(frame, 1, 0)
        gray = cv2.cvtColor(frame, cv2.COLOR_BGR2GRAY)

        faces = faceCascade.detectMultiScale(gray,
                                             scaleFactor=1.1,
                                             minNeighbors=5,
                                             minSize=(30, 30),
                                             flags=cv2.CASCADE_SCALE_IMAGE)

        # Draw a rectangle around the faces
        for (x, y, w, h) in faces:

            roi_color = frame[y - 90:y + h + 70, x - 50:x + w + 50]

            cv2.imwrite(save_loc, roi_color)

            cv2.rectangle(frame, (x - 10, y - 70), (x + w + 20, y + h + 40),
                          (15, 175, 61), 4)

            curr_time = time.time()

            if curr_time - prev_time >= 1:

                img = cv2.imread(save_loc, 0)

                if img is not None:

                    once = True

                    img = cv2.resize(img, (48, 48))
                    img = np.reshape(img, (1, 48, 48, 1))

                    result = model.predict(img)
                    print(EMOTIONS[np.argmax(result[0])])

                prev_time = time.time()

            if once:
                total_sum = np.sum(result[0])

                emoji_face = emoji_faces[np.argmax(result[0])]
                for index, emotion in enumerate(EMOTIONS):
                    text = str(
                        round(Decimal(result[0][index] / total_sum * 100),
                              2)) + "%"

                    cv2.rectangle(frame, (100, index * 20 + 10),
                                  (100 + int(result[0][index] * 100),
                                   (index + 1) * 20 + 4), (255, 0, 0), -1)

                    cv2.putText(frame, emotion, (10, index * 20 + 20),
                                cv2.FONT_HERSHEY_SIMPLEX, 0.5, (173, 9, 136),
                                2)

                    cv2.putText(
                        frame, text,
                        (105 + int(result[0][index] * 100), index * 20 + 20),
                        cv2.FONT_HERSHEY_SIMPLEX, 0.5, (255, 55, 125), 1)

                for c in range(0, 3):

                    foreground = emoji_face[:, :,
                                            c] * (emoji_face[:, :, 3] / 255.0)
                    background = frame[350:470, 10:130,
                                       c] * (1.0 - emoji_face[:, :, 3] / 255.0)
                    frame[350:470, 10:130, c] = foreground + background
            break

        # Display the resulting frame
        cv2.imshow('Video', frame)

        if cv2.waitKey(1) & 0xFF == ord('q'):
            break

    video_capture.release()
    cv2.destroyAllWindows()
Example #7
def realtime_emotions():
    # load keras model
    model = define_model()
    model = model_weights(model)
    print('Model loaded')

    # save location for image
    save_loc = 'save_loc/1.jpg'
    # numpy matrix for storing prediction
    result = np.zeros((1, 7))
    # for knowing whether prediction has started or not
    once = False
    # load haar cascade for face
    faceCascade = cv2.CascadeClassifier(
        r'haarcascades/haarcascade_frontalface_default.xml')
    # list of given emotions
    EMOTIONS = [
        'Angry', 'Disgusted', 'Fearful', 'Happy', 'Sad', 'Surprised', 'Neutral'
    ]

    # store the emoji corresponding to different emotions
    emoji_faces = []
    for index, emotion in enumerate(EMOTIONS):
        emoji_faces.append(cv2.imread('emojis/' + emotion.lower() + '.png',
                                      -1))

    # set video capture device , webcam in this case
    video_capture = cv2.VideoCapture(0)
    video_capture.set(3, 640)  # WIDTH
    video_capture.set(4, 480)  # HEIGHT

    # save current time
    prev_time = time.time()

    # start webcam feed
    while True:
        # Capture frame-by-frame
        ret, frame = video_capture.read()
        # mirror the frame
        frame = cv2.flip(frame, 1, 0)
        gray = cv2.cvtColor(frame, cv2.COLOR_BGR2GRAY)

        # find face in the frame
        faces = faceCascade.detectMultiScale(gray,
                                             scaleFactor=1.1,
                                             minNeighbors=5,
                                             minSize=(30, 30),
                                             flags=cv2.CASCADE_SCALE_IMAGE)

        # Draw a rectangle around the faces
        for (x, y, w, h) in faces:
            # required region for the face
            roi_color = frame[y - 90:y + h + 70, x - 50:x + w + 50]

            # save the detected face
            cv2.imwrite(save_loc, roi_color)
            # draw a rectangle bounding the face
            cv2.rectangle(frame, (x - 10, y - 70), (x + w + 20, y + h + 40),
                          (15, 175, 61), 4)

            # keeps track of waiting time for emotion recognition
            curr_time = time.time()
            # do prediction only when the required elapsed time has passed
            if curr_time - prev_time >= 1:
                # read the saved image
                img = cv2.imread(save_loc, 0)

                if img is not None:
                    # indicates that prediction has been done at least once
                    once = True

                    # resize image for the model
                    img = cv2.resize(img, (48, 48))
                    img = np.reshape(img, (1, 48, 48, 1))
                    # do prediction
                    result = model.predict(img)
                    print(EMOTIONS[np.argmax(result[0])])

                # save the time when the last face recognition task was done
                prev_time = time.time()

            if once:
                total_sum = np.sum(result[0])
                # select the emoji face with highest confidence
                emoji_face = emoji_faces[np.argmax(result[0])]
                for index, emotion in enumerate(EMOTIONS):
                    text = str(
                        round(Decimal(result[0][index] / total_sum * 100),
                              2)) + "%"
                    # for drawing progress bar
                    cv2.rectangle(frame, (100, index * 20 + 10),
                                  (100 + int(result[0][index] * 100),
                                   (index + 1) * 20 + 4), (255, 0, 0), -1)
                    # for putting emotion labels
                    cv2.putText(frame, emotion, (10, index * 20 + 20),
                                cv2.FONT_HERSHEY_SIMPLEX, 0.5, (7, 109, 16), 2)
                    # for putting percentage confidence
                    cv2.putText(
                        frame, text,
                        (105 + int(result[0][index] * 100), index * 20 + 20),
                        cv2.FONT_HERSHEY_SIMPLEX, 0.5, (255, 0, 0), 1)

                # overlay emoji on the frame for all the channels
                for c in range(0, 3):
                    # for doing overlay we need to assign weights to both foreground and background
                    foreground = emoji_face[:, :,
                                            c] * (emoji_face[:, :, 3] / 255.0)
                    background = frame[350:470, 10:130,
                                       c] * (1.0 - emoji_face[:, :, 3] / 255.0)
                    frame[350:470, 10:130, c] = foreground + background
            break

        # Display the resulting frame
        cv2.imshow('Video', frame)

        if cv2.waitKey(1) & 0xFF == ord('q'):
            break

    # When everything is done, release the capture
    video_capture.release()
    cv2.destroyAllWindows()
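
A design note on the timing logic shared by these realtime loops: face detection and the green bounding box run on every frame, but the Keras prediction is throttled to at most once per second via prev_time/curr_time, and the once flag keeps drawing the last known probabilities and emoji in the frames in between, presumably to keep the webcam preview smooth even when inference is slow.
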
Example #8
def realtime_emotions():
    # load keras model
    model = define_model()
    model = model_weights(model)
    print('Model loaded')

    # for knowing whether prediction has started or not
    once = False
    # load haar cascade for face
    faceCascade = cv2.CascadeClassifier('haarcascade_frontalface_default.xml')
    # list of given emotions
    EMOTIONS = ['anger', 'happy', 'neutral', 'sad']

    # store the emoji corresponding to different emotions
    emoji_faces = []
    for index, emotion in enumerate(EMOTIONS):
        emoji_faces.append(cv2.imread('emojis/' + emotion.lower() + '.png', -1))

    # set video capture device , webcam in this case
    video_capture = cv2.VideoCapture(0)
    video_capture.set(3, 640)  # WIDTH
    video_capture.set(4, 480)  # HEIGHT

    # save current time
    prev_time = time.time()

    countt = 0
    # start webcam feed
    while True:
        # Capture frame-by-frame
        ret, frame = video_capture.read()
        # mirror the frame
        frame = cv2.flip(frame, 1, 0)
        gray = cv2.cvtColor(frame, cv2.COLOR_BGR2GRAY)

        # find face in the frame
        faces = faceCascade.detectMultiScale(
                    gray,
                    scaleFactor=1.1,
                    minNeighbors=5,
                    minSize=(30, 30),
                    flags=cv2.CASCADE_SCALE_IMAGE
                )
    
        # Draw a rectangle around the faces
        for (x, y, w, h) in faces:

            # draw a rectangle bounding the face
            cv2.rectangle(frame, (x-10, y-70),
                            (x+w+20, y+h+40), (15, 175, 61), 4)

            curr_time = time.time()
            if curr_time - prev_time >= 1:
                # cropping region of interest, i.e. face area, from the image
                # (rows are indexed by y/height, columns by x/width)
                roi_gray = gray[y:y + h, x:x + w]
                roi_gray = cv2.resize(roi_gray, (48, 48))
                roi_gray = np.reshape(roi_gray, (1, 48, 48, 1))
                mean = np.mean(roi_gray)
                std = np.std(roi_gray)
                roi_gray = (roi_gray - mean) / (std + 1e-7)

                # do prediction
                result = model.predict(roi_gray)
                print(EMOTIONS[np.argmax(result[0])])
                prev_time = time.time()
                # indicates that prediction has been done at least once
                once = True



            if once:
                total_sum = np.sum(result[0])
                # select the emoji face with highest confidence
                emoji_face = emoji_faces[np.argmax(result[0])]
                for index, emotion in enumerate(EMOTIONS):
                    text = str(
                        round(Decimal(result[0][index] / total_sum * 100), 2)) + "%"
                    # for drawing progress bar
                    cv2.rectangle(frame, (100, index * 20 + 10),
                                  (100 + int(result[0][index] * 100),
                                   (index + 1) * 20 + 4), (255, 0, 0), -1)
                    # for putting emotion labels
                    cv2.putText(frame, emotion, (10, index * 20 + 20),
                                cv2.FONT_HERSHEY_SIMPLEX, 0.5, (7, 109, 16), 2)
                    # for putting percentage confidence
                    cv2.putText(frame, text,
                                (105 + int(result[0][index] * 100), index * 20 + 20),
                                cv2.FONT_HERSHEY_SIMPLEX, 0.5, (255, 0, 0), 1)

                # overlay emoji on the frame for all the channels
                for c in range(0, 3):
                    # for doing overlay we need to assign weights to both foreground and background
                    foreground = emoji_face[:, :, c] * (emoji_face[:, :, 3] / 255.0)
                    background = frame[350:470, 10:130, c] * (1.0 - emoji_face[:, :, 3] / 255.0)
                    frame[350:470, 10:130, c] = foreground + background
            break

        # Display the resulting frame
        cv2.imshow('Video', frame)

        if cv2.waitKey(1) & 0xFF == ord('q'):
            break

    # When everything is done, release the capture
    video_capture.release()
    cv2.destroyAllWindows()
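
None of the snippets show how the script is started; a minimal, assumed entry point would simply be:

if __name__ == '__main__':
    # run the webcam demo; for the single-image examples call prediction_path('<image path>') instead
    realtime_emotions()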