# Example #1  (scrape marker from the source this file was pasted from; score: 0)
# Build the emotion-recognition network once, up front.
network = EMR()
network.build_network()

# Per-emotion file counter: ind[k] numbers the files saved for emotion k.
# Generalized from a hard-coded 5-slot list so it always matches EMOTIONS.
ind = [0] * len(EMOTIONS)

# Classify each pre-processed sample and rename its source file to
# "./output_picture/<emotion><running-number>.PNG".
for item, name in zip(data, file_name):
    try:
        result = network.predict([item])
        maxindex = int(np.argmax(result[0]))
        ind[maxindex] += 1
        # Build "<emotion><count>" directly instead of the original
        # confusing str(EMOTIONS[maxindex] + str(...)) nesting.
        target = "./output_picture/{}{}.PNG".format(EMOTIONS[maxindex],
                                                    ind[maxindex])
        os.rename(name, target)
    except Exception as err:
        # Best-effort per item, but narrowed from a bare `except:` (which
        # also swallowed KeyboardInterrupt) and made visible in the log.
        print("skipping", name, "-", err)
        result = None
        continue
# Frame-stepped emotion overlay over a capture stream.
# Load the Haar face detector once instead of on every frame.
facecasc = cv2.CascadeClassifier('haarcascade_frontalface_default.xml')
while True:
    ret, frame = cap.read()
    if not ret:
        # Input stream exhausted.
        break
    gray = cv2.cvtColor(frame, cv2.COLOR_BGR2GRAY)
    faces = facecasc.detectMultiScale(gray, 1.3, 5)
    # Draw a (slightly padded) box and an emotion label on every face.
    for (x, y, w, h) in faces:
        frame = cv2.rectangle(frame, (x, y - 30), (x + w, y + h + 10),
                              (255, 0, 0), 2)
        # Crop the face, preview it, then scale to the 48x48 grayscale
        # input the network expects, normalized to [0, 1].
        face_crop = cv2.cvtColor(frame[y:y + h, x:x + w],
                                 cv2.COLOR_BGR2GRAY)
        cv2.imshow('naming', face_crop)
        face_crop = cv2.resize(face_crop, (48, 48),
                               interpolation=cv2.INTER_CUBIC) / 255.
        # NOTE(review): other call sites wrap the sample in a list
        # (network.predict([img])) -- confirm which form EMR expects.
        result = network.predict(face_crop)
        if result is not None:
            maxindex = np.argmax(result[0])
            cv2.putText(frame, EMOTIONS[maxindex], (x, y),
                        cv2.FONT_HERSHEY_SIMPLEX, 1,
                        (255, 255, 255), 2, cv2.LINE_AA)

    # Upscale, then fit into a fixed 900x600 display window.
    enlarged = cv2.resize(frame, None, fx=2, fy=2,
                          interpolation=cv2.INTER_CUBIC)
    cv2.imshow('Video', cv2.resize(enlarged, (900, 600)))
    # waitKey(0) blocks until a keypress: manual frame stepping.
    cv2.waitKey(0)
cap.release()
cv2.destroyAllWindows()
    # NOTE(review): this span is a fragment of a different capture loop --
    # its `while` header (and whatever defines `frame_number`, `ret`,
    # `frame`, and `cap`) is not visible here, so it is mis-indented
    # relative to the preceding top-level statements. The file appears to
    # be a concatenation of separate example scripts.
    frame_number += 1

    # Quit when the input video file ends
    if not ret:
        break

    # The face detector is reloaded on every iteration; the path points
    # at a project-local copy of the OpenCV frontal-face Haar cascade.
    facecasc = cv2.CascadeClassifier(
        './utility/emotionDetection/haarcascade_frontalface_default.xml')
    gray = cv2.cvtColor(frame, cv2.COLOR_BGR2GRAY)
    faces = facecasc.detectMultiScale(gray, 1.3, 5)
    # format_image presumably crops/scales the frame to the 48-wide input
    # checked below -- TODO confirm against its definition.
    new_frame = format_image(frame)

    # compute softmax probabilities
    if (len(new_frame) == 48):
        try:
            result = network.predict([new_frame])
        except:
            continue
    else:
        result = None

    # # compute softmax probabilities
    # if(len(new_frame) == 48):
    #     try:
    #         result = network.predict([format_image(frame)])
    #     except:
    #         continue
    # else:
    #     result = None

    # NOTE(review): the body of this `if` is missing -- the snippet is
    # truncated at this point.
    if result is not None:
# NOTE(review): start of yet another pasted example script; `cap`,
# `network`, `EMOTIONS`, `format_image`, and `keyboard`/`Key` (pynput,
# presumably) are all defined outside this visible span.
font = cv2.FONT_HERSHEY_SIMPLEX
feelings_faces = []

# append the list with the emoji images
for index, emotion in enumerate(EMOTIONS):
    feelings_faces.append(cv2.imread('./emojis/' + emotion + '.png', -1))
# iC is only initialized here; whatever increments it is outside this
# span. The keypress below fires only when iC == 6.
iC = 0
while True:
    # Again find haar cascade to draw bounding box around face
    ret, frame = cap.read()
    facecasc = cv2.CascadeClassifier('haarcascade_files/haarcascade_frontalface_default.xml')
    gray = cv2.cvtColor(frame, cv2.COLOR_BGR2GRAY)
    faces = facecasc.detectMultiScale(gray,scaleFactor=1.3, minNeighbors=5)

    # compute softmax probabilities
    result = network.predict(format_image(frame))
    if result is not None:
        # write the different emotions and have a bar to indicate probabilities for each class
        for index, emotion in enumerate(EMOTIONS):
            cv2.putText(frame, emotion, (10, index * 20 + 20), cv2.FONT_HERSHEY_SIMPLEX, 0.5, (0, 255, 0), 1);
            cv2.rectangle(frame, (130, index * 20 + 10), (130 + int(result[0][index] * 100), (index + 1) * 20 + 4), (255, 0, 0), -1)

        # find the emotion with maximum probability and display it
        maxindex = np.argmax(result[0])
        font = cv2.FONT_HERSHEY_SIMPLEX
        cv2.putText(frame,EMOTIONS[maxindex],(10,360), font, 2,(255,255,255),2,cv2.LINE_AA) 
        face_image = feelings_faces[maxindex]
        if iC == 6:
            if maxindex == 0: # if angry
                # Maps "angry" to a left-arrow keypress (input control
                # via the keyboard controller defined elsewhere).
                keyboard.press(Key.left)
                # NOTE(review): the snippet is truncated right after this
                # release; the rest of the while-loop body is missing.
                keyboard.release(Key.left)
# Example #5  (scrape marker from the source this file was pasted from; score: 0)
def runDetection(captures):
    """Run webcam emotion detection until `captures` emotions are logged.

    Opens the default camera, classifies each frame with the EMR
    network, overlays per-emotion probability bars plus the top emotion,
    records one database row per classified frame via insert(), and
    stops after `captures` classifications or when 'q' is pressed.
    """
    # Initialize object of EMR class and build the model once.
    network = EMR()
    network.build_network()

    cap = cv2.VideoCapture(0)
    font = cv2.FONT_HERSHEY_SIMPLEX
    count = 0

    # Emoji image per emotion, index-aligned with EMOTIONS. They are
    # only loaded here; the overlay that used them was removed.
    feelings_faces = [cv2.imread('./emojis/' + emotion + '.png', -1)
                      for emotion in EMOTIONS]

    # Load the Haar face detector once instead of on every frame.
    facecasc = cv2.CascadeClassifier('haarcascade_frontalface_default.xml')

    while True:
        ret, frame = cap.read()
        if not ret:
            # Camera closed or stream ended.
            break
        gray = cv2.cvtColor(frame, cv2.COLOR_BGR2GRAY)
        faces = facecasc.detectMultiScale(gray,
                                          scaleFactor=1.3,
                                          minNeighbors=5)

        # compute softmax probabilities for the whole (formatted) frame
        result = network.predict(format_image(frame))
        if result is not None:
            # Per-class label and probability bar down the left edge.
            for index, emotion in enumerate(EMOTIONS):
                cv2.putText(frame, emotion, (10, index * 20 + 20),
                            cv2.FONT_HERSHEY_SIMPLEX, 0.5, (0, 255, 0), 1)
                cv2.rectangle(frame, (130, index * 20 + 10),
                              (130 + int(result[0][index] * 100),
                               (index + 1) * 20 + 4), (255, 0, 0), -1)

            # Emotion with maximum probability, displayed large.
            maxindex = int(np.argmax(result[0]))
            cv2.putText(frame, EMOTIONS[maxindex], (10, 360), font, 2,
                        (255, 255, 255), 2, cv2.LINE_AA)

            count = count + 1
            print("Emotion is: ", EMOTIONS[maxindex], " ID: ", maxindex + 1)
            # USERNAME is Windows-specific and may be unset elsewhere;
            # fall back to USER, then a placeholder, instead of crashing
            # on None.replace().
            user = (os.environ.get('USERNAME') or os.environ.get('USER')
                    or 'unknown')
            user = user.replace('.', ' ').title()
            insert(user, maxindex + 1)

        if len(faces) > 0:
            # draw box around face with maximum area (w * h)
            (x, y, w, h) = max(faces, key=lambda f: f[2] * f[3])
            frame = cv2.rectangle(frame, (x, y - 50), (x + w, y + h + 10),
                                  (255, 0, 0), 2)

        cv2.imshow(
            'Video',
            cv2.resize(frame, None, fx=2, fy=2, interpolation=cv2.INTER_CUBIC))
        if cv2.waitKey(1) & 0xFF == ord('q'):
            break
        if count == captures:
            break
    cap.release()
    cv2.destroyAllWindows()