# Example 1
    except Exception:
        print("----->Problem during resize")
        return None

#     return image
    output = np.array(image)
    output = np.reshape(output, (48, 48, 1))

    return output


# # Create Training set and Testing Set
# Load the dataset: `data` holds the preprocessed images, `file_name`
# their corresponding file names (see get_files(), defined elsewhere).
data, file_name = get_files()

# Build the emotion-recognition network once, before any prediction.
network = EMR()
network.build_network()

# NOTE(review): dead code — an earlier per-item prediction loop kept for
# reference; the active loop below replaces it.
# for item in data:
#     try:
#         result = network.predict([item])
#         maxindex = np.argmax(result[0])
#         # print(result[0])
#         print(EMOTIONS[maxindex])
#     except:
#         result = None
#         continue

# Per-class tally — presumably one counter per emotion class, filled by
# the loop below; TODO confirm it matches len(EMOTIONS).
ind = [0, 0, 0, 0, 0]

for i in range(len(data)):
    try:
# Example 2
def runDetection(captures):
    """Run live webcam emotion detection until `captures` frames are logged.

    Grabs frames from the default camera, classifies the dominant facial
    emotion with the EMR network, overlays per-class probability bars and
    the winning label on the preview window, and records each successful
    classification via insert(). Stops when `captures` classifications
    have been recorded, the camera stops delivering frames, or the user
    presses 'q'.

    Args:
        captures: number of classified frames to record before stopping.
    """
    # Initialize object of EMR class
    network = EMR()
    network.build_network()

    cap = cv2.VideoCapture(0)
    font = cv2.FONT_HERSHEY_SIMPLEX
    feelings_faces = []
    count = 0

    # Load the Haar cascade ONCE — the original re-read the XML file on
    # every loop iteration, which is pure loop-invariant overhead.
    facecasc = cv2.CascadeClassifier('haarcascade_frontalface_default.xml')

    # append the list with the emoji images (index-aligned with EMOTIONS)
    for index, emotion in enumerate(EMOTIONS):
        feelings_faces.append(cv2.imread('./emojis/' + emotion + '.png', -1))

    while True:
        ret, frame = cap.read()
        if not ret:
            # Camera disconnected or stream ended.
            break
        # Detect faces so we can draw a bounding box around the largest one.
        gray = cv2.cvtColor(frame, cv2.COLOR_BGR2GRAY)
        faces = facecasc.detectMultiScale(gray,
                                          scaleFactor=1.3,
                                          minNeighbors=5)

        # compute softmax probabilities
        result = network.predict(format_image(frame))
        if result is not None:
            # write the different emotions and have a bar to indicate probabilities for each class
            for index, emotion in enumerate(EMOTIONS):
                cv2.putText(frame, emotion, (10, index * 20 + 20),
                            font, 0.5, (0, 255, 0), 1)
                cv2.rectangle(frame, (130, index * 20 + 10),
                              (130 + int(result[0][index] * 100),
                               (index + 1) * 20 + 4), (255, 0, 0), -1)

            # find the emotion with maximum probability and display it
            maxindex = np.argmax(result[0])
            cv2.putText(frame, EMOTIONS[maxindex], (10, 360), font, 2,
                        (255, 255, 255), 2, cv2.LINE_AA)
            # NOTE(review): only consumed by the commented-out emoji overlay
            # below — dead until that code is restored.
            face_image = feelings_faces[maxindex]

            count = count + 1
            print("Emotion is: ", EMOTIONS[maxindex], " ID: ", maxindex + 1)
            # USERNAME is unset on non-Windows shells; os.environ.get then
            # returns None and .replace() would raise AttributeError.
            user = os.environ.get('USERNAME') or 'unknown'
            user = user.replace('.', ' ').title()
            insert(user, maxindex + 1)

            # for c in range(0, 3):
            #     # The shape of face_image is (x,y,4). The fourth channel is 0 or 1. In most cases it is 0, so, we assign the roi to the emoji.
            #     # You could also do: frame[200:320,10:130,c] = frame[200:320, 10:130, c] * (1.0 - face_image[:, :, 3] / 255.0)
            #     frame[200:320, 10:130, c] = face_image[:,:,c]*(face_image[:, :, 3] / 255.0) +  frame[200:320, 10:130, c] * (1.0 - face_image[:, :, 3] / 255.0)

        if len(faces) > 0:
            # draw box around face with maximum area
            max_area_face = faces[0]
            for face in faces:
                if face[2] * face[3] > max_area_face[2] * max_area_face[3]:
                    max_area_face = face
            (x, y, w, h) = max_area_face
            frame = cv2.rectangle(frame, (x, y - 50), (x + w, y + h + 10),
                                  (255, 0, 0), 2)

        cv2.imshow(
            'Video',
            cv2.resize(frame, None, fx=2, fy=2, interpolation=cv2.INTER_CUBIC))
        if cv2.waitKey(1) & 0xFF == ord('q'):
            break
        if count == captures:
            break
    cap.release()
    cv2.destroyAllWindows()