import cv2
import numpy as np
from model import EMR

# prevent OpenCL usage and unnecessary logging messages
cv2.ocl.setUseOpenCL(False)

EMOTIONS = [
    'Angry', 'Disgusted', 'Fearful', 'Happy', 'Sad', 'Surprised', 'Neutral'
]

# Initialize object of EMR class
network = EMR()
network.build_network()
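
# EMR (defined in model.py, not shown here) is assumed to expose roughly this
# interface; this is inferred from how it is called in the snippets below,
# not its confirmed definition:
#   network.build_network()           -> builds/loads the tflearn CNN
#   network.predict(image)            -> per-class softmax scores, or None
#   network.train(X, y, vX, vy, n)    -> trains for n epochs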

# VideoCapture accepts a webcam index (0), a video file path, or a still
# image path; here a single image ('r.jpg') is read as a one-frame stream.
cap = cv2.VideoCapture('r.jpg')
font = cv2.FONT_HERSHEY_SIMPLEX

# load the Haar cascade once, outside the loop, so it isn't re-read per frame
facecasc = cv2.CascadeClassifier('haarcascade_frontalface_default.xml')

while True:
    ret, frame = cap.read()
    if not ret:
        break
    gray = cv2.cvtColor(frame, cv2.COLOR_BGR2GRAY)
    faces = facecasc.detectMultiScale(gray, scaleFactor=1.3, minNeighbors=5)
    if len(faces) > 0:
        # draw a box around each detected face
        for face in faces:
            (x, y, w, h) = face
            cv2.rectangle(frame, (x, y), (x + w, y + h), (255, 0, 0), 2)

    # display loop and cleanup reconstructed from the truncated source
    # (mirrors Example #7)
    cv2.imshow('Video', frame)
    if cv2.waitKey(1) & 0xFF == ord('q'):
        break

cap.release()
cv2.destroyAllWindows()


# Example #2
def format_image(image):
    # The head of this snippet was truncated by the source page; the signature
    # and try: are reconstructed minimally so the tail below parses. The full
    # face-detection logic that precedes the resize appears in Example #3.
    try:
        # resize the image so that it can be passed to the neural network
        image = cv2.resize(image, (48, 48),
                           interpolation=cv2.INTER_CUBIC) / 255.
    except Exception:
        print("----->Problem during resize")
        return None

    # add a channel axis so the array matches the network's (48, 48, 1) input
    output = np.array(image)
    output = np.reshape(output, (48, 48, 1))

    return output
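
# Hypothetical usage of format_image (names and the file path are
# illustrative, not from the original snippet):
# img = cv2.imread('face.jpg')
# tensor = format_image(img)
# if tensor is not None:
#     scores = network.predict([tensor])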


# Load the images to classify; get_files() is defined elsewhere in this project
data, file_name = get_files()

network = EMR()
network.build_network()

# for item in data:
#     try:
#         result = network.predict([item])
#         maxindex = np.argmax(result[0])
#         # print(result[0])
#         print(EMOTIONS[maxindex])
#     except:
#         result = None
#         continue

ind = [0, 0, 0, 0, 0]  # purpose unclear in the truncated source

# The loop body was truncated by the source page; a minimal reconstruction
# based on the commented-out prediction loop above:
for i in range(len(data)):
    result = network.predict([data[i]])
    maxindex = np.argmax(result[0])
    print(file_name[i], '->', EMOTIONS[maxindex])


# Example #3
def format_image(image):
    # The head of this snippet was truncated by the source page; the grayscale
    # conversion and largest-face selection are reconstructed from the same
    # logic shown in full in Example #7.
    if len(image.shape) > 2 and image.shape[2] == 3:
        image = cv2.cvtColor(image, cv2.COLOR_BGR2GRAY)
    # facecasc is the Haar cascade, assumed loaded once at module level as in
    # the surrounding snippets
    faces = facecasc.detectMultiScale(image, scaleFactor=1.3, minNeighbors=5)
    if len(faces) == 0:
        return None
    # keep the face with the largest bounding-box area
    max_area_face = faces[0]
    for face in faces:
        if face[2] * face[3] > max_area_face[2] * max_area_face[3]:
            max_area_face = face
    face = max_area_face

    # extract ROI of face: face is (x, y, w, h), so rows are y:y+h and
    # columns are x:x+w (the original swapped w and h here)
    image = image[face[1]:(face[1] + face[3]), face[0]:(face[0] + face[2])]

    try:
        # resize the image so that it can be passed to the neural network
        image = cv2.resize(image, (48, 48), interpolation=cv2.INTER_CUBIC) / 255.
    except Exception:
        print("----->Problem during resize")
        return None

    return image
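
# Note: unlike Example #2, this variant returns a plain (48, 48) array;
# EMR.predict is assumed to handle the missing channel axis itself.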

# Initialize object of EMR class
network = EMR()
network.build_network()

cap = cv2.VideoCapture(0)
font = cv2.FONT_HERSHEY_SIMPLEX
feelings_faces = []

# load the emoji images; -1 (IMREAD_UNCHANGED) keeps the alpha channel
for index, emotion in enumerate(EMOTIONS):
    feelings_faces.append(cv2.imread('./emojis/' + emotion + '.png', -1))
iC = 0  # frame counter; unused in the surviving part of this snippet
# load the Haar cascade once rather than on every iteration
facecasc = cv2.CascadeClassifier('haarcascade_files/haarcascade_frontalface_default.xml')
while True:
    ret, frame = cap.read()
    if not ret:
        break
    gray = cv2.cvtColor(frame, cv2.COLOR_BGR2GRAY)
    # (the rest of this loop was truncated by the source page; detection and
    # prediction continue as in Example #7)

# ---- separate snippet: training script ----

# Create the training and testing sets; make_sets() is defined elsewhere
# in this project
training_data, training_labels, testing_data, testing_labels = make_sets()

# number of epochs from the command line, defaulting to 50 (sys is assumed
# to be imported in this snippet's truncated head)
try:
    epoch = int(sys.argv[1])
except (IndexError, ValueError):
    epoch = 50

# Convert to numpy arrays
training_data = np.array(training_data)
training_labels = np.array(training_labels)

testing_data = np.array(testing_data)
testing_labels = np.array(testing_labels)

network = EMR()
network.build_network()

network.train(training_data, training_labels, testing_data, testing_labels,
              epoch)

print("Finish")

# Create the NN model (commented-out sketch; truncated by the source page)
# network = tflearn.input_data(shape=[None, 48, 48, 1])
# print("Input data     ", network.shape[1:])

# network = conv_2d(network, 64, 5, activation='relu')
# print("Conv1          ", network.shape[1:])

# network = max_pool_2d(network, 3, strides=2)
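
# A hedged, self-contained sketch of how the commented-out tflearn graph
# above plausibly continues. Everything past the first max_pool_2d (filter
# counts, the fully-connected width, dropout rate, optimizer) is an
# assumption, not the project's confirmed architecture.
import tflearn
from tflearn.layers.core import input_data, dropout, fully_connected
from tflearn.layers.conv import conv_2d, max_pool_2d
from tflearn.layers.estimator import regression

def build_emotion_network(num_classes=7):
    net = input_data(shape=[None, 48, 48, 1])       # 48x48 grayscale input
    net = conv_2d(net, 64, 5, activation='relu')
    net = max_pool_2d(net, 3, strides=2)
    net = conv_2d(net, 64, 5, activation='relu')    # assumed second conv block
    net = max_pool_2d(net, 3, strides=2)
    net = fully_connected(net, 1024, activation='relu')
    net = dropout(net, 0.8)                         # keep probability
    net = fully_connected(net, num_classes, activation='softmax')
    net = regression(net, optimizer='momentum',
                     loss='categorical_crossentropy')
    return tflearn.DNN(net)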

# ---- separate snippet: config-driven entry point ----
from model import EMR
from singleface import runDetection
import sys
import configparser

if __name__ == "__main__":
    print("\n------------Emotion Detection Program------------\n")
    network = EMR()

    config = configparser.ConfigParser()
    config.read('config.ini')
    captures = int(config['process']['Captures'])

    print(captures)
    runDetection(captures)
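
# Assumed config.ini layout for the snippet above (the section and key names
# are taken from the code; the value is illustrative):
# [process]
# Captures = 10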

# ---- separate snippet: emotion detection on a video file ----
def format_image(image):
    # Head truncated by the source page; the face-detection and cropping
    # logic that precedes this resize appears in full in Example #3.
    try:
        # resize the image so that it can be passed to the neural network
        image = cv2.resize(image, (48, 48),
                           interpolation=cv2.INTER_CUBIC) / 255.
    except Exception:
        print("----->Problem during resize")
        return None

    # add a channel axis so the array matches the network's (48, 48, 1) input
    output = np.array(image)
    output = np.reshape(output, (48, 48, 1))

    return output


# Initialize object of EMR class
network = EMR()
network.build_network()

# name = input('Input videoname (abc.xyz) : ')
# sys is assumed to be imported in this snippet's truncated head
try:
    name = sys.argv[1]
    print(name)
except IndexError:
    print('Please pass the video filename as a command-line argument.')
    sys.exit(1)

cap = cv2.VideoCapture(name)

length = int(cap.get(cv2.CAP_PROP_FRAME_COUNT))
frame_number = 0
fps = cap.get(cv2.CAP_PROP_FPS)
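
# A hedged sketch of how this truncated snippet plausibly continues: step
# through the video frame by frame and report a timestamp per prediction.
# The loop body follows the prediction pattern from Example #7 and reuses
# the variables defined above.
while frame_number < length:
    ret, frame = cap.read()
    if not ret:
        break
    frame_number += 1
    result = network.predict(format_image(frame))
    if result is not None:
        maxindex = np.argmax(result[0])
        # seconds into the video at the current frame
        print('%.2fs: %s' % (frame_number / fps, EMOTIONS[maxindex]))
cap.release()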


# Example #7
def runDetection(captures):
    # Initialize object of EMR class
    network = EMR()
    network.build_network()

    cap = cv2.VideoCapture(0)
    font = cv2.FONT_HERSHEY_SIMPLEX
    feelings_faces = []
    count = 0
    milliseconds = 0  # currently unused in the surviving snippet
    # load the emoji images; -1 (IMREAD_UNCHANGED) keeps the alpha channel
    for index, emotion in enumerate(EMOTIONS):
        feelings_faces.append(cv2.imread('./emojis/' + emotion + '.png', -1))
    # load the Haar cascade once, outside the loop
    facecasc = cv2.CascadeClassifier('haarcascade_frontalface_default.xml')
    while True:
        ret, frame = cap.read()
        if not ret:
            break
        gray = cv2.cvtColor(frame, cv2.COLOR_BGR2GRAY)
        # scaleFactor controls the image-pyramid step; minNeighbors filters
        # out detections with too few overlapping neighbours
        faces = facecasc.detectMultiScale(gray,
                                          scaleFactor=1.3,
                                          minNeighbors=5)

        # compute softmax probabilities; EMR.predict is assumed to return
        # None when format_image finds no face
        result = network.predict(format_image(frame))
        if result is not None:
            # write the different emotions and have a bar to indicate probabilities for each class
            for index, emotion in enumerate(EMOTIONS):
                cv2.putText(frame, emotion, (10, index * 20 + 20),
                            cv2.FONT_HERSHEY_SIMPLEX, 0.5, (0, 255, 0), 1)
                cv2.rectangle(frame, (130, index * 20 + 10),
                              (130 + int(result[0][index] * 100),
                               (index + 1) * 20 + 4), (255, 0, 0), -1)

            # find the emotion with maximum probability and display it
            maxindex = np.argmax(result[0])
            font = cv2.FONT_HERSHEY_SIMPLEX
            cv2.putText(frame, EMOTIONS[maxindex], (10, 360), font, 2,
                        (255, 255, 255), 2, cv2.LINE_AA)
            face_image = feelings_faces[maxindex]

            count = count + 1
            print("Emotion is: ", EMOTIONS[maxindex], " ID: ", maxindex + 1)
            user = os.environ.get('USERNAME')
            user = user.replace('.', ' ').title()
            insert(user, maxindex + 1)

            # for c in range(0, 3):
            #     # The shape of face_image is (x,y,4). The fourth channel is 0 or 1. In most cases it is 0, so, we assign the roi to the emoji.
            #     # You could also do: frame[200:320,10:130,c] = frame[200:320, 10:130, c] * (1.0 - face_image[:, :, 3] / 255.0)
            #     frame[200:320, 10:130, c] = face_image[:,:,c]*(face_image[:, :, 3] / 255.0) +  frame[200:320, 10:130, c] * (1.0 - face_image[:, :, 3] / 255.0)

        if len(faces) > 0:
            # draw box around face with maximum area
            max_area_face = faces[0]
            for face in faces:
                if face[2] * face[3] > max_area_face[2] * max_area_face[3]:
                    max_area_face = face
            (x, y, w, h) = max_area_face
            frame = cv2.rectangle(frame, (x, y - 50), (x + w, y + h + 10),
                                  (255, 0, 0), 2)

        cv2.imshow(
            'Video',
            cv2.resize(frame, None, fx=2, fy=2, interpolation=cv2.INTER_CUBIC))
        if cv2.waitKey(1) & 0xFF == ord('q'):
            break
        if count == captures:
            break
    cap.release()
    cv2.destroyAllWindows()
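
# A minimal sketch of the emoji alpha-blend that the commented-out loop inside
# runDetection performs, factored into a standalone helper. It assumes the
# emoji PNG was read with its alpha channel intact (the -1 / IMREAD_UNCHANGED
# flag used above) and is exactly 120x120, matching the target region.
def overlay_emoji(frame, emoji):
    alpha = emoji[:, :, 3] / 255.0  # per-pixel opacity from the alpha channel
    for c in range(3):
        frame[200:320, 10:130, c] = (emoji[:, :, c] * alpha +
                                     frame[200:320, 10:130, c] * (1.0 - alpha))
    return frame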