Example #1
import cv2
import dlib
import numpy as np
import rospy
from rospy.numpy_msg import numpy_msg
from sensor_msgs.msg import Image

from EmoPy.src.fermodel import FERModel


class EmotionRecognizer(object):
    # A tuple default avoids the mutable-default-argument pitfall
    def __init__(self, emotions=('disgust', 'happiness', 'surprise')):
        self.sub = rospy.Subscriber('/body_tracker/rgb/raw_image', Image,
                                    self.recognize)
        self.pub = rospy.Publisher('emotion_image',
                                   numpy_msg(Image),
                                   queue_size=0)
        self.count = 0
        self.target_emotions = emotions
        self.detector = dlib.get_frontal_face_detector()
        self.model = FERModel(self.target_emotions, verbose=True)

    def recognize(self, msg):
        try:
            frame = np.frombuffer(msg.data, dtype=np.uint8).reshape(
                msg.height, msg.width, -1)

            # Detect faces (the second argument upsamples the image once)
            dets = self.detector(frame, 1)

            assert len(dets) > 0, "There is no face!"

            for d in dets:
                self.model.predict(frame[d.top():d.bottom(),
                                         d.left():d.right()])
                cv2.imshow("a", frame[d.top():d.bottom(), d.left():d.right()])
                cv2.waitKey(1)
                #self.pub.publish(ros_frame)
                break
        except Exception as e:
            print("Error: " + str(e))
Example #2

import base64
import io

import cv2
from imageio import imread  # imread source assumed; any BytesIO-capable imread works

from EmoPy.src.fermodel import FERModel


def dothing(b64_bytes):
    # b64_bytes = sys.argv[1]
    b64_string = b64_bytes.decode()
    im = imread(io.BytesIO(base64.b64decode(b64_string)))
    im = cv2.cvtColor(im, cv2.COLOR_RGB2BGR)

    # Other target emotions can be chosen from the emotion subset defined in
    # fermodel.py in the src directory, in the function
    # `def _check_emotion_set_is_supported(self):`.
    target_emotions = ['calm', 'anger', 'happiness']
    model = FERModel(target_emotions, verbose=True)

    # Save the frame to disk; FERModel.predict expects a file path
    file = 'face.jpg'
    cv2.imwrite(file, im)

    # Predict and display the emotion
    output = str(model.predict(file))
    print(output)
    return output
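A quick way to exercise dothing() (a sketch; the input image name is a placeholder):

import base64

with open('sample_happy_image.png', 'rb') as f:  # hypothetical input file
    encoded = base64.b64encode(f.read())
print(dothing(encoded))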
Example #3
import cv2

from EmoPy.src.fermodel import FERModel

# capture_image() and display_prediction() are assumed local helpers;
# sketches of both follow Examples #3 and #4.


def get_emotion_from_camera():
    # Specify the camera which you want to use. The default argument is 0
    video_capture = cv2.VideoCapture(0)
    file = 'image_data/image.jpg'
    frame = capture_image(video_capture, file)

    if frame is not None:
        # Other target emotions can be chosen from the emotion subset defined in
        # fermodel.py in the src directory, in the function
        # `def _check_emotion_set_is_supported(self):`.
        target_emotions = ['calm', 'anger', 'happiness']
        model = FERModel(target_emotions, verbose=True)

        frame_string = model.predict(file)
        display_prediction(frame, frame_string)
    else:
        print("Image could not be captured")
Example #4
import cv2

from EmoPy.src.fermodel import FERModel


def get_emotion_from_camera():
    # Other subsets can be chosen from the emotion sets defined in fermodel.py,
    # in `def _check_emotion_set_is_supported(self):`.
    # target_emotions = ['anger', 'fear', 'calm', 'sadness', 'happiness', 'surprise', 'disgust']
    target_emotions = ['anger', 'happiness', 'calm']

    model = FERModel(target_emotions, verbose=True)

    # Specify the camera which you want to use. The default argument is 0
    video_capture = cv2.VideoCapture(0)
    # Capturing a smaller image for speed purposes
    video_capture.set(cv2.CAP_PROP_FRAME_WIDTH, 640)
    video_capture.set(cv2.CAP_PROP_FRAME_HEIGHT, 480)
    video_capture.set(cv2.CAP_PROP_FPS, 15)

    # NOTE: currently writes each captured frame to a file on disk
    file = 'image_data/image.jpg'
    while True:
        frame = capture_image(video_capture, file)
        if frame is not None:
            frame_string = model.predict(file)
            display_prediction(frame, frame_string)
        else:
            print("Image could not be captured")
            break

        key = cv2.waitKey(200)
        # Press Esc to exit the window
        if key == 27:  # or: cv2.getWindowProperty(window_name, cv2.WND_PROP_VISIBLE) < 1
            break

    # Close all windows
    cv2.destroyAllWindows()

    # Stop capture
    video_capture.release()
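display_prediction() is likewise undefined; a sketch, assuming it overlays the predicted label on the frame the way the OpenCV drawing code in the webcam example below (Example #9) does:

def display_prediction(frame, frame_string):
    # Hypothetical helper: draw the predicted label and show the frame
    cv2.putText(frame, frame_string, (10, 35), cv2.FONT_HERSHEY_SIMPLEX,
                1, (255, 255, 255), 2, cv2.LINE_AA)
    cv2.imshow('Video', frame)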
Example #5
import os

import cv2
from django.shortcuts import render
from keras import backend as K
from PIL import Image, ImageDraw, ImageFont

from EmoPy.src.fermodel import FERModel


def emopy(request):
    template = 'video.html'
    fontFace = cv2.FONT_HERSHEY_SIMPLEX
    fontScale = 1
    thickness = 2

    # Specify the camera which you want to use. The default argument is 0
    video_capture = cv2.VideoCapture(0)
    # Capturing a smaller image for speed purposes
    video_capture.set(cv2.CAP_PROP_FRAME_WIDTH, 640)
    video_capture.set(cv2.CAP_PROP_FRAME_HEIGHT, 360)
    video_capture.set(cv2.CAP_PROP_FPS, 15)

    # Other target emotions can be chosen from the emotion subset defined in
    # fermodel.py in the src directory, in the function
    # `def _check_emotion_set_is_supported(self):`.
    target_emotions = ['happiness', 'disgust', 'surprise']
    model = FERModel(target_emotions, verbose=True)

    ret, frame = video_capture.read()
    # Release the camera as soon as the single frame has been captured
    video_capture.release()
    r = os.path.dirname(os.path.abspath(__file__))
    r = r.replace("monitoring", "IoTransit_web")
    file = r + '/static/image_data/image.jpg'
    cv2.imwrite(file, frame)

    frameString = model.predict(file)
    m = frameString
    image = Image.open(file)
    draw = ImageDraw.Draw(image)
    # Alternatively: /usr/share/fonts/truetype/ttf-dejavu/DejaVuSerif.ttf
    font = ImageFont.load_default()
    draw.text((50, 50), frameString, font=font, fill="black")
    image.save(file)
    K.clear_session()
    context = {'image': image, 'estado': m}
    return render(request, template, context=context)
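For completeness, a hypothetical URLconf entry wiring the view up (module and route names are placeholders; the view also expects a video.html template that renders the estado value):

# urls.py (sketch)
from django.urls import path

from . import views

urlpatterns = [
    path('emopy/', views.emopy, name='emopy'),
]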
Example #6
from pkg_resources import resource_filename

from EmoPy.src.fermodel import FERModel


def toCSVrow(filename, emotionLevels):
    # List comprehension modified from Method #3 of
    # https://www.geeksforgeeks.org/python-program-to-convert-a-list-to-string/
    listToStr = ','.join([str(elem) for elem in emotionLevels])
    return filename + "," + listToStr + "\n"


# target_emotions = ['calm', 'anger', 'happiness']
target_emotions = FERModel.POSSIBLE_EMOTIONS
model = FERModel(target_emotions, verbose=True)

faces = open("LIP_face_files.txt", 'r')

predictionFile = open("FERmodel_LIP_predictions.csv", 'w')

# Header row: the file name column plus one column per emotion
predictionFile.write(toCSVrow("filename", model.emotion_map.keys()))
# Predict on every face image listed in LIP_face_files.txt
for file in faces.readlines():
    fileNamePrefix = file.split('.')[0]

    fPath = "LIP_data/" + fileNamePrefix + ".jpg"
    print('Predicting on', fPath)
    prediction = model.predict(resource_filename('EmoPy.examples', fPath))
    predictionFile.write(toCSVrow(fPath, prediction[0]))

faces.close()
predictionFile.close()
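The script assumes LIP_face_files.txt lists one image filename per line; a hedged helper to generate that list from a directory of LIP crops (the directory name is a placeholder):

import os

with open("LIP_face_files.txt", 'w') as out:
    for name in sorted(os.listdir("LIP_data")):  # hypothetical image directory
        if name.endswith('.jpg'):
            out.write(name + "\n")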
Example #7

import cv2
from pkg_resources import resource_filename

from EmoPy.src.fermodel import FERModel

target_emotions = ['fear', 'calm', 'anger', 'happiness', 'disgust', 'sadness', 'surprise']
model = FERModel(target_emotions, verbose=True)

fontFace = cv2.FONT_HERSHEY_SIMPLEX
fontScale = 1
thickness = 2

video_capture = cv2.VideoCapture(0)
#Capturing a smaller image for speed purposes
video_capture.set(cv2.CAP_PROP_FRAME_WIDTH, 80)
video_capture.set(cv2.CAP_PROP_FRAME_HEIGHT, 80)

while True:
    ret, frame = video_capture.read()
    #to_save = cv2.cvtColor(frame1, cv2.COLOR_BGR2GRAY)
    cv2.imwrite(resource_filename('EmoPy.examples', '1.jpg'), frame)
    print('Predicting on image...')
    frameString = model.predict(resource_filename('EmoPy.examples', '1.jpg'))
    cv2.putText(frame, frameString, (10, 35), fontFace, fontScale,
                (255, 255, 255), thickness, cv2.LINE_AA)
    cv2.imshow('Video', frame)
    # Press Esc to exit the window
    if cv2.waitKey(1) & 0xFF == 27:
        break
video_capture.release()
cv2.destroyAllWindows()
Example #8
from EmoPy.src.fermodel import FERModel
from pkg_resources import resource_filename

target_emotions = ['calm', 'anger', 'happiness']
model = FERModel(target_emotions, verbose=True)

print('Predicting on happy image...')
model.predict(
    resource_filename('EmoPy.examples', 'image_data/sample_happy_image.png'))

print('Predicting on disgust image...')
model.predict(
    resource_filename('EmoPy.examples', 'image_data/sample_disgust_image.png'))

print('Predicting on anger image...')
model.predict(
    resource_filename('EmoPy.examples', 'image_data/sample_anger_image2.png'))
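Several of these examples note that only certain emotion subsets are supported; FERModel validates the set in _check_emotion_set_is_supported(). A quick hedged check, assuming an unsupported subset raises ValueError:

try:
    FERModel(['happiness', 'boredom'], verbose=True)  # 'boredom' is not a FERModel label
except ValueError as err:
    print('Unsupported emotion subset:', err)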
Example #9

import cv2

from EmoPy.src.fermodel import FERModel

fontFace = cv2.FONT_HERSHEY_SIMPLEX
fontScale = 1
thickness = 2

# Specify the camera which you want to use. The default argument is 0
video_capture = cv2.VideoCapture(0)
# Capturing a smaller image for speed purposes
video_capture.set(cv2.CAP_PROP_FRAME_WIDTH, 640)
video_capture.set(cv2.CAP_PROP_FRAME_HEIGHT, 360)
video_capture.set(cv2.CAP_PROP_FPS, 15)

# Other target emotions can be chosen from the emotion subset defined in
# fermodel.py in the src directory, in the function
# `def _check_emotion_set_is_supported(self):`.
target_emotions = ['calm', 'anger', 'happiness']
model = FERModel(target_emotions, verbose=True)

while True:
    #Capture frame-by-frame
    ret, frame = video_capture.read()
    #Save the captured frame on disk
    file = 'image_data/image.jpg'
    cv2.imwrite(file, frame)

    frameString = model.predict(file)

    #Display emotion
    retval, baseline = cv2.getTextSize(frameString, fontFace, fontScale,
                                       thickness)
    cv2.rectangle(frame, (0, 0), (20 + retval[0], 50), (0, 0, 0), -1)
    cv2.putText(frame, frameString, (10, 35), fontFace, fontScale,
                (255, 255, 255), thickness, cv2.LINE_AA)
    cv2.imshow('Video', frame)

    # Press Esc to exit the window; a single waitKey call both refreshes
    # the window and reads the key press
    if cv2.waitKey(1) & 0xFF == 27:
        break
# Close all windows and stop capture
cv2.destroyAllWindows()
video_capture.release()
Example #10
import os

from pkg_resources import resource_filename

from EmoPy.src.fermodel import FERModel

# target_emotions must be defined before the model is built; the full
# seven-emotion set is assumed here
target_emotions = ['anger', 'fear', 'calm', 'sadness', 'happiness', 'surprise', 'disgust']
model = FERModel(target_emotions, verbose=True)

# model = load_model(
#     "./output/custom.h5", custom_objects={'SliceLayer': SliceLayer, 'ChannelShuffle': ChannelShuffle,
#                                           'PadZeros': PadZeros})

# model = model_from_json(
#     open("./output/model_4layer_2_2_pool.json", "r").read())
# model.load_weights('./output/model_4layer_2_2_pool.h5')

# model = load_model(
#     "./output/lightvgg2.h5")

count = 0
target_emotion = "anger"

print('Predicting on ' + target_emotion + ' images...')

files = [file for file in os.listdir(
    "./image_data/FER2013Test_Sorted/" + target_emotion) if not file.startswith('.')]

for file in files:
    print("\n" + file)
    emotion = model.predict(resource_filename(
        'EmoPy.examples', 'image_data/FER2013Test_Sorted/' + target_emotion + '/' + file))
    if emotion == target_emotion:
        count = count + 1

# Fraction of test images classified as the target emotion
print(count / len(files))
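The same loop generalizes to a per-class report; a sketch, assuming the FER2013Test_Sorted layout of one folder per emotion and that predict() returns the label string as this example already assumes:

for target_emotion in target_emotions:
    folder = "./image_data/FER2013Test_Sorted/" + target_emotion
    files = [f for f in os.listdir(folder) if not f.startswith('.')]
    hits = sum(model.predict(os.path.join(folder, f)) == target_emotion
               for f in files)
    print(target_emotion, hits / len(files))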
Example #11
import os
from time import time

import cv2
import rospy

from EmoPy.src.fermodel import FERModel
# EmotionResult is this project's custom ROS message type; PATH is the path
# to an OpenCV Haar cascade file, assumed to be defined at module level.


class RosemoVideoServer:
    def __init__(self, labels, queue_size=10):
        self.labels = labels
        # Forward queue_size to the ROS publisher
        self.pub = rospy.Publisher('rosemo_pub/video', EmotionResult,
                                   queue_size=queue_size)
        self.camera = cv2.VideoCapture(0)
        self.camera.set(cv2.CAP_PROP_FRAME_WIDTH, 1920)
        self.camera.set(cv2.CAP_PROP_FRAME_HEIGHT, 1080)
        self.camera.set(cv2.CAP_PROP_FPS, 24)
        self.path = '/rosemo/data/' + str(int(time()))
        os.mkdir(self.path)
        self.model = FERModel(self.labels, verbose=True)
        self.face_detector = cv2.CascadeClassifier(PATH)

    def capture_image(self, camera, file, face_file):
        face = None
        if camera.isOpened():
            ret = False
            print("Capturing image ...")

            counter = 0
            max_tries = 100
            while not ret and camera.isOpened():
                counter += 1
                if counter >= max_tries:
                    return None
                # Capture frame-by-frame
                ret, frame = camera.read()
            # Save the captured frame on disk
            frame = cv2.cvtColor(frame, code=cv2.COLOR_BGR2GRAY)
            cv2.imwrite(file, frame)
            # Detect the face and crop it
            faces = self.face_detector.detectMultiScale(frame, 1.25, 6)
            for f in faces:
                # Define the region in the image
                x, y, w, h = [v for v in f]
                face = frame[y:y + h, x:x + w]
                cv2.imwrite(face_file, face)
                print("Face written to: ", face_file)
        else:
            print("Cannot access the webcam")

        return face

    def get_emotion_from_camera(self):
        file_name = str(time())
        file = f'{self.path}/{file_name}.jpg'
        face_file = f'{self.path}/{file_name}_face.jpg'
        face = self.capture_image(self.camera, file, face_file)

        if face is not None:
            # Other target emotions can be chosen from the emotion subset defined
            # in fermodel.py in the src directory, in the function
            # `def _check_emotion_set_is_supported(self):`.
            result = self.model.predict(face_file)
            with open(f'{self.path}/{file_name}.txt', 'w') as f:
                f.write(f"{str(self.labels)}\n{str(result)}")
            return result
        else:
            print("Face could not be detected")
            return None

    def run(self):
        rospy.init_node('rosemo_video')
        rate = rospy.Rate(2)  # 2hz
        print("Ready: Rosemo Video Server")
        while not rospy.is_shutdown():
            result = self.get_emotion_from_camera()
            if result is not None:
                rospy.loginfo(f'Result: {result}')
                self.pub.publish(EmotionResult(self.labels, result))
            rate.sleep()
        self.camera.release()
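A minimal entry point for the server (a sketch; the label list is a placeholder and must be one of FERModel's supported subsets):

if __name__ == '__main__':
    server = RosemoVideoServer(['calm', 'anger', 'happiness'])
    server.run()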
Example #12
import cv2
import numpy as np

from EmoPy.src.fermodel import FERModel

# This snippet uses `model` and `counter` without defining them; a supported
# emotion subset and a zeroed frame counter are assumed here
target_emotions = ['calm', 'anger', 'happiness']
model = FERModel(target_emotions, verbose=True)

font = cv2.FONT_HERSHEY_SIMPLEX

cap = cv2.VideoCapture("./test5.mp4")

fps = cap.get(cv2.CAP_PROP_FPS)
fps = np.int32(fps)

print("Frames Per Second:", fps, "\n")

ret, frame = cap.read()

counter = 0
emotion = ""

while ret:
    cv2.imwrite("test.jpg", frame)
    # Re-run the prediction once per second of video (every `fps` frames)
    if counter % fps == 0:
        emotion = model.predict("test.jpg")
    cv2.putText(frame, emotion, (15, 15), font, 0.5, (255, 0, 0), 2,
                cv2.LINE_AA)
    cv2.imshow("Output", frame)
    counter = counter + 1
    if cv2.waitKey(5) == 27:
        break
    ret, frame = cap.read()

cv2.destroyAllWindows()
cap.release()
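Most of these examples round-trip every frame through a fixed file on disk because FERModel.predict takes a file path. A hedged alternative using a throwaway temp file, which avoids cluttering the working directory and name collisions between concurrent runs:

import os
import tempfile

def predict_frame(model, frame):
    # Hypothetical helper: write the frame to a scratch file, predict, clean up
    fd, path = tempfile.mkstemp(suffix='.jpg')
    os.close(fd)
    try:
        cv2.imwrite(path, frame)
        return model.predict(path)
    finally:
        os.remove(path)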