Example #1
class EmotionRecognizer(object):
    def __init__(self, emotions=[
        'disgust',
        'happiness',
        'surprise',
    ]):
        self.sub = rospy.Subscriber('/body_tracker/rgb/raw_image', Image,
                                    self.recognize)
        self.pub = rospy.Publisher('emotion_image',
                                   numpy_msg(Image),
                                   queue_size=0)
        self.count = 0
        self.target_emotions = emotions
        self.detector = dlib.get_frontal_face_detector()
        self.model = FERModel(self.target_emotions, verbose=True)

    def recognize(self, msg):
        try:
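            # Convert the ROS Image message buffer into a (height, width, channels) numpy array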
            frame = np.frombuffer(msg.data, dtype=np.uint8).reshape(
                msg.height, msg.width, -1)

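            # Detect faces with dlib's frontal face detector (the second argument upsamples the image once)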
            dets = self.detector(frame, 1)

            assert len(dets) > 0, "There is no face!"

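            # Classify the first detected face crop with FERModel and show it; publishing is currently commented out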
            for d in dets:
                self.model.predict(frame[d.top():d.bottom(),
                                         d.left():d.right()])
                cv2.imshow("a", frame[d.top():d.bottom(), d.left():d.right()])
                cv2.waitKey(1)
                #self.pub.publish(ros_frame)
                break
        except Exception as e:
            print("Error: " + str(e))
Example #2
 def __init__(self, labels, queue_size=10):
     self.labels = labels
     self.pub = rospy.Publisher('rosemo_pub/video', EmotionResult, queue_size=queue_size)
     self.camera = cv2.VideoCapture(0)
     self.camera.set(cv2.CAP_PROP_FRAME_WIDTH, 1920)
     self.camera.set(cv2.CAP_PROP_FRAME_HEIGHT, 1080)
     self.camera.set(cv2.CAP_PROP_FPS, 24)
     self.path = '/rosemo/data/' + str(int(time()))
     os.mkdir(self.path)
     self.model = FERModel(self.labels, verbose=True)
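     # PATH is expected to point to a Haar cascade XML file used for face detection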
     self.face_detector = cv2.CascadeClassifier(PATH)
Example #3
def main():

    print("\n\n")
    print("Press c to identify emotion or Esc to exit.")
    print("The object should cover at least 80 percent of the captured image")

    target_emotions = ['anger', 'happiness', 'calm']
    model_emotions = FERModel(target_emotions, verbose=True)
    model = keras.models.load_model('weights-improvement-07-0.97.hdf5')

    video = cv2.VideoCapture(0)
    ret, inputImg = video.read()
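    # setROI is a helper (not shown here) that returns the crop bounds applied below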
    xo, xb, yo, yb = setROI(inputImg.shape, 900, 720)
    inputImg = inputImg[xo:xb, yo:yb]
    alignment = AlignDlib('landmarks.dat')
    boxColor = (0, 0, 255)
    boxThickness = 3
    text = ''

    key = 0
    while key != 27:
        ret, inputImg = video.read()
        inputImg = inputImg[xo:xb, yo:yb]
        bb = alignment.getLargestFaceBoundingBox(inputImg)

        if bb is not None:
            pt1 = (bb.left(), bb.top())
            pt2 = (bb.left() + bb.width(), bb.height() + bb.top())
            cv2.rectangle(inputImg, pt1, pt2, boxColor, boxThickness)

        if key == 99:  # 99 == ord('c')
            if bb is not None:
                imgAligned = alignment.align(
                    96,
                    inputImg,
                    bb,
                    landmarkIndices=AlignDlib.OUTER_EYES_AND_NOSE)
                # imgAligned = tensor_img[:, :, 0]
                text = model_emotions.predict_image_array(imgAligned)

        ###########################################################################
        #Insert Text
        inputImg = cv2.putText(inputImg, text, (50, 50), 0, 1.0, (0, 255, 0),
                               2)
        #Title
        cv2.imshow("Face Analysis", inputImg)
        #Key check
        key = cv2.waitKey(1)

    video.release()
Example #4
 def __init__(self, emotions=[
     'disgust',
     'happiness',
     'surprise',
 ]):
     self.sub = rospy.Subscriber('/body_tracker/rgb/raw_image', Image,
                                 self.recognize)
     self.pub = rospy.Publisher('emotion_image',
                                numpy_msg(Image),
                                queue_size=0)
     self.count = 0
     self.target_emotions = emotions
     self.detector = dlib.get_frontal_face_detector()
     self.model = FERModel(self.target_emotions, verbose=True)
Example #5
def get_emotion_from_camera():

    # Specify the camera which you want to use. The default argument is '0'
    video_capture = cv2.VideoCapture(0)
    file = 'image_data/image.jpg'
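    # capture_image (not shown here) grabs a frame from the camera and writes it to the given file path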
    frame = capture_image(video_capture, file)

    if frame is not None:
        # You can choose other target emotions from the supported subsets
        # defined in fermodel.py (see `_check_emotion_set_is_supported`).
        target_emotions = ['calm', 'anger', 'happiness']
        model = FERModel(target_emotions, verbose=True)

        frame_string = model.predict(file)
        display_prediction(frame, frame_string)
    else:
        print("Image could not be captured")
Example #6
def get_emotion_from_camera():
    #target_emotions = ['anger', 'fear', 'calm', 'sadness', 'happiness', 'surprise', 'disgust']
    
    target_emotions = ['anger', 'happiness', 'calm']

    model = FERModel(target_emotions, verbose=True)

    # Specify the camera which you want to use. The default argument is '0'
    video_capture = cv2.VideoCapture(0)
    # Capturing a smaller image for speed purposes
    video_capture.set(cv2.CAP_PROP_FRAME_WIDTH, 640)
    video_capture.set(cv2.CAP_PROP_FRAME_HEIGHT, 480)
    video_capture.set(cv2.CAP_PROP_FPS, 15)

    # NOTE: currently writes each captured frame to a file on disk
    file = 'image_data/image.jpg'

    while True:
        frame = capture_image(video_capture, file)
        if frame is not None:
            # You can choose other target emotions from the supported subsets
            # defined in fermodel.py (see `_check_emotion_set_is_supported`).
            #target_emotions = ['calm', 'anger', 'happiness']

            frame_string = model.predict(file)
            display_prediction(frame, frame_string)
        else:
            print("Image could not be captured")
            break

        key = cv2.waitKey(200)
        # Press Esc to exit the window
        if key == 27: # or cv2.getWindowProperty(window_name, cv2.WND_PROP_VISIBLE) < 1:
            break

    # Close all windows
    cv2.destroyAllWindows()

    # Stop capture
    video_capture.release()
Example #7
def emopy(request):
    template = 'video.html'
    fontFace = cv2.FONT_HERSHEY_SIMPLEX
    fontScale = 1
    thickness = 2

    #Specify the camera which you want to use. The default argument is '0'
    video_capture = cv2.VideoCapture(0)
    # Capturing a smaller image for speed purposes
    video_capture.set(cv2.CAP_PROP_FRAME_WIDTH, 640)
    video_capture.set(cv2.CAP_PROP_FRAME_HEIGHT, 360)
    video_capture.set(cv2.CAP_PROP_FPS, 15)

    # You can choose other target emotions from the supported subsets defined
    # in fermodel.py (see `_check_emotion_set_is_supported`).
    target_emotions = ['happiness', 'disgust', 'surprise']
    model = FERModel(target_emotions, verbose=True)

    ret, frame = video_capture.read()
    r = os.path.dirname(os.path.abspath(__file__))
    r = r.replace("monitoring", "IoTransit_web")
    file = r + '/static/image_data/image.jpg'
    cv2.imwrite(file, frame)

    frameString = model.predict(file)
    m = frameString
    image = Image.open(file)
    draw = ImageDraw.Draw(image)
    # Or use /usr/share/fonts/truetype/ttf-dejavu/DejaVuSerif.ttf.
    font = ImageFont.load_default()
    color = 'rgb(255, 255, 255)'
    draw.text((50, 50), frameString, font=font, fill="black")
    image.save(file)
    K.clear_session()
    context = {'image': image, 'estado': m}
    return render(request, template, context=context)
Example #8
def dothing(b64_bytes):
    #b64_bytes = sys.argv [1]
    b64_string = b64_bytes.decode()
    im = imread(io.BytesIO(base64.b64decode(b64_string)))
    im = cv2.cvtColor(im, cv2.COLOR_RGB2BGR)


    # You can choose other target emotions from the supported subsets defined
    # in fermodel.py (see `_check_emotion_set_is_supported`).

    target_emotions = ['calm', 'anger', 'happiness']
    model = FERModel(target_emotions, verbose=True)

    #Capture frame
    file = 'face.jpg'
    cv2.imwrite(file, im)	

    frameString = model.predict(file)

    # Display emotion (reuse the prediction instead of running the model twice)
    output = str(frameString)
    print(output)
    return output
Example #9
def init_ahegao_classifier_emopy():
    target_emotions = ['anger', 'fear', 'surprise', 'calm']
    model = FERModel(target_emotions, verbose=True)

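    # Return a closure that maps an input frame to a dict of {emotion: probability}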
    def ahegao_classification_emopy(frame):
        gray_image = frame
        if len(frame.shape) > 2:
            gray_image = cv2.cvtColor(frame, code=cv2.COLOR_BGR2GRAY)
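        # Resize the grayscale face to the model's input size, then add channel and batch axes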
        resized_image = cv2.resize(gray_image, model.target_dimensions, interpolation=cv2.INTER_CUBIC)
        final_image = np.expand_dims(np.expand_dims(resized_image, -1), 0)
        prediction = model.model.predict(final_image)[0]
        # normalized_prediction = prediction / sum(prediction)
        normalized_prediction = softmax(prediction)
        return {emotion: normalized_prediction[i] for i, emotion in enumerate(model.emotion_map.keys())}

    return ahegao_classification_emopy
Example #10
from EmoPy.src.fermodel import FERModel

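# Instantiate one FERModel per target-emotion subset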
models = []
models.append(FERModel(['happiness', 'anger'], verbose=True))
models.append(FERModel(['surprise', 'disgust', 'sadness'], verbose=True))
models.append(FERModel(['surprise', 'disgust', 'happiness'], verbose=True))


Example #11
                'message_type': 'text',
                'text': data['text']
            }).get_result()['output']['intents'][0]['intent']

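        # Post a reply in the originating Slack channel, keyed by the detected emotion and the Watson intent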
        web_client = payload['web_client']
        channel_id = data['channel']
        web_client.chat_postMessage(channel=channel_id,
                                    text=text[emotion][intent])

        if intent == "感謝":  # "感謝" means "thanks"
            rtm_client = payload['rtm_client']
            rtm_client.stop()


target_emotions = ['anger', 'happiness', 'calm']
model = FERModel(target_emotions, verbose=True)
model.model._make_predict_function()
emotion = ''

service = AssistantV2(iam_apikey=os.environ["WATSON_API_KEY"],
                      version='2019-02-28',
                      url='https://gateway.watsonplatform.net/assistant/api')
session_id = service.create_session(
    assistant_id=os.environ["ASSISTANT_ID"]).get_result()['session_id']

text = json.load(open('text.json'))

slack_token = os.environ["SLACK_BOT_TOKEN"]
rtm_client = slack.RTMClient(token=slack_token)
rtm_client.start()
Example #12
from EmoPy.src.fermodel import FERModel
from pkg_resources import resource_filename

target_emotions = ['calm', 'anger', 'happiness']
model = FERModel(target_emotions, verbose=True)

print('Predicting on happy image...')
model.predict(
    resource_filename('EmoPy.examples', 'image_data/sample_happy_image.png'))

print('Predicting on disgust image...')
model.predict(
    resource_filename('EmoPy.examples', 'image_data/sample_disgust_image.png'))

print('Predicting on anger image...')
model.predict(
    resource_filename('EmoPy.examples', 'image_data/sample_anger_image2.png'))
Example #13
def main():
    target_emotions = ['anger', 'fear', 'surprise', 'calm']
    model = FERModel(target_emotions, verbose=True)

    # prevents opencl usage and unnecessary logging messages
    cv2.ocl.setUseOpenCL(False)
    EMOTIONS = [
        'angry', 'disgusted', 'fearful', 'happy', 'sad', 'surprised', 'neutral'
    ]

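    # Open the prerecorded input video (alternative clips are commented out below)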
    cap = cv2.VideoCapture('videos/6614059835535133954.mp4')
    # cap = cv2.VideoCapture('videos/6620049294030277893.mp4')
    # cap = cv2.VideoCapture('videos/6628669426822548741.mp4')
    # Load one emoji image per emotion (-1 keeps the alpha channel)
    feelings_faces = [
        cv2.imread('./emojis/' + emotion + '.png', -1) for emotion in EMOTIONS
    ]

    while True:
        # Again find haar cascade to draw bounding box around face
        ret, frame = cap.read()
        if not ret:
            break

        # compute softmax probabilities
        result = predict_frame(model, frame)
        # TODO: fix and re-enable the overlay code below
        # if result is not None:
        #     # write the different emotions and have a bar to indicate probabilities for each class
        #     for index, emotion in enumerate(EMOTIONS):
        #         cv2.putText(frame, emotion, (10, index * 20 + 20), cv2.FONT_HERSHEY_SIMPLEX, 0.5, (0, 255, 0), 1)
        #         cv2.rectangle(frame, (130, index * 20 + 10), (130 + int(result[0][index] * 100), (index + 1) * 20 + 4),
        #                       (255, 0, 0), -1)
        #
        #     # find the emotion with maximum probability and display it
        #     maxindex = np.argmax(result[0])
        #     font = cv2.FONT_HERSHEY_SIMPLEX
        #     cv2.putText(frame, EMOTIONS[maxindex], (10, 360), font, 2, (255, 255, 255), 2, cv2.LINE_AA)
        #     face_image = feelings_faces[maxindex]
        #
        #     for c in range(0, 3):
        #         # The shape of face_image is (x,y,4). The fourth channel is 0 or 1. In most cases it is 0, so, we assign the roi to the emoji.
        #         # You could also do: frame[200:320,10:130,c] = frame[200:320, 10:130, c] * (1.0 - face_image[:, :, 3] / 255.0)
        #         frame[200:320, 10:130, c] = face_image[:, :, c] * (face_image[:, :, 3] / 255.0) + frame[200:320, 10:130,
        #                                                                                           c] * (
        #                                                 1.0 - face_image[:, :, 3] / 255.0)
        #
        # if len(faces) > 0:
        #     # draw box around face with maximum area
        #     max_area_face = faces[0]
        #     for face in faces:
        #         if face[2] * face[3] > max_area_face[2] * max_area_face[3]:
        #             max_area_face = face
        #     face = max_area_face
        #     (x, y, w, h) = max_area_face
        #     frame = cv2.rectangle(frame, (x, y - 50), (x + w, y + h + 10), (255, 0, 0), 2)

        cv2.imshow(
            'Video',
            cv2.resize(frame, None, fx=1, fy=1, interpolation=cv2.INTER_CUBIC))
        if cv2.waitKey(1) & 0xFF == ord('q'):
            break

    cap.release()
    cv2.destroyAllWindows()
Example #14
from EmoPy.src.fermodel import FERModel
from pkg_resources import resource_filename

def toCSVrow(filename, emotionLevels):
    # list comprehension modified from Method #3 of
    # https://www.geeksforgeeks.org/python-program-to-convert-a-list-to-string/
    listToStr = ','.join([str(elem) for elem in emotionLevels])
    return filename + "," + listToStr + "\n"


#target_emotions = ['calm', 'anger', 'happiness']
target_emotions = FERModel.POSSIBLE_EMOTIONS
model = FERModel(target_emotions, verbose=True)

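# LIP_face_files.txt is expected to list one face-image filename per line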
faces = open("LIP_face_files.txt",'r')

predictionFile = open("FERmodel_LIP_predictions.csv",'w')

predictionFile.write(toCSVrow("filename",model.emotion_map.keys()))
# Read the first file
for file in faces.readlines():
#file = faces.readline()
#if True:
    fileNamePrefix = file.split('.')[0]
    
    fPath = "LIP_data/" + fileNamePrefix + ".jpg"
    print('Predicting on',fPath)
    prediction = model.predict(resource_filename('EmoPy.examples',fPath))
    predictionFile.write(toCSVrow(fPath,prediction[0]))

Example #15
 def __init__(self, lecture_instance, target_emotions):
     self.lecture_instance = lecture_instance
     self.target_emotions = target_emotions
     self.model = FERModel(target_emotions, verbose=False)
Example #16
fontFace = cv2.FONT_HERSHEY_SIMPLEX
fontScale = 1
thickness = 2

#Specify the camera which you want to use. The default argument is '0'
video_capture = cv2.VideoCapture(0)
# Capturing a smaller image for speed purposes
video_capture.set(cv2.CAP_PROP_FRAME_WIDTH, 640)
video_capture.set(cv2.CAP_PROP_FRAME_HEIGHT, 360)
video_capture.set(cv2.CAP_PROP_FPS, 15)

# You can choose other target emotions from the supported subsets defined
# in fermodel.py (see `_check_emotion_set_is_supported`).
target_emotions = ['calm', 'anger', 'happiness']
model = FERModel(target_emotions, verbose=True)

while True:
    #Capture frame-by-frame
    ret, frame = video_capture.read()
    #Save the captured frame on disk
    file = 'image_data/image.jpg'
    cv2.imwrite(file, frame)

    frameString = model.predict(file)

    #Display emotion
    retval, baseline = cv2.getTextSize(frameString, fontFace, fontScale,
                                       thickness)
    cv2.rectangle(frame, (0, 0), (20 + retval[0], 50), (0, 0, 0), -1)
    cv2.putText(frame, frameString, (10, 35), fontFace, fontScale,
                (255, 255, 255), thickness)  # remaining args assumed; the source snippet is cut off here
Example #17
from EmoPy.src.fermodel import FERModel
from pkg_resources import resource_filename
import cv2 as cv

# these are all the emotions but there are pretrained models only for subsets
#target_emotions = ['calm', 'anger', 'happiness', 'disgust', 'surprise', 'sadness', 'fear']

target_emotions = ['surprise', 'anger',
                   'fear']  #['calm', 'anger', 'happiness']
model = FERModel(target_emotions, verbose=True)

cam = cv.VideoCapture(0)

while True:
    retval, frame = cam.read()

    model.predict_frame(image=frame)

    cv.imshow('cam', frame)
    cv.waitKey(1) & 0xFF == ord('q')
    if cv.waitKey(1) & 0xFF == ord('q'):
        break
Example #18
#    'surprise', 'disgust', 'fear', 'sadness']
# target_emotions = ['anger', 'fear', 'surprise', 'calm']
# target_emotions = ["surprise", "sadness", "happiness", "anger"]
# target_emotions = ['angry', 'disgust', 'fear',
#                    'happy', 'sad', 'surprise', 'neutral']
# target_emotions = ['happiness', 'surprise', 'disgust']
# target_emotions = ['anger', 'fear', 'surprise']
# target_emotions = ['anger', 'calm', 'happiness']
# target_emotions = ['Anger', 'Disgust', 'Fear',
#                    'Happy', 'Sad', 'Surprise', 'Neutral']
target_emotions = ['sadness', 'disgust', 'calm',
                   'fear', 'happiness', 'anger', 'surprise']
# target_emotions = ['anger', 'happiness']
# target_emotions = ["anger", "happiness", "neutral", "sadness", "surprise"]

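# Build the FERModel for the selected emotion subset (alternative model-loading approaches are kept commented out below)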
model = FERModel(target_emotions, verbose=True)

# model = load_model(
#     "./output/custom.h5", custom_objects={'SliceLayer': SliceLayer, 'ChannelShuffle': ChannelShuffle,
#                                           'PadZeros': PadZeros})

# model = model_from_json(
#     open("./output/model_4layer_2_2_pool.json", "r").read())
# model.load_weights('./output/model_4layer_2_2_pool.h5')

# model = load_model(
#     "./output/lightvgg2.h5")

count = 0
target_emotion = "anger"
Example #19
    'image_data/fear.png', 'image_data/happiness.png',
    'image_data/surprise.png'
]

target_emotions = ['surprise', 'anger', 'fear', 'calm']
target_emotions2 = ['surprise', 'disgust', 'happiness']
target_emotions3 = ['surprise', 'anger', 'fear']
target_emotions4 = ['anger', 'fear', 'calm']
target_emotions5 = ['anger', 'calm', 'happiness']
target_emotions6 = ['anger', 'fear', 'disgust']
target_emotions7 = ['disgust', 'surprise', 'calm']
target_emotions8 = ['surprise', 'disgust', 'sadness']
target_emotions9 = ['anger', 'happiness']

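# Build one FERModel per target-emotion subset defined above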
models = [
    FERModel(target_emotions, verbose=True),
    FERModel(target_emotions2, verbose=True),
    FERModel(target_emotions3, verbose=True),
    FERModel(target_emotions4, verbose=True),
    FERModel(target_emotions5, verbose=True),
    FERModel(target_emotions6, verbose=True),
    FERModel(target_emotions7, verbose=True),
    FERModel(target_emotions8, verbose=True),
    FERModel(target_emotions9, verbose=True),
]


def extract_data(filepath):
    print('Predicting on ' + filepath)

    #   format data
Example #20
class RosemoVideoServer:
    def __init__(self, labels, queue_size=10):
        self.labels = labels
        self.pub = rospy.Publisher('rosemo_pub/video', EmotionResult, queue_size=queue_size)
        self.camera = cv2.VideoCapture(0)
        self.camera.set(cv2.CAP_PROP_FRAME_WIDTH, 1920)
        self.camera.set(cv2.CAP_PROP_FRAME_HEIGHT, 1080)
        self.camera.set(cv2.CAP_PROP_FPS, 24)
        self.path = '/rosemo/data/' + str(int(time()))
        os.mkdir(self.path)
        self.model = FERModel(self.labels, verbose=True)
        self.face_detector = cv2.CascadeClassifier(PATH)

    def capture_image(self, camera, file, face_file):
        face = None
        if camera.isOpened():
            ret = False
            print("Capturing image ...")

            counter = 0
            max_tries = 100
            while not ret and camera.isOpened():
                counter += 1
                if counter >= max_tries:
                    return None
                # Capture frame-by-frame
                ret, frame = camera.read()
            # Save the captured frame on disk
            frame = cv2.cvtColor(frame, code=cv2.COLOR_BGR2GRAY)
            cv2.imwrite(file, frame)
            # Detect the face and crop it
            faces = self.face_detector.detectMultiScale(frame, 1.25, 6)
            for f in faces:
                # Define the region in the image
                x, y, w, h = [v for v in f]
                face = frame[y:y + h, x:x + w]
                cv2.imwrite(face_file, face)
            print("Face written to: ", face_file)
        else:
            print("Cannot access the webcam")

        return face

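    # Capture a frame, crop the detected face, predict its emotion, and log the labels and result next to the image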
    def get_emotion_from_camera(self):
        file_name = str(time())
        file = f'{self.path}/{file_name}.jpg'
        face_file = f'{self.path}/{file_name}_face.jpg'
        face = self.capture_image(self.camera, file, face_file)

        if face is not None:
            # You can choose other target emotions from the supported subsets
            # defined in fermodel.py (see `_check_emotion_set_is_supported`).
            result = self.model.predict(face_file)
            with open(f'{self.path}/{file_name}.txt', 'w') as f:
                f.write(f"{str(self.labels)}\n{str(result)}")
            return result
        else:
            print("Face could not be detected")
            return None

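    # Publish an EmotionResult roughly twice per second until the ROS node shuts down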
    def run(self):
        rospy.init_node('rosemo_video')
        rate = rospy.Rate(2)  # 2hz
        print("Ready: Rosemo Video Server")
        while not rospy.is_shutdown():
            result = self.get_emotion_from_camera()
            if result is not None:
                rospy.loginfo(f'Result: {result}')
                self.pub.publish(EmotionResult(self.labels, result))
            rate.sleep()
        self.camera.release()
Example #21
from EmoPy.src.fermodel import FERModel
from EmoPy.src.ph import SmartLight
from pkg_resources import resource_filename
import cv2
import time
import random

target_emotions = ['fear', 'calm', 'anger', 'happiness', 'disgust', 'sadness', 'surprise']
model = FERModel(target_emotions, verbose=True)

fontFace = cv2.FONT_HERSHEY_SIMPLEX
fontScale = 1
thickness = 2

video_capture = cv2.VideoCapture(0)
# Capturing a smaller image for speed purposes
video_capture.set(cv2.CAP_PROP_FRAME_WIDTH, 80)
video_capture.set(cv2.CAP_PROP_FRAME_HEIGHT, 80)

while True:
    ret, frame = video_capture.read()
    #to_save = cv2.cvtColor(frame1, cv2.COLOR_BGR2GRAY)
    cv2.imwrite(resource_filename('EmoPy.examples', '1.jpg'), frame)
    print('Predicting on image...')
    frameString = model.predict(resource_filename('EmoPy.examples', '1.jpg'))
    if cv2.waitKey(1) & 0xFF == 27:
        break



Example #22
import logging

from EmoPy.src.fermodel import FERModel
from emotion_detector.const import target_emotions


model = FERModel(target_emotions)


def detect_emotion(image_path=''):
    try:
        return model.predict(image_path)
    except Exception as e:
        logging.error('Emotion detection failed! Error: {}'.format(e))
        return None

Example #23
from EmoPy.src.fermodel import FERModel
import os

target = ['anger', 'happiness']  # anger, happiness
model = FERModel(target, verbose=True)
file = "core/data/image.jpg"
assert os.path.isfile(file)

# https://github.com/thoughtworksarts/EmoPy


def use_gpu():
    """
    gpuを使用する場合はgpuの指定のためにコードを実行する必要があります。
    :return: None
    """
    print("USING GPU")
    from keras.backend.tensorflow_backend import set_session
    import tensorflow as tf

    config = tf.ConfigProto(gpu_options=tf.GPUOptions(
        visible_device_list="0",  # specify GPU number
        allow_growth=True))
    set_session(tf.Session(config=config))


def main():
    global file
    res, ps = model.predict(file)
    print(res, ps)
    return res, round(ps, 1)
Example #24
from EmoPy.src.fermodel import FERModel

from pkg_resources import resource_filename

import cv2
import numpy as np

counter = 0

target_emotions = ["calm", "happiness", "anger"]
model = FERModel(target_emotions, verbose=True)
font = cv2.FONT_HERSHEY_SIMPLEX

cap = cv2.VideoCapture("./test5.mp4")

fps = cap.get(cv2.CAP_PROP_FPS)

fps = np.int32(fps)

print("Frames Per Second:", fps, "\n")

ret, frame = cap.read()

emotion = ""

while ret:
    cv2.imwrite("test.jpg", frame)
    if counter == 0:
        emotion = model.predict("test.jpg")
    if counter == (fps - 1):
        counter = 0
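    # Overlay the most recently predicted emotion on the current frame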
    cv2.putText(frame, emotion, (15, 15), font, 0.5, (255, 0, 0), 2,