Ejemplo n.º 1
0
def make_player(canvas, engine="gstreamer"):
    """Build the sound-effect and background-music players for *canvas*.

    Both players are Emotion objects backed by the same *engine* module.
    Game events are wired to playback via ``add_hook``; the BGM player
    restarts its track every time it finishes, so it loops forever.
    """
    def halt(player):
        # An empty file name stops playback.
        player.file = ""

    def start(player, media):
        halt(player)
        player.file = join_path(DATA_DIR, media)
        player.play = True

    def loop(player):
        # Remember the track that just ended, reset, and play it again.
        track = player.file
        halt(player)
        start(player, track)

    sfx_player = Emotion(canvas, module_filename=engine)
    bgm_player = Emotion(canvas, module_filename=engine)

    bgm_player.on_playback_finished_add(loop)

    for event, callback, target in (
        ('play:sfx', start, sfx_player),
        ('game:end', halt, sfx_player),
        ('play:bgm', start, bgm_player),
        ('game:over', halt, bgm_player),
        ('game:end', halt, bgm_player),
    ):
        add_hook(event, callback, target)
 def _load_emotions(self, wn_domains_dir):
     """Load the WordNet-Affect emotion hierarchy from its XML file.

     Populates the class-level ``Emotion.emotions`` registry: the root
     category is created without a parent, every other category records
     its ``isa`` (parent) attribute.
     """
     hierarchy = ET.parse("{0}/a-hierarchy.xml".format(wn_domains_dir))
     for categ in hierarchy.getroot().findall("categ"):
         label = categ.get("name")
         if label == "root":
             Emotion.emotions[label] = Emotion(label)
         else:
             Emotion.emotions[label] = Emotion(label, categ.get("isa"))
Ejemplo n.º 3
0
 def __init__(self, label, clf):
     """Set up capture state, the video stream, and the emotion classifier.

     *label* is the UI widget the frames are rendered into; *clf* is the
     classifier handed to the Emotion wrapper.
     """
     # Placeholders that are only filled in once capture actually starts.
     for name in ("frame", "thread1", "thread2",
                  "stopEvent1", "stopEvent2", "started", "captured"):
         setattr(self, name, None)
     self.label = label
     self.vs = VideoStream().start()  # begin grabbing frames immediately
     self.fa = Face()
     self.emotion = Emotion(clf)
     self.outputPath = None
Ejemplo n.º 4
0
def main(path, friend, limit):
    """End-to-end pipeline: sentences -> emotion labels -> word clouds."""
    print('Loading sentences...')
    corpus = load_sentences(path=path, friend=friend, limit=limit)

    print('Inferring emotions...')
    predictions = Emotion().predict(corpus)

    print('Grouping sentences and labels...')
    grouped = group_sentences_and_labels(corpus, predictions)

    print('Creating wordclouds...')
    clouds = create_wordclouds(grouped)

    print('Plotting wordclouds...')
    plot_wordclouds(clouds)
Ejemplo n.º 5
0
 def compute_emotions(self, emo_labels, fps=4):
     """Run CNN emotion prediction on this clip's front-facing video.

     On success ``self.emotion`` is set to an Emotion object built from
     the per-timestamp predictions and 1 is returned; if the front video
     cannot be loaded, 0 is returned instead.
     """
     clip = Video([], self.duration, path=self.front_video, name=self.ID, fps=fps)

     if not clip.successful_loading:
         print('Front video of clip {} not found.'.format(self.ID))
         return 0

     # Detect and crop the user's face before feeding frames to the CNN.
     clip.face_detect(mode='crop', model='both')
     clip.emotions_prediction()

     # Per-timestamp scores for the 7 emotion classes, concatenated
     # column-wise with the winning class for each timestamp.
     scores = np.asarray(clip.emotions.preds7)
     winners = np.asarray(np.expand_dims(clip.emotions.best_guess7, axis=1))
     table = np.concatenate((scores, winners), axis=1)
     frame = pd.concat([pd.Series(row) for row in table], axis=1).T

     # Wrap in an Emotion object for further processing.
     self.emotion = Emotion(frame, emo_labels, fps=fps)
     return 1
Ejemplo n.º 6
0
#!/usr/bin/env python

# Playing a radio stream with EFL (Emotion)

import ecore
import ecore.evas
from emotion import Emotion

# Window/canvas size and the radio stream URL to play.
DSIZE = 320, 240
FILENAME = 'http://relay5.slayradio.org:8000/'

# Create an Ecore-Evas window and show it.
ee = ecore.evas.new()
ee.size = DSIZE
ee.show()

# Put an Emotion media object on the window's canvas.  NOTE(review):
# the 'generic' module presumably delegates decoding to an external
# player backend — confirm which backends are installed.
canvas = ee.evas
em = Emotion(canvas, module_filename='generic')
em.file = FILENAME  # assigning the URL loads the stream
em.size = DSIZE
em.play = True      # start playback immediately
em.show()

# Run the Ecore main loop until interrupted; all setup above happens at
# import time, only the loop itself is guarded by __main__.
if __name__ == '__main__':
    m = ecore.MainLoop()
    m.begin()