Example #1
def make_player(canvas, engine="gstreamer"):
    def stop(player):
        player.file = ""

    def play(player, media):
        stop(player)
        player.file = join_path(DATA_DIR, media)
        player.play = True

    def replay(player):
        # Restart the current file from the beginning
        fname = player.file
        stop(player)
        play(player, fname)

    sfx_player = Emotion(canvas, module_filename=engine)
    bgm_player = Emotion(canvas, module_filename=engine)

    # Loop the background music: replay whenever playback finishes
    bgm_player.on_playback_finished_add(replay)

    add_hook('play:sfx',  play, sfx_player)
    add_hook('game:end',  stop, sfx_player)

    add_hook('play:bgm',  play, bgm_player)
    add_hook('game:over', stop, bgm_player)
    add_hook('game:end',  stop, bgm_player)
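
The `add_hook` helper above comes from elsewhere in that project; a minimal sketch of what such an event registry could look like, assuming hooks map an event name to (callback, player) pairs (`_hooks` and `fire_hook` are hypothetical names, not part of the original):

_hooks = {}

def add_hook(event, callback, player):
    # Register a callback bound to a specific player for an event name
    _hooks.setdefault(event, []).append((callback, player))

def fire_hook(event, *args):
    # Invoke every callback registered for the event, e.g.
    # fire_hook('play:bgm', 'theme.ogg') -> play(bgm_player, 'theme.ogg')
    for callback, player in _hooks.get(event, []):
        callback(player, *args)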
Example #2
def _load_emotions(self, wn_domains_dir):
    """Loads the hierarchy of emotions from the WordNet-Affect XML."""
    tree = ET.parse("{0}/a-hierarchy.xml".format(wn_domains_dir))
    root = tree.getroot()
    for elem in root.findall("categ"):
        name = elem.get("name")
        if name == "root":
            Emotion.emotions["root"] = Emotion("root")
        else:
            # "isa" names the parent category in the hierarchy
            Emotion.emotions[name] = Emotion(name, elem.get("isa"))
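
Once loaded, `Emotion.emotions` can be walked upward with `get_level`, the same call Example #7 below uses to fetch a node's parent. A small sketch, assuming the root sits at level 0 and using `'joy'` as a hypothetical category name:

def print_ancestry(name):
    # Climb from an emotion to the root, one level at a time
    emo = Emotion.emotions[name]
    chain = [str(emo)]
    while emo.level > 0:
        emo = emo.get_level(emo.level - 1)
        chain.append(str(emo))
    print(' <- '.join(chain))

print_ancestry('joy')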
Example #4
def main(path, friend, limit):
    print('Loading sentences...')
    sentences = load_sentences(path=path, friend=friend, limit=limit)

    print('Inferring emotions...')
    labels = Emotion().predict(sentences)

    print('Grouping sentences and labels...')
    emotion_dict = group_sentences_and_labels(sentences, labels)

    print('Creating wordclouds...')
    wordclouds = create_wordclouds(emotion_dict)

    print('Plotting wordclouds...')
    plot_wordclouds(wordclouds)
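
The helpers called by `main` are not shown; the grouping step might look like the sketch below, which buckets each sentence under its predicted label (the function name is taken from the example, the body is an assumption):

from collections import defaultdict

def group_sentences_and_labels(sentences, labels):
    # Map each predicted emotion label to the sentences assigned to it
    emotion_dict = defaultdict(list)
    for sentence, label in zip(sentences, labels):
        emotion_dict[label].append(sentence)
    return dict(emotion_dict)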
Example #5
def compute_emotions(self, emo_labels, fps=4):
    # Import the video
    video = Video([], self.duration, path=self.front_video, name=self.ID, fps=fps)

    if video.successful_loading:
        # Detect and crop the face of the user
        video.face_detect(mode='crop', model='both')
        # Predict emotions with the CNN, using the cropped face as input
        video.emotions_prediction()
        # Seven predictions output by the network for each timestamp (4 fps)
        # along the video, concatenated with the winning emotion per timestamp
        preds7 = np.asarray(video.emotions.preds7)
        best_guess7 = np.asarray(np.expand_dims(video.emotions.best_guess7, axis=1))
        concat = np.concatenate((preds7, best_guess7), axis=1)
        df = pd.concat([pd.Series(x) for x in concat], axis=1).T
        # Create an Emotion instance for further processing
        self.emotion = Emotion(df, emo_labels, fps=fps)
        return 1
    else:
        print('Front video of clip {} not found.'.format(self.ID))
        return 0
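
The concatenation step pairs each timestamp's seven class scores with the winning class index; a standalone shape check with dummy data (8 timestamps and the random scores are arbitrary here):

import numpy as np

preds7 = np.random.rand(8, 7)              # 8 timestamps x 7 emotion scores
best_guess7 = preds7.argmax(axis=1)        # winning class per timestamp
concat = np.concatenate((preds7, np.expand_dims(best_guess7, axis=1)), axis=1)
print(concat.shape)                        # (8, 8): 7 scores + 1 winner per row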
Example #6
class Capture:
    def __init__(self, label, clf):
        self.frame = None
        self.thread1 = None
        self.thread2 = None
        self.stopEvent1 = None
        self.stopEvent2 = None
        self.started = None
        self.captured = None
        self.label = label
        self.vs = VideoStream().start()
        self.fa = Face()
        self.emotion = Emotion(clf)
        self.outputPath = None

    def start(self):
        # Mark the video loop as started so capture() and close() can
        # check the state later (the attribute is None until first use)
        self.started = True

        # Clear any previous stop event so videoLoop() keeps running
        self.stopEvent1 = None

        # If the capture thread is running, stop it first
        if (self.captured):
            self.stopEvent2.set()

        # Initialize and start the video-loop thread
        self.stopEvent1 = threading.Event()
        self.thread1 = threading.Thread(target=self.videoLoop, args=())
        self.thread1.start()

    def close(self):
        # Signal each thread to stop, but only if it was started
        if (self.started):
            self.stopEvent1.set()
        if (self.captured):
            self.stopEvent2.set()
        # self.vs.stop()

    def exit(self):
        if (self.started):
            self.stopEvent1.set()
            print("closing thread 1")
        if (self.captured):
            self.stopEvent2.set()
            print("closing thread 2")
        self.vs.stop()
        VideoStream().stop()

    def capture(self, emotion):
        # Mark the capture loop as started, for the same reason as `started`
        self.captured = True
        # self.emo = emotion
        self.outputPath = "Dataset" + os.sep + emotion
        if not os.path.exists(self.outputPath):
            os.mkdir(self.outputPath)

        # Clear any previous stop event so captureLoop() keeps running
        self.stopEvent2 = None

        # Stop the video-loop thread if it is running
        if (self.started):
            self.stopEvent1.set()

        # Initialize and start the capture thread
        self.stopEvent2 = threading.Event()
        self.thread2 = threading.Thread(target=self.captureLoop, args=())
        self.thread2.start()

    def snapshot(self):
        ts = datetime.datetime.now()
        filename = "{}.jpg".format(ts.strftime("%Y-%m-%d_%H-%M-%S"))
        output = os.path.sep.join((self.outputPath, filename))
        cv2.imwrite(output, self.frame.copy())
        self.showInfo()

    def showInfo(self):
        # `info` is assumed to be a dialog defined elsewhere in the application
        info.show()

    def videoLoop(self):
        try:
            # Loop until the stop event is set
            while not self.stopEvent1.is_set():
                self.frame = self.vs.read()

                gray = cv2.cvtColor(self.frame, cv2.COLOR_BGR2GRAY)
                image = cv2.cvtColor(self.frame, cv2.COLOR_BGR2RGB)

                faces = self.fa.detect(gray)

                image = self.emotion.getLandmarks(image, faces)

                image = Image.fromarray(image)

                image = ImageTk.PhotoImage(image)

                self.label.configure(image=image)
                self.label.image = image

        except RuntimeError as e:
            print("Error:", e)
Example #7
print('Negative_feedback Count : {}'.format(len(negative_feedbacks)))
print(negative_feedbacks)
print('\n' * 3)

# emotion
wna = WNAffect('emotion_analyzer/wordnet-1.6/', 'emotion_analyzer/wn-domains-3.2/')

# Part-of-speech tag each word, then look up its WordNet-Affect emotion
for f in feedbacks:
    for word in f.split(" "):
        tokens = nltk.word_tokenize(word)
        if not tokens:
            continue  # skip empty strings produced by repeated spaces
        pos = nltk.pos_tag(tokens=tokens)
        print(word)
        print(pos[0][1])

        emo = wna.get_emotion(str(word), str(pos[0][1]))
        print("Emotion for *{}*: {}".format(word, emo))

        if emo:
            parent = emo.get_level(emo.level - 1)
            print("Parent: {}".format(parent))

            e = Emotion.emotions[str(parent)]
            # print(e.nb_children())
            Emotion.printTree(e)
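
Tagging one word at a time gives the POS tagger no context; a variant that tags each feedback string in a single pass could look like this (same `wna.get_emotion` call, hypothetical loop):

for f in feedbacks:
    for word, tag in nltk.pos_tag(nltk.word_tokenize(f)):
        emo = wna.get_emotion(word, tag)
        if emo:
            print("Emotion for *{}*: {}".format(word, emo))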
Example #8
#!/usr/bin/env python

# Playing a radio stream with EFL (Emotion)

import ecore
import ecore.evas
from emotion import Emotion

DSIZE = 320, 240
FILENAME = 'http://relay5.slayradio.org:8000/'

ee = ecore.evas.new()
ee.size = DSIZE
ee.show()

canvas = ee.evas
em = Emotion(canvas, module_filename='generic')
em.file = FILENAME
em.size = DSIZE
em.play = True
em.show()

if __name__ == '__main__':
    m = ecore.MainLoop()
    m.begin()
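
A live stream normally never finishes, but the `on_playback_finished_add` hook seen in Example #1 could be registered before entering the main loop to bail out if the connection drops. A sketch: the single-argument callback signature follows Example #1, and `ecore.main_loop_quit` is an assumption about the binding's API.

def on_finished(player):
    # Leave the ecore main loop once playback stops
    print('Stream finished: {}'.format(player.file))
    ecore.main_loop_quit()

em.on_playback_finished_add(on_finished)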