Example No. 1
import cv2
from fer import FER

def video():
    cap = cv2.VideoCapture(0, cv2.CAP_DSHOW)
    if not cap.isOpened():
        raise IOError("Cannot open webcam")

    fourcc = cv2.VideoWriter_fourcc(*'XVID')
    out = cv2.VideoWriter('output.avi', fourcc, 20.0, (640, 480))

    detector = FER()  # create the detector once, not on every frame

    while True:
        ret, frame = cap.read()

        if ret:
            # frame = cv2.flip(frame, 0)
            # out.write(frame)
            # gray = cv2.cvtColor(frame, cv2.COLOR_BGR2GRAY)

            print(detector.top_emotion(frame))

            # cv2.imshow('frame', frame)

            if cv2.waitKey(1) & 0xFF == ord('q'):
                break
        else:
            break

    cap.release()
    out.release()
    cv2.destroyAllWindows()
Example No. 2
import os

from flask import request, render_template
import matplotlib.pyplot as plt
from fer import FER

def upload():
    target = os.path.join(APP_ROOT, 'images/')
    print(target)
    if not os.path.isdir(target):
        os.mkdir(target)
    else:
        print("Upload directory already exists: {}".format(target))
        #print(request.files.getlist("file"))
    for upload in request.files.getlist("file"):
        print(upload)
        #print("{} is the file name".format(upload.filename))

        filename = upload.filename
        destination = os.path.join(target, filename)
        upload.save(destination)

    folder = 'images'
    ex = folder + '/' + filename
    img = plt.imread(ex)
    detector = FER(mtcnn=True)
    emotion, score = detector.top_emotion(img)
    ans = a[emotion]  # `a` is an emotion-to-text mapping defined elsewhere

    return render_template("complete_display_image.html",
                           image_name=ex,
                           text=ans)
import os

import cv2
from fer import FER

def run_FER(path):
    img = cv2.imread(path)
    image_name = os.path.basename(path)
    detector = FER()
    emotion_label = detector.detect_emotions(img)

    return {image_name: emotion_label}
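
A minimal usage sketch (the file name is a placeholder; the structure of the value mirrors what fer's detect_emotions returns):

    result = run_FER("face.jpg")
    # e.g. {'face.jpg': [{'box': [x, y, w, h],
    #                     'emotions': {'angry': 0.0, 'disgust': 0.0, 'fear': 0.1,
    #                                  'happy': 0.7, 'sad': 0.0, 'surprise': 0.1,
    #                                  'neutral': 0.1}}]}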
Example No. 4
    def run(self):
        # Capture from webcam
        width = 400  # width of the captured video frame, in pixels
        height = 300  # height of the captured video frame, in pixels
        video_capture_device = cv2.VideoCapture(0)
        video_capture_device.set(cv2.CAP_PROP_FRAME_WIDTH, width)
        video_capture_device.set(cv2.CAP_PROP_FRAME_HEIGHT, height)

        fps = 0  # initialize the fps counter to 0
        detector = FER()  # initialize facial expression recognition

        while True:
            startTime = time.time()  # start time for the fps calculation

            # At roughly 20 fps, 30 frames allow for a time difference of a
            # little more than a second.
            frameCounter = 30  # frames to process before calculating a new fps

            for i in range(frameCounter):
                if self.isInterruptionRequested():
                    video_capture_device.release()
                    return
                ret, frame = video_capture_device.read()
                if ret:
                    self.new_frame_signal.emit(frame)

                    # When no face is detected, the emotions array is empty, so
                    # detector.top_emotion() raises an IndexError. The try block
                    # runs when a face is detected; the except block runs when
                    # no face is detected and the IndexError is thrown.
                    try:
                        # get the top emotion and score from the video frame
                        emotion, score = detector.top_emotion(frame)
                        # output the magnitude and type of emotion to the GUI
                        UI.emotionMagLabel.setText("Score: " + str(score))
                        UI.emotionTypeLabel.setText("Emotion: " + emotion)
                    except IndexError:  # no face is detected
                        # score and emotion are unavailable without a face
                        UI.emotionMagLabel.setText("Score N/A")
                        UI.emotionTypeLabel.setText("Emotion N/A")

                    UI.outputFPS.setText("Frames Per Second: " + str(fps))  # output the current fps

            stopTime = time.time()  # stop time for the fps calculation
            # calculate the current fps, rounded to 3 decimal places
            fps = round(frameCounter / float(stopTime - startTime), 3)
Example No. 5
    def get_mood(self, *args):
        img = plt.imread("selfie1.jpg")
        detector = FER(mtcnn=True)
        valence = self.get_valence(detector, img)
        energy = self.get_energy(detector, img)
        print(detector.detect_emotions(img))
        print(valence, energy)
        self.create_playlist(valence, energy)
import streamlit as st
from fer import FER

def getEmotions(img):
    detector = FER(mtcnn=True)
    result = detector.detect_emotions(img)
    if not result:  # no face detected
        st.write('No result')
        return False
    return result[0]['emotions']
Example No. 7
import matplotlib.pyplot as plt
from fer import FER

def emotions():
    img = plt.imread("temp/temp.jpg")
    detector = FER(mtcnn=True)
    key_value = detector.detect_emotions(img)[0]['emotions']
    # sort the emotions by score, descending, and keep the top three
    emotions = sorted(key_value, key=key_value.get, reverse=True)[:3]
    return {"emo": emotions}
import cv2
from fer import FER

class EmotionDetection():
    def __init__(self):
        self.detector = FER()

    def detect(self):
        self.img = cv2.imread("image/image.jpg")
        # top_emotion() runs detection itself, so no separate
        # detect_emotions() call is needed
        emotion, score = self.detector.top_emotion(self.img)
        print("The emotion of the person:", emotion)
Example No. 9
    def run(self):
        """Captures webcam frames & processes emotions"""
        # Capture from webcam
        width = 320
        height = 240
        video_capture_device = cv2.VideoCapture(0)
        video_capture_device.set(cv2.CAP_PROP_FRAME_WIDTH, width)
        video_capture_device.set(cv2.CAP_PROP_FRAME_HEIGHT, height)

        # Create facial expression recognition object
        detector = FER()

        # FPS variables
        sampled_frames = 0
        start_time = time.time()

        while True:
            if self.isInterruptionRequested():
                video_capture_device.release()
                return

            # Calculate FPS
            current_time = time.time()
            if current_time >= start_time + 1:
                self.FPS = sampled_frames
                sampled_frames = 0
                start_time = time.time()
            else:
                sampled_frames += 1

            # Capture the current frame; skip this iteration if the read failed
            ret, frame = video_capture_device.read()
            if not ret:
                continue

            # Get result of FER
            result = detector.detect_emotions(frame)
            if result:
                self.curr_emotion = 'neutral'
                emotion_score = 0.5
                for idx, (emotion, score) in enumerate(
                        result[0]['emotions'].items()):
                    # Update Top Emotion in Current Frame
                    if score > emotion_score and score > 0.6:
                        self.curr_emotion = emotion
                        emotion_score = score

                    # Store All Emotions from Current Frame
                    if UIThread.Get_State() == 'Running':
                        self.emotions[idx] += score
                # Count the analyzed frame once, not once per emotion
                if UIThread.Get_State() == 'Running':
                    self.analyzed_frames += 1

            self.new_frame_signal.emit(frame)
Example No. 10
import cv2
from fer import FER

def emotion(impath):
    try:
        print(impath)
        image = cv2.imread(impath)
        detector = FER()
        images = detector.detect_emotions(image)
        if not images:  # covers both an empty list and None
            return None
        return str(detector.top_emotion(image)[0])
    except IndexError:
        return None
Example No. 11
    def getUserEmotion(self):
        # detector for facial emotion
        detector = FER(mtcnn=True)
        cap = cv2.VideoCapture(0)
        ret, frame = cap.read()
        cap.release()  # release the webcam as soon as a frame is captured
        cv2.imwrite('temp.jpeg', frame)
        img = plt.imread('temp.jpeg')
        res = detector.detect_emotions(img)
        os.remove('temp.jpeg')
        if len(res) == 0:
            logger.info("No face detected")
            return 'neutral'
        res_emotion = res[0]['emotions']
        return max(res_emotion, key=res_emotion.get)
def main():
    recognizer = FER()
    a = 0

    print("Welcome to Facial Expression Recognizer!")

    while True:
        print("[1] Train")
        print("[2] Test Dataset")
        print("[3] Classify Image")

        a = int(input("You want to:"))
        if a == 1:  # train
            filename = "fer2013.csv"  # input("Enter the name (relative to resources folder) of the csv file:")
            training_set = load_train_set(filename)
            # format of each element in each list is (emotion, pixel_list)
            train = recognizer.train(training_set)
            if train[0]:
                print("Training successful!")
                print("Time:", train[1])
            else:
                print("Training failed. Program will terminate.")
            break
        elif a == 2:  # test
            filename = "fer2013.csv"  # input("Enter the name (relative to resources folder) of the csv file:")
            test_set = load_test_set(filename)
            weights = load_weights()
            test = recognizer.test(test_set, weights)

            if test[1]:
                print("Testing complete!")
                print("Accuracy:", test[0])
                print("Time:", test[2])
            else:
                print("Testing failed!")
            break
        elif a == 3:  # classify
            for i in range(len(expressions)):
                print("[" + str(i) + "]", expressions[i])
            img_path = input("Please enter the name of the image:")
            image_pixel = load_image(img_path)
            weights = load_weights()
            result = recognizer.predict(image_pixel, weights)

            print("Your image:", img_path, "is:")
            print("[" + str(result) + "]", expressions[result])
            break
        else:
            print("Please enter a correct input.")
Example No. 13
    def infer(self):
        camera = Camera()
        img = camera.get_image_2()
        cv2.imwrite("test.jpeg", img)
        detector = FER()
        result = detector.detect_emotions(img)

        if not result:
            return False, None
        else:
            # only looking at the first face found, check largest face in ideal world
            happy = result[0]['emotions']['happy']
            sad = result[0]['emotions']['sad']
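            # map happy - sad from [-1, 1] onto a [0, 1] happiness scale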
            overall_happiness = (1.0 + (happy - sad)) / 2.0
            return True, overall_happiness
    def __init__(self, simspark_ip='localhost',
                 simspark_port=3100,
                 teamname='DAInamite',
                 player_id=0,
                 sync_mode=True):
        super(EmoBotAgent, self).__init__(simspark_ip, simspark_port, teamname, player_id, sync_mode)
        
        self.isAngry = False
        self.cap = cv2.VideoCapture(0)

        # Check if the webcam is opened correctly
        if not self.cap.isOpened():
            raise IOError("Cannot open webcam")

        self.detector = FER()
Example No. 15
    def findFace(self, i):
        flag = False

        # create the directory for pictures if it doesn't exist (path set in loginInfo.py)
        if not os.path.exists(savePicturesDirectory):
            os.makedirs(savePicturesDirectory)

        # take a screenshot
        self.driver.get_screenshot_as_file('screenshot' + str(i) + '.png')
        img.append(cv2.imread('screenshot' + str(i) + '.png'))

        face_cascade = cv2.CascadeClassifier(faceLibraryPath)

        # crop the image so we don't search for faces in photos that were already matched
        crop_img.append(img[i][100:650, 1000:1300])
        # save the cropped image
        cv2.imwrite("cropp" + str(i) + ".png", crop_img[i])
        # search for faces
        img1.append(cv2.imread('cropp' + str(i) + '.png'))
        photoGray.append(cv2.cvtColor(img1[i], cv2.COLOR_BGR2GRAY))

        faces.append(
            face_cascade.detectMultiScale(photoGray[i],
                                          scaleFactor=1.1,
                                          minNeighbors=5,
                                          minSize=(30, 30),
                                          flags=cv2.CASCADE_SCALE_IMAGE))

        x = len(faces[i])
        if x == 0:
            print("no face detected")
            # if no face is found in the photo, save it to the folder
            cv2.imwrite(savePicturesDirectory + str(i) + ".png", img1[i])
        else:
            # if a face was found, detect the emotion
            detector = FER()
            emotion, score = detector.top_emotion(img1[i])
            print(emotion, score)

            if emotion in ('angry', 'sad') and score > 0.9:
                print('too angry for me')
                cv2.imwrite(savePicturesDirectory + str(i) + ".png", img1[i])
            else:
                print('Perfect')
                flag = True
        return flag
Example No. 16
import pygame
import pygame.camera
from pygame.locals import QUIT, KEYDOWN, K_s
import matplotlib.pyplot as plt
from fer import FER

# `camera` is expected to be the pygame.camera module; it is re-bound
# to a Camera instance below.
def camstream(camera):
    camera.init()
    DEVICE = 0
    SIZE = (640, 480)
    FILENAME = 'capture.png'
    display = pygame.display.set_mode(SIZE, 0)
    camera = pygame.camera.Camera(DEVICE, SIZE)
    camera.start()
    screen = pygame.surface.Surface(SIZE, 0, display)
    capture = True
    while capture:
        # photo = camera.get_image(screen)
        display.blit(screen, (0, 0))
        pygame.display.flip()
        for event in pygame.event.get():
            if event.type == QUIT:
                capture = False
            elif event.type == KEYDOWN and event.key == K_s:
                path = "E://Projects//Pygame experiment//images_without_bg//img.jpg"
                photo = camera.get_image(screen)
                pygame.image.save(photo, path)
                img = plt.imread(path)

                detector = FER()

                print("Started processing..")
                try:
                    print(detector.top_emotion(img))
                    print("Ended Processing result :)")
                except Exception:  # avoid a bare except
                    print("Couldn't generate emotion analysis :(")
                # plt.imshow(img)

                # image = pygame.image.save(screen, FILENAME)
                # pygame.image.save(photo, "E://Projects//Pygame experiment//images_without_bg//img.jpg")
                # img = plt.imread("E://Projects//Pygame experiment//images_without_bg//img.jpg")
                # detector = FER(mtcnn=True)
                # print(detector.detect_emotions("E://Projects//Pygame experiment//images_without_bg//img.jpg"))
                # plt.imshow(img)

    camera.stop()
    # pygame.quit()
    return
Example No. 17
    def infer_video(self):
        self.cam = cv2.VideoCapture(0)
        font = cv2.FONT_HERSHEY_SIMPLEX
        detector = FER()
        i = 0
        person = PersonAPI(0, 0)
        avg_happiness = 1
        while True:
            i += 1
            ret, frame = self.cam.read()
            if not ret:
                break

            result = detector.detect_emotions(frame)
            if len(result) > 0:
                print(result)
                happy = result[0]['emotions']['happy']
                sad = result[0]['emotions']['sad']
                x, y, w, h = result[0]["box"]

                # map happy - sad from [-1, 1] onto a [0, 1] happiness scale
                overall_happiness = (1.0 + (happy - sad)) / 2.0
                cv2.rectangle(frame, (x, y), (x + w, y + h), (200, 0, 0), 2)
            else:
                overall_happiness = 0.5

            cv2.putText(frame, str(overall_happiness), (0, 50), font, 1,
                        (0, 255, 0), 1, cv2.LINE_AA)
            # exponential moving average of happiness
            avg_happiness = avg_happiness * 0.9 + overall_happiness * 0.1
            print("average happiness {}".format(avg_happiness))
            if i == 20:
                i = 0
                if avg_happiness > 0.6:
                    if person.STATE:
                        continue
                    else:
                        play_happy()
                    person.STATE = 1
                elif avg_happiness < 0.5:
                    if person.STATE:
                        play_sad()
                    else:
                        continue
                    person.STATE = 0

            cv2.imshow("frame", frame)
            cv2.waitKey(40)
Example No. 18
    def post(self, request, format=None):

        serializer = ImageUploadSerializer(data=request.data)
        if serializer.is_valid():
            try:
                image = serializer.validated_data['image'].file.read()
                npimg = np.frombuffer(image, dtype=np.uint8)  # np.fromstring is deprecated
                img = cv2.imdecode(npimg, 1)
                detector = FER()
                emotion = detector.top_emotion(img)[0]
                res = Image.objects.all().filter(
                    emotion=emotion.upper())[0].link
                return Response(res, status=status.HTTP_200_OK)
            except IndexError:
                return Response(status=status.HTTP_400_BAD_REQUEST)
        return Response(serializer.errors,
                        status=status.HTTP_415_UNSUPPORTED_MEDIA_TYPE)
Example No. 19
    def post(self, request, *args, **kwargs):
        serializer = FaceSerializer(data=request.data)
        if serializer.is_valid():
            print("Valid photo")
            # Save photo to file
            serializer.save()
            # Get emotions
            image_file = os.curdir + serializer.data["image"]
            image = plt.imread(image_file)
            detector = FER(mtcnn=True)
            response_data = detector.detect_emotions(image)[0]
            # Remove photo
            os.remove(image_file)
            return Response(response_data, status=status.HTTP_201_CREATED)
        else:
            print("Invalid photo", serializer.errors)
            return Response(serializer.errors,
                            status=status.HTTP_400_BAD_REQUEST)
Example No. 20
import operator

import cv2
from django.conf import settings
from fer import FER

def detect_emotions(image_file):

    # clear_directory()

    img = cv2.imread(settings.MEDIA_ROOT + str(image_file))
    detector = FER(mtcnn=True)
    result = detector.detect_emotions(img)

    for i in range(len(result)):
        # box is (x, y, width, height)
        x, y, w, h = result[i]['box']
        # use the emotions of the current face, not always the first one
        emotion = max(result[i]['emotions'].items(),
                      key=operator.itemgetter(1))[0]
        # draw a white label with a black outline, scaled to the image size
        if img.shape[0] < 500 or img.shape[1] < 500:
            cv2.rectangle(img, (x, y), (x + w, y + h), (0, 0, 0), 2)
            cv2.rectangle(img, (x, y), (x + w, y + h), (255, 255, 255), 1)
            cv2.putText(img,
                        emotion, (x, y - 5),
                        cv2.FONT_HERSHEY_SIMPLEX,
                        0.5, (0, 0, 0),
                        thickness=2)
            cv2.putText(img,
                        emotion, (x, y - 5),
                        cv2.FONT_HERSHEY_SIMPLEX,
                        0.5, (255, 255, 255),
                        thickness=1)
        else:
            cv2.rectangle(img, (x, y), (x + w, y + h), (0, 0, 0), 5)
            cv2.rectangle(img, (x, y), (x + w, y + h), (255, 255, 255), 3)
            cv2.putText(img,
                        emotion, (x, y - 5),
                        cv2.FONT_HERSHEY_SIMPLEX,
                        3, (0, 0, 0),
                        thickness=5)
            cv2.putText(img,
                        emotion, (x, y - 5),
                        cv2.FONT_HERSHEY_SIMPLEX,
                        3, (255, 255, 255),
                        thickness=3)

    path = settings.MEDIA_ROOT + str(image_file)
    cv2.imwrite(path, img)
Example No. 21
    def run(self):
        global emotions, taskQueue, thresh
        print("start")
        x = len(taskQueue)
        taskQueue.append(x)
        try:
            detector = FER(mtcnn=True)
            print("DETECT")
            res = detector.detect_emotions(self.img_cv)
            print(res)
            if res:
                res = res[0]['emotions']
                emos = [e for e in res if res[e] > thresh]
                emotions.extend(emos)
        except InvalidImage:
            print("Invalid Image")
        except IndexError:
            print("Index Error")
        taskQueue.remove(x)
        print("done")
        return
Example No. 22
import base64
from io import BytesIO

import numpy as np
from PIL import Image
from fer import FER

def detect_emotion(image_as_txt, feeling=None):

    # Decode the base64 image into bytes. The slice strips the data-URL
    # prefix that declares the image format ("data:image/jpeg;base64,"
    # is exactly 23 characters).
    imgdata = base64.b64decode(image_as_txt[23:])

    # Create a PIL image from the bytes
    img = Image.open(BytesIO(imgdata))
    # Convert the PIL image to a numpy array so it can be fed to the
    # neural network
    image = np.asarray(img)

    detector = FER()  # instantiate the detector
    result = detector.detect_emotions(image)

    # Parse the floats into strings so the dict is JSON-serializable for
    # insertion into the SQL queries
    jsonable_result = {}
    for key, value in result[0]['emotions'].items():
        jsonable_result[key] = str(value)

    return jsonable_result
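
A sketch of how the expected argument might be produced (the file name is hypothetical; this assumes a JPEG data URL, whose "data:image/jpeg;base64," prefix is exactly the 23 characters the slice above removes):

    import base64

    with open("selfie.jpg", "rb") as f:
        image_as_txt = "data:image/jpeg;base64," + base64.b64encode(f.read()).decode("ascii")
    print(detect_emotion(image_as_txt))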
Example No. 23
import datetime

import cv2
import imutils
from fer import FER

def detect_motion(frameCount):
    # grab global references to the video stream, output frame, and
    # lock variables
    global vs, outputFrame, lock
    detector = FER()  # create the detector once, outside the loop
    while True:
        # read the next frame from the video stream and resize it
        # (imutils.resize scales by width and keeps the aspect ratio)
        frame = vs.read()
        frame = imutils.resize(frame, width=600)

        # grab the current timestamp (available for drawing on the frame)
        timestamp = datetime.datetime.now()
        try:
            emotion, score = detector.top_emotion(frame)
        except Exception:  # typically raised when no face is detected yet
            emotion, score = "finding...", 0
        cv2.putText(frame, emotion, (10, frame.shape[0] - 10),
                    cv2.FONT_HERSHEY_SIMPLEX, 0.7, (0, 0, 255), 2)
        with lock:
            outputFrame = frame.copy()
Example No. 24
import os
import urllib.request

import cv2
from fer import FER
from fer.exceptions import InvalidImage

def non_thread_emotion(img, buf, emots):
    buffer_path = "static/imgs/buffer" + str(buf) + ".png"
    with urllib.request.urlopen(img) as response:
        data = response.read()
    if os.path.exists(buffer_path):
        os.remove(buffer_path)
    with open(buffer_path, "wb+") as f:
        f.write(data)
    img_cv = cv2.imread(buffer_path)
    try:
        detector = FER(mtcnn=True)
        print("DETECT")
        res = detector.detect_emotions(img_cv)
        print(res)
        if res:
            res = res[0]['emotions']
            # thresh is a module-level threshold defined elsewhere
            emos = [e for e in res if res[e] > thresh]
            emots.extend(emos)
            # return emos
    except InvalidImage:
        print("Invalid Image")
    except IndexError:
        print("Index Error")
Example No. 25
from flask import request
import numpy
import cv2
import pandas as pd
from fer import FER

def hello_world():
    # This endpoint returns the emotions found in an uploaded image
    # without saving the file to storage
    top = request.form.get('top')

    # Check that the form data contains an image
    if 'image' not in request.files:
        return {'message': 'No File Part'}
    image = request.files['image']

    # Check that the image has a filename
    if image.filename == '':
        return {'message': 'No Selected File'}

    # Check that the image exists and its file type is allowed
    # (allowed_file is defined elsewhere in the application)
    if image and allowed_file(image.filename):
        # Read the image as a byte string
        image_string = image.read()
        # Convert it to a numpy array (frombuffer replaces the deprecated fromstring)
        np_img = numpy.frombuffer(image_string, numpy.uint8)
        # Decode it with cv2
        img = cv2.imdecode(np_img, cv2.IMREAD_UNCHANGED)

        # Set up the detector
        detector = FER()

        # top == '1' returns only the strongest emotion
        if top == '1':
            emotion, score = detector.top_emotion(img)
            data = {'emotion': emotion, 'score': float(score)}
        # otherwise, return the probabilities of all emotions
        else:
            result = detector.detect_emotions(img)
            data = pd.DataFrame(result).to_json()

        return data

    return 'Hello, World!'
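
A hedged client-side sketch for exercising this endpoint (the URL, port, and file name are assumptions):

    import requests

    with open("face.jpg", "rb") as f:
        resp = requests.post("http://localhost:5000/",
                             files={"image": f},
                             data={"top": "1"})
    print(resp.json())  # e.g. {"emotion": "happy", "score": 0.9}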
Example No. 26
import cv2
import matplotlib.pyplot as plt
from moviepy.editor import VideoFileClip
from fer import FER

def emotions_face_video(myvideo):

    '''Function to return a list of bounding
    boxes for faces, emotions and scores'''

    clip = VideoFileClip(myvideo)
    duration = clip.duration
    vidcap = cv2.VideoCapture(myvideo)                 # VideoCapture from cv2
    i = 0                                              # loop counter; runs once per sampled frame
    d = []                                             # list collecting the detections for each frame
    sec = 0                                            # time (in seconds) at which to grab a frame
    frameRate = 1.0                                    # interval between sampled frames, in seconds
    detector = FER()                                   # load the pretrained model once, not per frame
    while i < abs((duration / frameRate) + 1):         # number of frames based on duration and frameRate
        sec = sec + frameRate
        vidcap.set(cv2.CAP_PROP_POS_MSEC, sec * 1000)  # seek to the given timestamp in the video
        ret, image = vidcap.read()
        if ret:                                        # if a frame was read
            cv2.imwrite("image.jpg", image)            # save the frame
            img = plt.imread("image.jpg")              # re-read it (plt gives an RGB array)
            d = d + detector.detect_emotions(img)      # append this frame's detections
        i = i + 1                                      # increment the loop counter
    return d
Example No. 27
import json

from fer import FER, Video

def vid_proc(path):
    video_filename = path
    video = Video(video_filename)

    detector = FER(mtcnn=True)

    raw_data = video.analyze(detector, frequency=5, save_frames=False, save_video=False)

    for x in raw_data:
        x.pop('box0')                # drop the bounding box of the first face
        key_max = max(x, key=x.get)  # the remaining keys are emotion scores
        x['result'] = key_max        # label each frame with its top emotion

    return json.dumps(raw_data)
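
A usage sketch (the file name is hypothetical): each raw_data entry keeps per-face keys suffixed with the face index, e.g. 'angry0', which is why 'box0' is the key popped above.

    print(vid_proc("interview.mp4"))
    # e.g. [{"angry0": 0.01, "happy0": 0.87, ..., "result": "happy0"}, ...]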
Example No. 28
    def __init__(self):

        # Global variables and structure to support decisions
        self.algorithmMap = {
            "Detection": {
                "target_analysis_function": self.detectFaces,
                "target_function": self.faceDetection
            },
            "Expression": {
                "target_analysis_function": self.findExpressions,
                "target_function": self.facialExpression
            },
            "Recognition": {
                "target_analysis_function": self.recognizePeople,
                "target_function": self.faceRecognition
            }
        }

        # The user could modify the following variables
        self.useCnn = True  # boolean -> MTCNN network or OpenCV's Haar Cascade classifier
        self.FPS = 100  # frames per second: any number (the class default is 50)
        self.RESIZE_FRAME = 1  # resize factor applied to frames to improve speed
        self.width_limit = 700  # max image width to display on screen; larger images are resized
        self.height_limit = 444  # max image height to display on screen; larger images are resized
        self.threshold = 0.6  # max distance (in range [0-1]) for the algorithm to report a recognition

        # Init some variables
        self.fx = self.fy = 1 / self.RESIZE_FRAME
        self.result = []

        # Init the FER model
        self.detector = FER(mtcnn=self.useCnn)

        self.db_encodings = []
        self.db_names = []
Example No. 29
def getMoviethroughEmotion():
    global clicked

    def back(*args):
        global clicked  # needed so the assignment rebinds the module-level flag
        clicked = True

    cap = cv2.VideoCapture(0)

    def mouse_click(event, x, y, flags, param):
        global clicked
        if event == cv2.EVENT_LBUTTONDOWN:
            print("clicked")
            clicked = True

    clicked = False
    while True:
        ret, image = cap.read()

        if not ret:
            break

        cv2.imshow("image", image)
        cv2.setMouseCallback("image", mouse_click)

        if clicked:
            break

        k = cv2.waitKey(1)
        if k == ord("q"):
            break

    detector = FER()
    emotion = detector.top_emotion(image)
    print(emotion)
    cv2.destroyAllWindows()
    cap.release()
    return emotion
Example No. 30
    def test_video(self):
        detector = FER()
        video = Video("tests/woman2.mp4")

        raw_data = video.analyze(detector, display=False)
        assert isinstance(raw_data, list)

        # Convert to pandas for analysis
        df = video.to_pandas(raw_data)
        assert isinstance(df, pd.DataFrame)
        assert 'angry' in df
        df = video.get_first_face(df)
        assert isinstance(df, pd.DataFrame)
        df = video.get_emotions(df)
        assert isinstance(df, pd.DataFrame)
Example No. 31
    def test_video(self):
        detector = FER()
        video = Video("tests/woman2.mp4")

        raw_data = video.analyze(detector, display=False)
        assert isinstance(raw_data, list)

        # Convert to pandas for analysis
        df = video.to_pandas(raw_data)
        assert sum(df.neutral[:5] > 0.5) == 5, f"Expected neutral > 0.5, got {df.neutral[:5]}"
        assert isinstance(df, pd.DataFrame)
        assert "angry" in df
        df = video.get_first_face(df)
        assert isinstance(df, pd.DataFrame)
        df = video.get_emotions(df)
        assert isinstance(df, pd.DataFrame)