class EmotionDetection():
    def __init__(self):

        self.detector = FER()

    def detect(self):
        self.img = cv2.imread("image/image.jpg")
        self.detector.detect_emotions(self.img)
        emotion, score = self.detector.top_emotion(self.img)
        print("The emotion of the person:", emotion)
def run_FER(path):
    img = cv2.imread(path)
    image_name = os.path.basename(path)
    detector = FER()
    emotion_label = detector.detect_emotions(img)
    
    return {image_name : emotion_label}
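
Usage sketch for run_FER (the path below is hypothetical, for illustration only):

result = run_FER("photos/person.jpg")
# e.g. {'person.jpg': [{'box': (x, y, w, h), 'emotions': {...}}]}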
Example #3
 def get_mood(self, *args):
     img = plt.imread("selfie1.jpg")
     detector = FER(mtcnn=True)
     valence = self.get_valence(detector, img)
     energy = self.get_energy(detector, img)
     print(detector.detect_emotions(img))
     print(valence, energy)
     self.create_playlist(valence, energy)
def getEmotions(img):
    detector = FER(mtcnn=True)
    result = detector.detect_emotions(img)
    # detect_emotions returns an empty list when no face is found
    if not result:
        st.write('No result')
        return False
    return result[0]['emotions']
Example #5
def emotions():
    img = plt.imread("temp/temp.jpg")
    detector = FER(mtcnn=True)
    key_value = detector.detect_emotions(img)[0]['emotions']
    emo = {}
    sorted_keys = sorted(key_value, key=key_value.get, reverse=True)
    for w in sorted_keys:
        emo[w] = key_value[w]
    emotions = list(emo.keys())[0:3]
    return {"emo": emotions}
Example #6
    def run(self):
        """Catures Webcam Frame & Processes Emotions"""
        # Capture from Webcam
        width = 320
        height = 240
        video_capture_device = cv2.VideoCapture(0)
        video_capture_device.set(cv2.CAP_PROP_FRAME_WIDTH, width)
        video_capture_device.set(cv2.CAP_PROP_FRAME_HEIGHT, height)

        # Create facial expression recognition object
        detector = FER()

        # FPS Variables
        sampled_frames = 0
        start_time = time.time()

        while True:
            if self.isInterruptionRequested():
                video_capture_device.release()
                return
            else:
                # Calculate FPS
                current_time = time.time()
                if current_time >= start_time + 1:
                    self.FPS = sampled_frames
                    sampled_frames = 0
                    start_time = time.time()
                else:
                    sampled_frames += 1

                # Capture current frame
                ret, frame = video_capture_device.read()

                # Get result of FER
                result = detector.detect_emotions(frame)
                if result:
                    self.curr_emotion = 'neutral'
                    emotion_score = 0.5
                    for idx, (emotion, score) in enumerate(
                            result[0]['emotions'].items()):
                        # Update Top Emotion in Current Frame
                        if score > emotion_score and score > 0.6:
                            self.curr_emotion = emotion
                            emotion_score = score

                        # Store All Emotions from Current Frame
                        if UIThread.Get_State() == 'Running':
                            self.analyzed_frames += 1
                            self.emotions[idx] += score
                    if UIThread.Get_State() == 'Running':
                        self.analyzed_frames += 1

                if ret:
                    self.new_frame_signal.emit(frame)
Example #7
def emotion(impath):
    try:
        print(impath)
        image = cv2.imread(impath)
        detector = FER()
        images = detector.detect_emotions(image)
        if not images:
            return None
        emot = (str(detector.top_emotion(image)[0]))
        return emot
    except IndexError:
        return None
Example #8
 def getUserEmotion(self):
     # detector for facial emotion
     detector = FER(mtcnn=True)
     cap = cv2.VideoCapture(0)
     ret, frame = cap.read()
     # release the webcam as soon as the frame has been captured
     cap.release()
     cv2.imwrite('temp.jpeg', frame)
     img = plt.imread('temp.jpeg')
     res = detector.detect_emotions(img)
     os.remove('temp.jpeg')
     if len(res)==0:
         logger.info("No face detected")
         return 'neutral'
     res_emotion = res[0]['emotions']
     return max(res_emotion, key=res_emotion.get)
Example #9
    def infer(self):
        camera = Camera()
        img = camera.get_image_2()
        cv2.imwrite("test.jpeg", img)
        detector = FER()
        result = detector.detect_emotions(img)

        if not result:
            return False, None
        else:
            # only looking at the first face found, check largest face in ideal world
            happy = result[0]['emotions']['happy']
            sad = result[0]['emotions']['sad']
            overall_happiness = (1.0 + (happy - sad)) / 2.0
            return True, overall_happiness
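
The comment in infer() notes that ideally the largest detected face would be checked rather than result[0]. A minimal sketch of that selection, assuming the same result layout returned by detect_emotions ([{'box': (x, y, w, h), 'emotions': {...}}]):

largest = max(result, key=lambda face: face['box'][2] * face['box'][3])  # widest * tallest box
happy = largest['emotions']['happy']
sad = largest['emotions']['sad']
overall_happiness = (1.0 + (happy - sad)) / 2.0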
Example #10
    def infer_video(self):
        self.cam = cv2.VideoCapture(0)
        font = cv2.FONT_HERSHEY_SIMPLEX
        detector = FER()
        i = 0
        person = PersonAPI(0, 0)
        avg_happiness = 1
        while True:
            i += 1
            ret, frame = self.cam.read()
            if not ret:
                break

            result = detector.detect_emotions(frame)
            if len(result) > 0:
                print(result)
                happy = result[0]['emotions']['happy']
                sad = result[0]['emotions']['sad']
                x, y, w, h = result[0]["box"]

                overall_happiness = (1.0 + (happy - sad)) / 2.0
                cv2.rectangle(frame, (x, y), (x + w, y + h), (200, 0, 0), 2)
            else:
                overall_happiness = 0.5

            cv2.putText(frame, str(overall_happiness), (0, 50), font, 1,
                        (0, 255, 0), 1, cv2.LINE_AA)
            avg_happiness = avg_happiness * 0.9 + overall_happiness * 0.1
            print("average happiness {}".format(avg_happiness))
            if i == 20:
                i = 0
                if avg_happiness > 0.6:
                    if person.STATE:
                        continue
                    else:
                        play_happy()
                    person.STATE = 1
                elif avg_happiness < 0.5:
                    if person.STATE:
                        play_sad()
                    else:
                        continue
                    person.STATE = 0

            cv2.imshow("frame", frame)
            cv2.waitKey(40)
Example #11
 def post(self, request, *args, **kwargs):
     serializer = FaceSerializer(data=request.data)
     if serializer.is_valid():
         print("Valid photo")
         # Save photo to file
         serializer.save()
         # Get emotions
         image_file = os.curdir + serializer.data["image"]
         image = plt.imread(image_file)
         detector = FER(mtcnn=True)
         response_data = detector.detect_emotions(image)[0]
         # Remove photo
         os.remove(image_file)
         return Response(response_data, status=status.HTTP_201_CREATED)
     else:
         print("Invalid photo", serializer.errors)
         return Response(serializer.errors,
                         status=status.HTTP_400_BAD_REQUEST)
Example #12
def detect_emotions(image_file):

    # clear_directory()

    img = cv2.imread(settings.MEDIA_ROOT + str(image_file))
    detector = FER(mtcnn=True)
    result = detector.detect_emotions(img)

    for i in range(len(result)):
        x, y, w, h = result[i]['box']
        # label each detected face with its own dominant emotion
        emotion = max(result[i]['emotions'].items(),
                      key=operator.itemgetter(1))[0]
        if img.shape[0] < 500 or img.shape[1] < 500:
            cv2.rectangle(img, (x, y), (x + w, y + h), (0, 0, 0), 2)
            cv2.rectangle(img, (x, y), (x + w, y + h), (255, 255, 255), 1)
            cv2.putText(img,
                        emotion, (x, y - 5),
                        cv2.FONT_HERSHEY_SIMPLEX,
                        0.5, (0, 0, 0),
                        thickness=2)
            cv2.putText(img,
                        emotion, (x, y - 5),
                        cv2.FONT_HERSHEY_SIMPLEX,
                        0.5, (255, 255, 255),
                        thickness=1)
        else:
            cv2.rectangle(img, (x, y), (x + w, y + h), (0, 0, 0), 5)
            cv2.rectangle(img, (x, y), (x + w, y + h), (255, 255, 255), 3)
            cv2.putText(img,
                        emotion, (x, y - 5),
                        cv2.FONT_HERSHEY_SIMPLEX,
                        3, (0, 0, 0),
                        thickness=5)
            cv2.putText(img,
                        emotion, (x, y - 5),
                        cv2.FONT_HERSHEY_SIMPLEX,
                        3, (255, 255, 255),
                        thickness=3)

    path = settings.MEDIA_ROOT + str(image_file)
    cv2.imwrite(path, img)
Example #13
def detect_emotion(image_as_txt, feeling=None):

    # Decode the base64 payload (strip the data-URL prefix that declares the image format) into raw bytes
    imgdata = base64.b64decode(image_as_txt[23:])

    # Build a PIL image from the bytes, then convert it to a numpy array
    # so it can be fed to the neural network
    img = Image.open(BytesIO(imgdata))
    image = np.asarray(img)

    detector = FER()  # instantiate the detector
    result = detector.detect_emotions(image)

    # Cast the float scores to str so the dict is JSON-serializable
    # before being embedded in the SQL queries
    jsonable_result = {}
    for key, value in result[0]['emotions'].items():
        jsonable_result[key] = str(value)

    return jsonable_result
Example #14
 def run(self):
     global emotions, taskQueue, thresh
     print("start")
     x = len(taskQueue)
     taskQueue.append(x)
     try:
         detector = FER(mtcnn=True)
         print("DETECT")
         res = detector.detect_emotions(self.img_cv)
         print(res)
         if len(res):
             res = res[0]['emotions']
             emos = [e for e in res if res[e] > thresh]
             emotions.extend(emos)
     except InvalidImage:
         print("Invalid Image")
     except IndexError:
         print("Index Error")
     taskQueue.remove(x)
     print("done")
     return
Example #15
class EmotionDetector:
    """
    Main class which predicts facial emotions for a given image.
        1. Predicts one of the cardinal emotions (fear, anger, surprise, neutral, disgust, sad), if faces were detected.
        2. Predicts none if no face or emotion was detected.
    This class is utilized by the aesthetics_predictor_api_pk, which delegates flask requests to the predict method and
    sends the response back to the client.
    """
    def __init__(self):
        self.session = tf.compat.v1.Session()

        with self.session.as_default():
            with self.session.graph.as_default():
                self.detector = FER(mtcnn=True)

    def predict(self, content_path, start_frame=0, end_frame=0):
        try:
            with self.session.as_default():
                with self.session.graph.as_default():
                    image = cv2.imread(content_path)
                    results = self.detector.detect_emotions(image)
                    pprint.pprint(results)
                    parsed_output = self.parse_output(results)
                    return jsonify(parsed_output)
        except Exception as e:
            print(str(e))
            return "Error during prediction.", 400

    def parse_output(self, results):
        facial_emotion_key = "facialEmotions"
        if results is None or len(results) == 0:
            return {facial_emotion_key: "none"}
        facial_emotions = []
        for result in results:
            dominant_emotion = max(result["emotions"].items(),
                                   key=operator.itemgetter(1))[0]
            facial_emotions.append(dominant_emotion)
        distinct_facial_emotions = list(set(facial_emotions))
        return {facial_emotion_key: distinct_facial_emotions}
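
A minimal wiring sketch for the flask delegation mentioned in the class docstring; the Flask app setup, route name, and "content_path" request field below are assumptions for illustration, not the actual aesthetics_predictor_api_pk:

from flask import Flask, request

app = Flask(__name__)
detector = EmotionDetector()

@app.route("/facial-emotions", methods=["POST"])
def facial_emotions():
    # Delegate the request to EmotionDetector.predict and return its response
    content_path = request.json.get("content_path", "")
    return detector.predict(content_path)

if __name__ == "__main__":
    app.run()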
Example #16
def non_thread_emotion(img, buf, emots):
    with urllib.request.urlopen(img) as response:
        data = response.read()
    if os.path.exists("static/imgs/buffer" + str(buf) + ".png"):
        os.remove("static/imgs/buffer" + str(buf) + ".png")
    with open("static/imgs/buffer" + str(buf) + ".png", "wb+") as f:
        f.write(data)
    img_cv = cv2.imread("static/imgs/buffer" + str(buf) + ".png")
    try:
        detector = FER(mtcnn=True)
        print("DETECT")
        res = detector.detect_emotions(img_cv)
        print(res)
        if len(res):
            res = res[0]['emotions']
            emos = [e for e in res if res[e] > thresh]
            emots.extend(emos)
            # return emos
    except InvalidImage:
        print("Invalid Image")
    except IndexError:
        print("Index Error")
Example #17
def hello_world():
    # This function returns the emotions detected in an uploaded image
    # without saving the file to storage
    top = request.form.get('top')

    # Check that the form data contains an image
    if 'image' not in request.files:
        return {'message': 'No File Part'}
    image = request.files['image']

    # Check that the image has a filename
    if image.filename == '':
        return {'message': 'No Selected File'}

    # Check that the image exists and its file type is allowed
    if image and allowed_file(image.filename):
        # Read the uploaded image as a byte string
        image_string = image.read()
        # Convert the bytes to a numpy array (frombuffer replaces the deprecated fromstring)
        np_img = numpy.frombuffer(image_string, numpy.uint8)
        # Decode the array into an OpenCV image
        img = cv2.imdecode(np_img, cv2.IMREAD_UNCHANGED)

        # Set up the detector
        detector = FER()

        # top == '1' returns only the strongest emotion
        if top == '1':
            emotion, score = detector.top_emotion(img)
            data = {'emotion': emotion, 'score': float(score)}
        # otherwise return the probabilities of all detected emotions
        else:
            result = detector.detect_emotions(img)
            data = pd.DataFrame(result).to_json()

        return data

    return 'Hello, World!'
Example #18
def emotions_face_video(myvideo):

    '''Return a list of bounding boxes, emotions and
    scores for the faces found in each sampled frame'''

    clip = VideoFileClip(myvideo)
    duration = clip.duration
    vidcap = cv2.VideoCapture(myvideo)                      # VideoCapture from cv2
    detector = FER()                                        # create the pretrained FER detector once, outside the loop
    i = 0                                                   # loop counter, runs once per sampled frame
    d = []                                                  # list collecting the detections for each frame
    sec = 0                                                 # timestamp (in seconds) of the frame to capture
    frameRate = 1.0                                         # sampling interval between captured frames
    while i < abs((duration / frameRate) + 1):              # number of frames based on duration and frameRate
            sec = sec + frameRate
            vidcap.set(cv2.CAP_PROP_POS_MSEC, sec * 1000)   # seek to the chosen timestamp
            ret, image = vidcap.read()
            if ret:                                         # only if a frame was returned
                    cv2.imwrite("image.jpg", image)         # save the frame
                    img = plt.imread("image.jpg")           # reload it as an RGB array
                    d = d + detector.detect_emotions(img)   # append this frame's detections
            i = i + 1                                       # advance to the next frame
    return d
Example #19
def main():
	"""Análisis de sentimientos"""

	st.title("Aplicación de aprendizaje automático")
	st.text("Análisis de sentimiento en fotos y videos")

	activites = ["Imagen","Video"]

	choice = st.sidebar.selectbox("Seleccione",activites)

	if choice == 'Imagen':
		st.subheader("Análisis de imagen")
		uploaded_file = st.file_uploader("Subir imagen",type=["png","jpg"])
		if uploaded_file is not None:

			file_bytes = np.asarray(bytearray(uploaded_file.read()), dtype=np.uint8)
			opencv_image = cv2.imdecode(file_bytes, 1)
			st.image(opencv_image, channels="BGR", use_column_width=True)
			detector = FER()
			#detector = FER(mtcnn=True)
			result = detector.detect_emotions(opencv_image)
			bounding_box = result[0]["box"]
			emotions = result[0]["emotions"]
			result_df=DataFrame(result)
			result_dic=result_df.loc[: , "emotions"][0]
			result_dic_df=pd.Series(result_dic).to_frame(name="Score")

			my_bar = st.progress(0)

			for percent_complete in range(100):
				time.sleep(0.1)
				my_bar.progress(percent_complete + 1)

			cv2.rectangle(
				opencv_image,
				(bounding_box[0], bounding_box[1]),
				(bounding_box[0] + bounding_box[2], bounding_box[1] + bounding_box[3]),
				(0, 155, 255),
				2,
			)
			for idx, (emotion, score) in enumerate(emotions.items()):
				color = (211, 211, 211) if score < 0.01 else (0, 255, 0)
				emotion_score = "{}: {}".format(
					emotion, "{:.2f}".format(score) if score > 0.01 else ""
				)
				cv2.putText(
					opencv_image,
					emotion_score,
					(bounding_box[0], bounding_box[1] + bounding_box[3] + 30 + idx * 15),
					cv2.FONT_HERSHEY_SIMPLEX,
					0.5,
					color,
					1,
					cv2.LINE_AA,
				)
			cv2.imwrite("imagen_result.jpg", opencv_image)
			st.subheader('Imagen analizada')
			st.image(opencv_image, channels="BGR", use_column_width=True)
			st.markdown(get_binary_file_downloader_html("imagen_result.jpg", 'Imagen'), unsafe_allow_html=True)

			emocion, puntaje = detector.top_emotion(opencv_image) 
			st.write("Emoción predominante: ", emocion)
			st.write("Puntaje: ", puntaje)

			st.markdown("---")
			st.subheader('Tabla de resultados')
			st.table(result_dic_df)
			st.subheader('Gráfica')
			st.line_chart(result_dic_df)



	elif choice == 'Video':
		st.subheader("Análisis de video")
		uploaded_video = st.file_uploader("Subir video en .mp4",type=["mp4"])

		if uploaded_video is not None:
			video_bytes = np.asarray(bytearray(uploaded_video.read()), dtype=np.uint8)
			st.video(video_bytes)
			#video_path = st.text_input('CSV file path')

			temporary_location = "video_result.mp4"
			with open(temporary_location, 'wb') as out:  ## Open temporary file as bytes
				out.write(video_bytes)  ## Read bytes into file
			out.close()
			video = Video("video_result.mp4")
			# Analyze video, displaying the output
			detector = FER()
			raw_data = video.analyze(detector, display=True)
			
			
			# Progress bar
			my_bar = st.progress(0)
			for percent_complete in range(100):
				time.sleep(0.1)
				my_bar.progress(percent_complete + 1)
			st.markdown("---")
			st.subheader('Tabla de resultados')			
			# Results
			df = video.to_pandas(raw_data)
			st.table(df.head())
			# Plot emotions
			medias=df[['angry','disgust','fear','happy','sad','surprise','neutral']].mean()
			st.write(medias)
			st.bar_chart(medias)
			st.subheader('Gráfica')	
			st.line_chart(df[['angry','disgust','fear','happy','sad','surprise','neutral']])
	
	elif choice == 'Créditos':
		st.subheader("Créditos")
		st.text("Jorge O. Cifuentes")
		st.write('*[email protected]* :sunglasses:')
Example #20
from fer import FER
import cv2

img=cv2.imread('happy.jpg')
detector=FER()
a=detector.detect_emotions(img)
print(a)
Example #21
import matplotlib.pyplot as plt
import time
import pymongo
import datetime
import numpy as np
import cv2
import requests
from fer import FER

cap = cv2.VideoCapture(0)

while(True):
	ret, frame = cap.read()
	time.sleep(5.5)
	img=frame
	detector = FER(mtcnn=True)
	mydict = detector.detect_emotions(img)[0]

	url = 'https://www.w3schools.com/python/demopage.php'
	x = requests.post(url, data = mydict)
	print(x.text)

	cv2.imshow('frame',frame)
	if cv2.waitKey(1) & 0xFF == ord('q'):
	    break

cap.release()
cv2.destroyAllWindows()



Example #22
class OpenFader:

    # Constructor
    def __init__(self):

        # Global variables and structure to support decisions
        self.algorithmMap = {
            "Detection": {
                "target_analysis_function": self.detectFaces,
                "target_function": self.faceDetection
            },
            "Expression": {
                "target_analysis_function": self.findExpressions,
                "target_function": self.facialExpression
            },
            "Recognition": {
                "target_analysis_function": self.recognizePeople,
                "target_function": self.faceRecognition
            }
        }

        # User could modify the following variables
        self.useCnn = True  # boolean -> MTCNN network or OpenCV's Haar Cascade classifier
        self.FPS = 100  # FramePerSecond: any number -> default is 50
        self.RESIZE_FRAME = 1  # Resize frame to improve speed
        self.width_limit = 700  # Max image width to display it on the screen. If it's bigger, it will be resized
        self.height_limit = 444  # Max image height to display it on the screen. If it's bigger, it will be resized
        self.threshold = 0.6  # Max distance (in range[0-1]) to be recognized from algorithm

        #init some variables
        self.fx = self.fy = 1 / self.RESIZE_FRAME
        self.result = []

        #init FER model
        self.detector = FER(mtcnn=self.useCnn)

        self.db_encodings = []
        self.db_names = []

        return

    # Connect the external GUI with the OpenFader class
    def connectGUI(self, gui):
        # Graphical User Interface
        self.GUI = gui
        return

    # Add an image to the face recognition training dataset
    #
    # Parameters:
    # path_image:   the path to the image
    # name:         the name of the individual in the image
    def addTrainImage(self, path_image, name):
        temp = face_recognition.load_image_file(path_image)
        temp_encoding = face_recognition.face_encodings(temp)[0]
        self.db_encodings.append(temp_encoding)
        self.db_names.append(name)
        return

    # Update the active frame
    #
    # Parameters:
    # newFrame:  the new active frame
    def updateFrame(self, newFrame):
        self.frame = newFrame
        return

    # Put a text on the GUI terminal
    def putText(self, mode, text):
        self.GUI.printMode(mode)
        self.GUI.printResult(text)
        return

    # Add a rectangle on the displayed GUI image / frame
    def printRectangle(self,
                       frame,
                       coordinates,
                       fontColor=(255, 255, 255),
                       lineType=4,
                       polaroid=False):
        (x, y, w, h) = coordinates
        cv2.rectangle(frame, (x, y), (x + w, y + h), fontColor, lineType)
        if polaroid:
            cv2.rectangle(frame, (x - 2, y + h + 35), (x + w + 2, y + h),
                          fontColor, cv2.FILLED)
        return

    # Return the best emotion in an array of emotions
    #
    # Parameters:
    # emotions: the array of emotions
    def getBestEmotion(self, emotions):
        maxValue = 0
        bestEmotion = 'neutral'
        for k in emotions:
            if emotions[k] > maxValue:
                maxValue = emotions[k]
                bestEmotion = k
        return bestEmotion

    # Update the active frame with the detection rectangle
    # and print on GUI terminal the best found emotion
    def facialExpression(self, frame):
        self.updateFrame(frame)  # update the active frame
        self.GUI.cleanTerminal()  # clean the GUI terminal
        for p in self.result:
            bestEmotion = self.getBestEmotion(
                p['emotions'])  # get best emotion
            self.printRectangle(
                frame,
                p['box'])  # print the detection rectangle on the active frame
            self.putText(
                "FACIAL EXPRESSION",
                bestEmotion)  # print the best emotion on the GUI terminal
        return

    # Update the active frame with the detection rectangle
    def faceDetection(self, frame):
        self.updateFrame(frame)  # update the active frame
        for coord in self.result:
            self.printRectangle(
                frame,
                coord)  # print the detection rectangle on the active frame
        return

    # Update the active frame with the detection rectangle
    # and print on GUI terminal the name of the recognized individual
    def faceRecognition(self, frame):
        self.updateFrame(frame)  # update the active frame
        self.GUI.cleanTerminal()  # clean the GUI terminal
        for coord, name in zip(self.result, self.face_names):
            self.peopleNotFound = False
            self.printRectangle(
                frame,
                coord)  # print the detection rectangle on the active frame
            self.putText(
                "FACE RECOGNITION", name
            )  # print the name of the recognized individual on the GUI terminal
        return

    # Stop the current analysis
    def stopAnalysis(self):
        self.needToStop = True
        return

    # Convert box coordinates from FER's (x, y, w, h) format to face_recognition's (top, right, bottom, left) format
    def convertBox(self, coordinates):
        result = []
        for coord in coordinates:
            (x, y, w, h) = coord
            result.append((y, x + w, y + h, x))
        return result

    # Detect the faces in a frame
    #
    # Parameters:
    # frame: the frame to analyze
    def detectFaces(self, frame):
        self.result = self.detector.find_faces(
            frame)  # Run the Face Detection Algorithm
        return

    # Detect the expressions of the faces in a frame
    #
    # Parameters:
    # frame: the frame to analyze
    def findExpressions(self, frame):
        self.result = self.detector.detect_emotions(
            frame)  # Run the Facial Expression Algorithm
        return

    # Recognize the people in a frame
    #
    # Parameters:
    # frame: the frame to analyze
    def recognizePeople(self, frame):
        if self.peopleNotFound:  # only if I haven't found anybody
            small_frame = cv2.resize(frame, (0, 0), fx=self.fx, fy=self.fy)
            rgb_small_frame = small_frame[:, :, ::-1]
            temp_result = self.detector.find_faces(
                frame)  # Run the Face Detection Algorithm
            temp = self.convertBox(temp_result)
            temp_names = []
            face_encodings = face_recognition.face_encodings(
                rgb_small_frame, temp)  # Run the Face Recognition Algorithm
            for face_encoding in face_encodings:
                face_distances = face_recognition.face_distance(
                    self.db_encodings,
                    face_encoding)  # Compute the Recognition Error
                if len(face_distances) > 0 and min(
                        face_distances) < self.threshold:
                    best_match_index = np.argmin(face_distances)
                    name = self.db_names[best_match_index]
                    temp_names.append(
                        name
                    )  # Add the name of the found individual in the result

            self.result = temp_result
            self.face_names = temp_names
        else:
            self.detectFaces(
                frame
            )  # If I've already found someone, run the Face Detection Algorithm
        return

    # Run analysis on an image
    #
    # Parameters:
    # src:                      the image
    # target_analysis_function: the function with the analysis to be executed
    # target_function:          the function to print the detection rectangle on the image
    def runImageAnalysis(self, src, target_analysis_function, target_function):

        # Check image
        src = checkImage(src, self.width_limit, self.height_limit)
        # Read image
        frame = cv2.imread(src)
        # Make analysis
        target_analysis_function(frame)
        # Print rectangle and/or texts
        target_function(frame)
        # Show image on GUI
        self.GUI.analyzePhoto(frame)
        return

    # Initialize
    def initAgain(self):
        # Initialize some variables
        self.frame = []
        self.result = []
        self.face_names = []
        return

    # Run analysis on a video
    #
    # Parameters:
    # target_analysis_function: the function with the analysis to be executed
    def runVideoAnalysis(self, target_analysis_function):

        if len(self.frame) > 0:  # only if there is an active frame
            target_analysis_function(self.frame)

        return

    # Run analysis on the current media
    #
    # Parameters:
    # selectedAlgorithm: the analysis to be executed
    # source:            the type of source to analyze (image, video or webcam)
    # path_to_source:    the path to the media to be analyzed
    def runAnalysis(self, selectedAlgorithm, source, path_to_source):

        self.initAgain()

        self.peopleNotFound = True

        # Set target analysis function and target function
        target_analysis_function = self.algorithmMap[selectedAlgorithm][
            "target_analysis_function"]
        target_function = self.algorithmMap[selectedAlgorithm][
            "target_function"]

        # Select the source
        if source == 'image':
            # Insert here the path to the image
            selectedSource = path_to_source
            self.runImageAnalysis(selectedSource, target_analysis_function,
                                  target_function)
        else:
            if source == 'video':
                # Insert here the path to the video
                path_to_video = resizeVideo(path_to_source)
                selectedSource = path_to_video
                interval = 1000
            else:
                # Webcam
                selectedSource = 0
                interval = 200

            # Create the thread able to manage the video stream during the analysis
            self.videoStreamObject = self.GUI.openSource(
                selectedSource, target_function)
            self.GUI.analyze(self.runVideoAnalysis, target_analysis_function,
                             interval)
        return
def main():
    detector = FER(mtcnn=True)
    img = plt.imread('./assets/Robin_Williams.jpg')
    result = detector.detect_emotions(img)
    print(result)
Example #24
cap = cv2.VideoCapture(0)

cascPath = "haarcascade_frontalface_default.xml"
faceCascade = cv2.CascadeClassifier(cascPath)
detector = FER()

while (True):
    ret, frame = cap.read()
    image = cv2.cvtColor(frame, cv2.COLOR_BGR2BGRA)
    faces = faceCascade.detectMultiScale(image,
                                         scaleFactor=1.1,
                                         minNeighbors=5,
                                         minSize=(30, 30),
                                         flags=cv2.CASCADE_SCALE_IMAGE)
    results = detector.detect_emotions(image)

    for item in results:
        bounding_box = item["box"]
        emotions = sorted(item["emotions"].items(), key=operator.itemgetter(1))
        emotions.reverse()
        cv2.rectangle(frame, (bounding_box[0], bounding_box[1]),
                      (bounding_box[0] + bounding_box[2],
                       bounding_box[1] + bounding_box[3]), (0, 255, 0), 2)
        cv2.putText(frame, str(emotions[0]),
                    (bounding_box[0] + 5, bounding_box[1] + 30),
                    cv2.FONT_HERSHEY_SIMPLEX, 0.5, (0, 255, 0), 2, cv2.LINE_AA)

    cv2.imshow('frame', frame)

    print("Found {0} faces!".format(len(faces)))

    # a waitKey call is required for imshow to actually render the window
    if cv2.waitKey(1) & 0xFF == ord('q'):
        break
Example #25
spf = 0  #seconds per frame

frameCounter = 0
while (validFrames < dummyFrames):
    frameCounter += 1
    validFrames += 1
    t = time.time()
    ret, frame = video_capter.read()
    cv2.flip(frame, 1, frame)

    frameScaled = cv2.resize(frame,
                             dsize=None,
                             fx=1.0 / FACE_DOWNSAMLE_RATIO,
                             fy=1.0 / FACE_DOWNSAMLE_RATIO)

    faces_emotions = emotion_detector.detect_emotions(frameScaled)
    # [{'box': (146, 120, 114, 114), 'emotions': {'angry': 0.22, 'disgust': 0.0, 'fear': 0.03, 'happy': 0.0, 'sad': 0.13, 'surprise': 0.0, 'neutral': 0.62}}]

    # print(faces_emotions)

    timeFaces = time.time() - t
    # if no face was detected, don't add this time to the calculation
    if len(faces_emotions) == 0:
        validFrames -= 1
    else:
        totalTime += timeFaces

    text_org = (30, 30)
    sub_prex = "." * (frameCounter % 6 + 1)
    frame = drawUnicodeText(frame,
                            "计算电脑相关的参数" + sub_prex,
Example #26
def predict(filename):
    image_url = url_for('images', filename=filename)
    image_path = os.path.join(app.config['UPLOAD_FOLDER'], filename)

    image_data = load_and_prepare(image_path)
    print(hasattr(image_data, "_getexif"))

    # keras imports
    from tensorflow.keras.preprocessing.image import load_img, img_to_array
    from tensorflow.keras import Sequential
    from tensorflow.keras.layers import Dense, Conv2D, MaxPooling2D, Flatten, GlobalAveragePooling2D
    from tensorflow.keras.layers import Dropout, InputLayer, BatchNormalization

    # instantiate the model
    NN = Sequential()
    NN.add(InputLayer(input_shape=(150, 150, 3)))
    # Conv block 1.
    NN.add(Conv2D(filters=2, kernel_size=3, activation='elu', padding='same'))
    NN.add(MaxPooling2D(2))
    NN.add(BatchNormalization())
    NN.add(Dropout(0.2))
    # Conv block 2
    NN.add(Conv2D(filters=4, kernel_size=3, activation='elu', padding='same'))
    NN.add(MaxPooling2D(2))
    NN.add(BatchNormalization())
    # Conv block 3
    NN.add(Conv2D(filters=8, kernel_size=3, activation='elu', padding='same'))
    NN.add(MaxPooling2D(2))
    NN.add(BatchNormalization())
    NN.add(Dropout(0.5))
    # Conv block 4
    NN.add(Conv2D(filters=12, kernel_size=3, activation='elu', padding='same'))
    NN.add(MaxPooling2D(2))
    NN.add(BatchNormalization())
    # Conv block 5
    NN.add(Conv2D(filters=24, kernel_size=3, activation='elu', padding='same'))
    NN.add(MaxPooling2D(2))
    NN.add(BatchNormalization())
    # Fully connected block - flattening followed by dense and output layers
    NN.add(Flatten())
    NN.add(Dense(4, activation='elu'))
    NN.add(BatchNormalization())
    NN.add(Dropout(0.5))
    NN.add(Dense(2, activation='sigmoid'))  # 2 target classes, output layer

    # define the model and load the weights
    NEURAL_NET_MODEL_PATH = os.environ['NEURAL_NET_MODEL_PATH']
    NN.load_weights(NEURAL_NET_MODEL_PATH)

    # predict and generate bar graph
    predictions = NN.predict(image_data)[0]

    # imports for emotion detection
    from fer import FER
    import cv2

    detector = FER()
    image_data = image_data * 255
    image_data = image_data[0]
    image_data = image_data.astype('uint8')
    emotions = detector.detect_emotions(image_data)
    emotions_dict = emotions[0]['emotions']
    script, div = generate_barplot(emotions_dict)

    try:
        return render_template('predict.html',
                               plot_script=script,
                               plot_div=div,
                               image_url=image_url,
                               message=predictions[1])

    except IndexError:
        return render_template(
            'predict_no_emotion.html',
            plot_script=script,
            plot_div=div,
            image_url=image_url,
            message=
            'Oops! We could not capture your emotions with that photo. Please try again and make sure your face is clearly visible!'
        )
Example #27
    h = 60
    flag = 0

    # Variable for deciding placement of text in images
    pos_x, pos_y = (450, 184)
    capture = True
    print("Loop start")
    detector = FER()
    counter = 0
    while capture:
        counter += 1
        print(counter)
        pygame.time.Clock().tick(10)
        try:
            ret, frame = cam.read()
            print(detector.detect_emotions(frame))
            # print(detector.top_emotion(frame))
            emotion = detector.top_emotion(frame)[0]
            print("Done :)")
            screen.fill(bg_color)

            if emotion == 'happy':
                # For image
                image_display(emotion)

            elif emotion == 'neutral':
                # For image
                image_display(emotion)

            elif emotion == 'sad':
                # For image
    haar_cascade_face = cv2.CascadeClassifier(
        os.getcwd() + '/data/haarcascade_frontalface_default.xml')

    #invokes cascade on img
    faces_rects = haar_cascade_face.detectMultiScale(img_gray,
                                                     scaleFactor=1.8,
                                                     minNeighbors=5)

    #prints number of faces found to console
    print('Faces found: ', len(faces_rects))

    #draws rectangle around face
    for (x, y, w, h) in faces_rects:
        cv2.rectangle(img_copy, (x, y), (x + w, y + h), (0, 255, 0), 2)

    detector_output = detector.detect_emotions(img_copy)

    output_dict = {"face_present": "", "emotions": {}}

    if len(faces_rects) > 0:
        output_dict["face_present"] = True
        output_dict["emotions"] = str(detector_output[0]["emotions"])

    json_string = json.dumps(output_dict)

    with open('json_file.json', 'w') as f:
        json.dump(output_dict, f)

    time.sleep(5.0)

#displays img