Example #1
    def get_frame(self):

        conn = sqlite3.connect('database.db')
        c = conn.cursor()
        fname = "recognizer/trainingData.yml"
        if not os.path.isfile(fname):
            print("\nPlease train the data first\n")
            return None

        # Model For Facial Expression Recognition
        model = FacialExpressionModel("model.json", "model_weights.h5")

        # Model For Face Recognizer
        face_detector = mtcnn.MTCNN()
        recognizer = cv2.face.LBPHFaceRecognizer_create()
        recognizer.read(fname)

        _, img = self.video.read()
        faces = face_detector.detect_faces(img)

        for res in faces:
            x, y, w, h = res["box"]
            x, y = abs(x), abs(y)
            x1, y1 = x+w, y+h
            image = img[y:y1, x:x1]
            image = cv2.cvtColor(image, cv2.COLOR_BGR2GRAY)

            # Recognizing Face
            ids, conf = recognizer.predict(image)
            c.execute("select roll_no from users where id = (?);", (ids,))
            result = c.fetchall()
            try:
                roll_no = result[0][0]
            except IndexError:
                roll_no = 'Error'

            # LBPH "confidence" is a distance: lower means a closer match
            if conf > 50:
                roll_no = "No Match"

            image2 = cv2.resize(image, (48, 48))

            # Predicting Expression
            pred = model.predict_emotion(image2[np.newaxis, :, :, np.newaxis])

            msg = pred + " " + roll_no

            # Mark the Expression if Face is detected
            if roll_no != "Error" and roll_no != "No Match":
                marked = count(roll_no, pred)
                if marked:
                    Attendance(roll_no)
                    msg = "MARKED"

            cv2.putText(img, msg, (x, y), cv2.FONT_HERSHEY_SIMPLEX,
                        1, (255, 255, 0), 2)
            cv2.rectangle(img, (x, y), (x+w, y+h), (255, 0, 0), 2)

        _, jpeg = cv2.imencode('.jpg', img)
        return jpeg.tobytes()
Example #2
File: app.py  Project: Manas73/fer-2013
def get_frame(our_image):
    model = FacialExpressionModel("model.json", "model_weights.h5")
    image = np.array(our_image.convert("RGB"))
    # PIL gives RGB; convert to BGR so the later BGR2GRAY and drawing calls behave as intended
    new_image = cv2.cvtColor(image, cv2.COLOR_RGB2BGR)
    gray_fr = cv2.cvtColor(new_image, cv2.COLOR_BGR2GRAY)
    faces = facec.detectMultiScale(gray_fr, 1.3, 5)
    for (x, y, w, h) in faces:
        fc = gray_fr[y:y + h, x:x + w]

        roi = cv2.resize(fc, (48, 48))
        pred, probs = model.predict_emotion(roi[np.newaxis, :, :, np.newaxis])

        cv2.putText(new_image, pred, (x, y), font, 1, (255, 255, 0), 2)
        cv2.rectangle(new_image, (x, y), (x + w, y + h), (255, 0, 0), 2)
    if len(faces) == 0:
        probs = []
    # _, jpeg = cv2.imencode('.jpg', fr)
    return new_image, faces, probs
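Since this get_frame takes a PIL-style image (note the .convert("RGB") call) rather than a camera frame, a minimal usage sketch might look like the following; the file name is made up for illustration:

from PIL import Image

# Hypothetical usage: run expression detection on a still image from disk
our_image = Image.open("some_photo.jpg")
annotated, faces, probs = get_frame(our_image)
print(len(faces), "face(s) detected")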
Example #3
import cv2
from model import FacialExpressionModel
import numpy as np

facec = cv2.CascadeClassifier(
    '/fer-model-realtime/haarcascade_frontalface_default.xml')
model = FacialExpressionModel("/fer-model-realtime/Model/fer_model.json",
                              "/fer-model-realtime/Model/weights.h5")
font = cv2.FONT_HERSHEY_SIMPLEX


class VideoCamera(object):
    def __init__(self):
        self.video = cv2.VideoCapture(0)

    def __del__(self):
        self.video.release()

    # returns camera frames along with bounding boxes and predictions
    def get_frame(self):
        ret, fr = self.video.read()
        gray_fr = cv2.cvtColor(fr, cv2.COLOR_BGR2GRAY)
        faces = facec.detectMultiScale(gray_fr, 1.3, 5)

        for (x, y, w, h) in faces:
            fc = gray_fr[y:y + h, x:x + w]

            roi = cv2.resize(fc, (48, 48))
            pred = model.predict_emotion(roi[np.newaxis, :, :, np.newaxis])

            cv2.putText(fr, pred, (x, y), font, 1, (0, 0, 0), 2)
Example #4
    return command


if __name__ == "__main__":

    ap = argparse.ArgumentParser()
    ap.add_argument(
        "-i",
        "--input",
        help=
        "Path to video file or image. 'cam' for capturing video stream from camera",
        required=True,
        type=str,
        default=None)
    ap.add_argument(
        "-pt",
        "--prob_threshold",
        help="Probability threshold for object detections filtering",
        default=0.5,
        type=float)

    args = vars(ap.parse_args())
    # Model object instantiation
    obj_detector = ObjectDetectionModel(
        args['prob_threshold'])  # ObjectDetection model
    face_detector = MTCNN()  # FaceDetection & Landmark model
    emotion_detector = FacialExpressionModel()  # EmotionRecognition model
    age_gener_detector = FaceGenderAge()  # Age&Gender Prediction model

    app.run()
Example #5
import cv2
from model import FacialExpressionModel
import numpy as np

facec = cv2.CascadeClassifier('haarcascade_frontalface_default.xml')
# This is an inbuilt OpenCV classifier for face detection
model = FacialExpressionModel('model.json', 'model_weights.h5')
# This is the model required for prediction
font = cv2.FONT_HERSHEY_SIMPLEX

class VideoCamera(object):
    def __init__(self):
        # Using OpenCV to capture from device 0. If you have trouble capturing
        # from a webcam, comment the line below out and use a video file
        # instead.
        self.video = cv2.VideoCapture(0)
        # If you decide to use video.mp4, you must have this file in the folder
        # as the main.py.
        # self.video = cv2.VideoCapture('video.mp4')

    def __del__(self):
        self.video.release()

    def get_frame(self):
        success, fr = self.video.read()
        # We are using Motion JPEG, but OpenCV defaults to capture raw images,
        # so we must encode it into JPEG in order to correctly display the
        # video stream.
        gray_fr = cv2.cvtColor(fr, cv2.COLOR_BGR2GRAY)
        faces = facec.detectMultiScale(gray_fr, 1.3, 5)
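The snippet above is cut off before the step its own comment describes (JPEG-encoding the frame for the Motion JPEG stream). A rough sketch of the rest of get_frame, following the pattern used by the other examples on this page:

        for (x, y, w, h) in faces:
            fc = gray_fr[y:y + h, x:x + w]
            roi = cv2.resize(fc, (48, 48))
            pred = model.predict_emotion(roi[np.newaxis, :, :, np.newaxis])
            cv2.putText(fr, pred, (x, y), font, 1, (255, 255, 0), 2)
            cv2.rectangle(fr, (x, y), (x + w, y + h), (255, 0, 0), 2)

        # Encode the annotated frame as JPEG so it can be served to the browser
        _, jpeg = cv2.imencode('.jpg', fr)
        return jpeg.tobytes()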
Example #6
import cv2
from model import FacialExpressionModel
import numpy as np
from ffpyplayer.player import MediaPlayer

facec = cv2.CascadeClassifier('haarcascade_frontalface_default.xml')
model = FacialExpressionModel("model.json", "modelV2.h5")
font = cv2.FONT_HERSHEY_SIMPLEX


class VideoCamera(object):
    def __init__(self):
        self.video = cv2.VideoCapture(0)
        #self.player = MediaPlayer("videos/vj.mp4")
    def __del__(self):
        self.video.release()

    # returns camera frames along with bounding boxes and predictions
    def get_frame(self):
        _, fr = self.video.read()
        #audio_frame, val = self.player.get_frame()
        gray_fr = cv2.cvtColor(fr, cv2.COLOR_BGR2GRAY)
        faces = facec.detectMultiScale(gray_fr, 1.2, 6)
        #if val != 'eof' and audio_frame is not None:
        #audio
        #   img, t = audio_frame

        for (x, y, w, h) in faces:
            fc = gray_fr[y:y + h, x:x + w]

            roi = cv2.resize(fc, (48, 48))
Example #7
import cv2
from model import FacialExpressionModel
import numpy as np

facec = cv2.CascadeClassifier('haarcascade_frontalface_default.xml')
model = FacialExpressionModel("model.json", "model_weights.h5")
font = cv2.FONT_HERSHEY_SIMPLEX


class VideoCamera(object):
    def __init__(self):
        self.video = cv2.VideoCapture(
            'C:/Users/neel/Desktop/Facial Expression Recognition with Keras Project/Project/videos/WhatsApp Video 2020-09-05 at 10.02.43 PM.mp4'
        )

    def __del__(self):
        self.video.release()

    # returns camera frames along with bounding boxes and predictions
    def get_frame(self):
        _, fr = self.video.read()
        gray_fr = cv2.cvtColor(fr, cv2.COLOR_BGR2GRAY)
        faces = facec.detectMultiScale(gray_fr, 1.3, 5)

        for (x, y, w, h) in faces:
            fc = gray_fr[y:y + h, x:x + w]

            roi = cv2.resize(fc, (48, 48))
            pred = model.predict_emotion(roi[np.newaxis, :, :, np.newaxis])

            cv2.putText(fr, pred, (x, y), font, 1, (255, 255, 0), 2)
Example #8
import cv2
from model import FacialExpressionModel
import numpy as np
from tensorflow.keras.preprocessing.image import img_to_array

face_classifier = cv2.CascadeClassifier(cv2.data.haarcascades + 'haarcascade_frontalface_default.xml')
model = FacialExpressionModel("emotionModel.json", "EmotionModelv3.h5")
font = cv2.FONT_HERSHEY_SIMPLEX

class VideoCam(object):
    def __init__(self):
        self.video = cv2.VideoCapture(0)

    def __del__(self):
        self.video.release()

    def get_frame(self):
        _, fr = self.video.read()
        gray_frame = cv2.cvtColor(fr, cv2.COLOR_BGR2GRAY)
        faces = face_classifier.detectMultiScale(gray_frame, 1.3, 5)

        for (x, y, w, h) in faces:
            fc = gray_frame[y:y+h, x:x+w]

            roi = cv2.resize(fc, (48, 48))
            numpy_img = img_to_array(roi)
            image_batch = np.expand_dims(numpy_img, axis=0)
            image_batch /= 255.
            pred = model.predict_emotion(image_batch)

            cv2.putText(fr, pred, (x,y), font, 1, (255, 255, 0), 2)
Example #9
import cv2
from model import FacialExpressionModel
import dlib
import numpy as np

facec = cv2.CascadeClassifier(
    'E://Youtube/Real-Time-Face-Expression-Recognition/haarcascade_frontalface_default.xml'
)
model = FacialExpressionModel(
    "E://Youtube/Real-Time-Face-Expression-Recognition/model.json",
    "E://Youtube/Real-Time-Face-Expression-Recognition/model_weights.h5")
font = cv2.FONT_HERSHEY_SIMPLEX
detector = dlib.get_frontal_face_detector()
predictor = dlib.shape_predictor(
    "C://Users/omkar/Downloads/shape_predictor_68_face_landmarks.dat")


class VideoCamera(object):
    def __init__(self):
        self.video = cv2.VideoCapture(0)

    def __del__(self):
        self.video.release()

    # returns camera frames along with bounding boxes and predictions
    def get_frame(self, red, blue, green, pigment=0.3):
        _, fr = self.video.read()
        output = fr.copy()
        gray_fr = cv2.cvtColor(fr, cv2.COLOR_BGR2GRAY)
        faces = facec.detectMultiScale(gray_fr, 1.3, 5)
        faces2 = detector(gray_fr)
Example #10
import cv2
from model import FacialExpressionModel
import numpy as np
import pafy
import base64
import io

facec = cv2.CascadeClassifier(
    'app/artifact/haarcascade_frontalface_default.xml')
model = FacialExpressionModel("model.json", "app/artifact/model_weights.h5")
font = cv2.FONT_HERSHEY_SIMPLEX


def data_uri_to_cv2_img(uri):
    encoded_data = uri.split(',')[1]
    nparr = np.frombuffer(base64.b64decode(encoded_data), np.uint8)
    img = cv2.imdecode(nparr, cv2.IMREAD_COLOR)
    return img


class VideoCamera(object):
    def __init__(self, video_id):
        if video_id is None:
            self.video = cv2.VideoCapture(0)
        else:
            url = "https://www.youtube.com/watch?v={}".format(video_id)
            vPafy = pafy.new(url)
            play = vPafy.getbest(preftype="mp4")
            self.video = cv2.VideoCapture(play.url)

    def __del__(self):
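As a quick sanity check of the data_uri_to_cv2_img helper, one could round-trip a dummy frame through a data URI (the frame below is synthetic, for illustration only):

import base64
import cv2
import numpy as np

frame = np.zeros((4, 4, 3), dtype=np.uint8)   # tiny dummy image
ok, buf = cv2.imencode('.jpg', frame)          # encode it as JPEG bytes
uri = "data:image/jpeg;base64," + base64.b64encode(buf.tobytes()).decode("ascii")
decoded = data_uri_to_cv2_img(uri)             # decode back to a cv2 image
print(decoded.shape)                           # -> (4, 4, 3)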
Example #11
def runn(fname):
    model = FacialExpressionModel("face_model.json", "face_model.h5")
    start_app(model, fname)
Example #12
import cv2
from model import FacialExpressionModel
import numpy as np

facec = cv2.CascadeClassifier('haarcascade_frontalface_default.xml')
model = FacialExpressionModel('checkpoints/model_final.json',
                              'checkpoints/model_weights.h5')
font = cv2.FONT_HERSHEY_SIMPLEX
PIC_SIZE = 48


class VideoCamera(object):
    def __init__(self):
        self.video = cv2.VideoCapture(0)

    def __del__(self):
        self.video.release()

    def get_frame(self):
        _, fr = self.video.read()
        gray_fr = cv2.cvtColor(fr, cv2.COLOR_BGR2GRAY)
        faces = facec.detectMultiScale(gray_fr, 1.3, 5)

        for (x, y, w, h) in faces:
            fc = gray_fr[y:y + h, x:x + w]

            roi = cv2.resize(fc, (PIC_SIZE, PIC_SIZE))
            pred = model.predict_emotion(roi[np.newaxis, :, :, np.newaxis])

            cv2.putText(fr, pred, (x, y), font, 1, (255, 255, 0), 2)
            cv2.rectangle(fr, (x, y), (x + w, y + h), (255, 0, 0), 2)

Example #13

def start_app(facial_model):
    skip_frame = 10
    data = []
    flag = False
    ix = 0
    while True:
        ix += 1

        faces, fr, gray_fr = __get_data__()
        for x, y, w, h in faces:
            fc = gray_fr[y:y + h, x:x + w]

            roi = cv2.resize(fc, (48, 48))
            pred = facial_model.predict_emotion(roi[np.newaxis, :, :,
                                                    np.newaxis])

            cv2.putText(fr, pred, (x, y), font, 1, (255, 255, 0), 2)
            cv2.rectangle(fr, (x, y), (x + w, y + h), (255, 0, 0), 2)

        if cv2.waitKey(1) == 27:
            break
        cv2.imshow('Filter', fr)
    cv2.destroyAllWindows()


if __name__ == '__main__':
    model = FacialExpressionModel('face_model.json', 'face_model.h5')
    start_app(model)
Example #14
def __get_data__():
    """
    returns: tuple (faces in image, frame read, grayscale frame)
    """
    _, fr = rgb.read()
    gray = cv2.cvtColor(fr, cv2.COLOR_BGR2GRAY)
    faces = facec.detectMultiScale(gray, 1.3, 5)
    
    return faces, fr, gray

def start_app(cnn):
    
    while True:
        faces, fr, gray_fr = __get_data__()
        for (x, y, w, h) in faces:
            fc = gray_fr[y:y+h, x:x+w]
            roi = cv2.resize(fc, (48, 48))
            pred, prob = cnn.predict_emotion(roi[np.newaxis, :, :, np.newaxis])
            text = "%s , %.2f" % (pred, prob * 100)
            cv2.putText(fr, text, (x, y), font, 1, (255, 255, 0), 2)
            cv2.rectangle(fr, (x, y), (x + w, y + h), (255, 0, 0), 2)

        if cv2.waitKey(1) == 27:
            break
        cv2.imshow('WebCam', fr)
    cv2.destroyAllWindows()

if __name__ == '__main__':
    model = FacialExpressionModel("./models/model.json", "./models/model.h5")
    start_app(model)

                
Example #15

                    cv2.imshow("emotion_win", fr)
                    cv2.waitKey(1)
                    logging.debug('ConsumerThread ' + str(qbuffer.qsize()) + ' items in queue') 

                    #out.write(fr)                    
        return
        


if __name__ == '__main__':

    rgb = cv2.VideoCapture(0)
    facec = cv2.CascadeClassifier('haarcascade_frontalface_default.xml')
    font = cv2.FONT_HERSHEY_SIMPLEX
    cnn = FacialExpressionModel("face_model.json", "face_model.h5")
    #fourcc = cv2.VideoWriter_fourcc(*'XVID')
    #out = cv2.VideoWriter('output.avi',fourcc,float(5), (640,480))    

    t1 = ProducerThread(name='producer')
    t2 = ConsumerThread(name='consumer')

    t1.start()
    t2.start()

    time.sleep(30)
    t1.stop()
    t2.stop()
Example #16
import cv2
from model import FacialExpressionModel
import numpy as np

facec = cv2.CascadeClassifier('haarcascade_frontalface_default.xml') #cascade classifier
model = FacialExpressionModel("model.json", "model_weights.h5") #new model for predictions
font = cv2.FONT_HERSHEY_SIMPLEX

class VideoCamera(object): #class for the video
    def __init__(self):
        self.video = cv2.VideoCapture("/home/rhyme/Desktop/Project/videos/facial_exp.mkv")  # change the path to 0 to use the webcam as the input source

    def __del__(self):
        self.video.release()

    # returns camera frames along with bounding boxes and predictions
    def get_frame(self):  # read one frame and predict the expression for each detected face
        _, fr = self.video.read()
        gray_fr = cv2.cvtColor(fr, cv2.COLOR_BGR2GRAY)
        faces = facec.detectMultiScale(gray_fr, 1.3, 5)  # detect faces in the grayscale frame

        for (x, y, w, h) in faces:
            fc = gray_fr[y:y+h, x:x+w]

            roi = cv2.resize(fc, (48, 48))  # resize the face ROI to the model's 48x48 input
            pred = model.predict_emotion(roi[np.newaxis, :, :, np.newaxis])

            cv2.putText(fr, pred, (x, y), font, 1, (255, 255, 0), 2)  # draw the predicted expression label with OpenCV
            cv2.rectangle(fr, (x, y), (x + w, y + h), (255, 0, 0), 2)  # draw a box around the detected face

        _, jpeg = cv2.imencode('.jpg', fr)
Example #17

import cv2
import numpy as np
import matplotlib.pyplot as plt

from model import FacialExpressionModel

# Creating an instance of the class with the parameters as model and its weights.
test_model = FacialExpressionModel("model.json", "model.h5")

# Loading the classifier from the file.
facec = cv2.CascadeClassifier('haarcascade_frontalface_alt.xml')


def Emotion_Analysis(img):
    """ It does prediction of Emotions found in the Image provided, does the
    Graphical visualisation, saves as Images and returns them """

    # Read the Image through OpenCv's imread()
    path = "static/" + str(img)
    image = cv2.imread(path)

    # Convert the Image into Gray Scale
    gray_frame = cv2.cvtColor(image, cv2.COLOR_BGR2GRAY)

    # Image size is reduced by 30% at each image scale.
    scaleFactor = 1.3

    # 5 neighbors should be present for each rectangle to be retained.
    minNeighbors = 5

    # Detect the Faces in the given Image and store it in faces.
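The example is cut off at the detection step its last comment announces; given the scaleFactor and minNeighbors variables defined just above, that call would presumably be:

    faces = facec.detectMultiScale(gray_frame, scaleFactor, minNeighbors)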
Example #18
# import the necessary packages
from scipy.spatial import distance as dist
from imutils.video import VideoStream
from imutils import face_utils
from threading import Thread
import playsound
import imutils
import time
import dlib
import cv2
import numpy as np
import datetime
from model import FacialExpressionModel

model = FacialExpressionModel("model_filter_V2_SGD_100.h5")


# FPS counter
class FPS:
    def __init__(self):
        # store the start time, end time, and total number of frames
        # that were examined between the start and end intervals
        self._start = None
        self._end = None
        self._numFrames = 0

    def start(self):
        # start the timer
        self._start = datetime.datetime.now()
        return self
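The FPS helper above is cut short after start(); the remaining methods the class comments describe (stop time, frame count, elapsed seconds) would, in the usual imutils-style form, look roughly like this:

    def stop(self):
        # stop the timer
        self._end = datetime.datetime.now()
        return self

    def update(self):
        # increment the number of frames examined during the interval
        self._numFrames += 1

    def elapsed(self):
        # return the total number of seconds between start and stop
        return (self._end - self._start).total_seconds()

    def fps(self):
        # compute the (approximate) frames per second
        return self._numFrames / self.elapsed()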
Example #19
import cv2
from model import FacialExpressionModel
import numpy as np

faceCascade = cv2.CascadeClassifier('haarcascade_frontalface_default.xml')
model = FacialExpressionModel("model.json", "model_weights.h5")
font = cv2.FONT_HERSHEY_SIMPLEX

video_capture = cv2.VideoCapture(0)
# video_capture = cv2.VideoCapture("1.mp4")

while True:
    ret, fr = video_capture.read()
    fr = cv2.flip(fr, 1)
    gray_fr = cv2.cvtColor(fr, cv2.COLOR_BGR2GRAY)
    faces = faceCascade.detectMultiScale(gray_fr,
                                         scaleFactor=1.1,
                                         minNeighbors=5,
                                         minSize=(30, 30))
    for (x, y, w, h) in faces:
        fc = gray_fr[y:y + h, x:x + w]
        roi = cv2.resize(fc, (48, 48))
        pred = model.predict_emotion(roi[np.newaxis, :, :, np.newaxis])

        cv2.putText(fr, pred, (x, y), font, 1, (255, 255, 0), 2)
        cv2.rectangle(fr, (x, y), (x + w, y + h), (255, 0, 0), 2)
        _, jpeg = cv2.imencode('.jpg', fr)

    cv2.imshow('Video', fr)
    if cv2.waitKey(1) & 0xFF == ord('q'):
        break
Example #20
from model import FacialExpressionModel
from tensorflow import keras
from keras.preprocessing.image import ImageDataGenerator, load_img, img_to_array
from cv2 import *
import keras
import os, sys, time, socket
import threading
import argparse
import numpy as np
import cv2

modelsdir = 'models'

model_loaded = FacialExpressionModel("models/model.json", "models/fer.h5")
facec = cv2.CascadeClassifier('haarcascade_frontalface_default.xml')
font = cv2.FONT_HERSHEY_SIMPLEX

default_server_port = 9253
"""
Predict class of an image
"""


def predictImage(imagefile):

    if isinstance(imagefile, str):
        inp = inputImage(imagefile)
    else:
        inp = imagefile

    if inp is not None:
Example #21
import cv2
from model import FacialExpressionModel
import numpy as np

facec = cv2.CascadeClassifier('haarcascade_frontalface_default.xml')
model = FacialExpressionModel("model.h5")
font = cv2.FONT_HERSHEY_SIMPLEX


class VideoCamera(object):
    def __init__(self):
        self.video = cv2.VideoCapture(0)
# To use a recorded video, replace the parameter 0 with 'location\nameoffile',
# e.g. self.video = cv2.VideoCapture('Videos\\sample.mp4')

    def __del__(self):
        self.video.release()

    # returns camera frames along with bounding boxes and predictions
    def get_frame(self):
        _, fr = self.video.read()
        gray_fr = cv2.cvtColor(fr, cv2.COLOR_BGR2GRAY)
        faces = facec.detectMultiScale(gray_fr, 1.3, 5)

        for (x, y, w, h) in faces:
            fc = gray_fr[y:y + h, x:x + w]

            roi = cv2.resize(fc, (48, 48))
            pred = model.predict_emotion(roi[np.newaxis, :, :, np.newaxis])

            cv2.putText(fr, pred, (x, y), font, 1, (255, 255, 0), 2)
Example #22

def start_app(cnn):
    skip_frame = 10
    data = []
    flag = False
    ix = 0
    while True:
        ix += 1

        faces, fr, gray_fr = __get_data__()
        for (x, y, w, h) in faces:
            fc = gray_fr[y:y + h, x:x + w]

            roi = cv2.resize(fc, (48, 48))
            pred = cnn.predict_emotion(roi[np.newaxis, :, :, np.newaxis])

            cv2.putText(fr, pred, (x, y), font, 1, (255, 255, 0), 2)
            cv2.rectangle(fr, (x, y), (x + w, y + h), (255, 0, 0), 2)

        if cv2.waitKey(1) & 0xFF == ord('q'):
            break
        imS = cv2.resize(fr, (960, 540))
        cv2.imshow('Filter', imS)
    cv2.destroyAllWindows()


if __name__ == '__main__':
    model = FacialExpressionModel("dataset/face_model.json",
                                  "dataset/face_model.h5")
    start_app(model)
Example #23
# Import the relevant libraries
import cv2
from model import FacialExpressionModel
import numpy as np

# Haar cascade classifier used to detect faces
facec = cv2.CascadeClassifier('haarcascade_frontalface_default.xml')
model = FacialExpressionModel("model.json", "model_weights.h5")  # load our trained model
font = cv2.FONT_HERSHEY_SIMPLEX

class VideoCamera(object):
    def __init__(self):
        #Initialising video capturing function, pointing opencv to a video already stored
        self.video = cv2.VideoCapture("/home/rhyme/Desktop/Project/videos/facial_exp.mkv")
        # self.video = cv2.VideoCapture(0)  # change to 0 to use the default webcam instead of the stored file

    def __del__(self):
        self.video.release()

    # returns camera frames along with bounding boxes and predictions
    def get_frame(self):
        _, fr = self.video.read()
        #Make sure each input frame is in GRAYSCALE
        gray_fr = cv2.cvtColor(fr, cv2.COLOR_BGR2GRAY)
        faces = facec.detectMultiScale(gray_fr, 1.3, 5)

        for (x, y, w, h) in faces:
            fc = gray_fr[y:y+h, x:x+w]

            roi = cv2.resize(fc, (48, 48))