def emotion_from_image():
    # Initialise the model
    er = EmotionRecognition(device='gpu', gpu_id=0)
    # Start the camera
    cam = VideoCapture(0)
    success, frame = cam.read()
    # Extract the emotion and its probability
    emotion, proba = er.recognise_emotion(frame, return_type='BGR')
    initial_time = time()
    # Try to capture an emotion for 5 seconds; if no face is detected, we return None
    while emotion is None and time() - initial_time < 5:
        success, frame = cam.read()
        emotion, proba = er.recognise_emotion(frame, return_type='BGR')
    cam.release()
    destroyAllWindows()
    # If no face was seen within the time limit, return None for both values
    if emotion is None:
        return None, None
    return emotion, max(proba)
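
A short usage sketch for the helper above. The bare VideoCapture, destroyAllWindows and time names imply the original file imports them directly, and recognise_emotion is assumed to return an (emotion, probabilities) pair as in the snippet; both points are assumptions rather than guarantees of the library's API.

from time import time

from cv2 import VideoCapture, destroyAllWindows
from facial_emotion_recognition import EmotionRecognition

emotion, confidence = emotion_from_image()
if emotion is None:
    print("no face detected within 5 seconds")
else:
    print(f"detected {emotion} with confidence {confidence:.2f}")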
Example #2
def init():
    global frame_width
    global frame_height
    global input_dir
    global output_dir
    global emotion_model
    global time_to_leave_in_sec
    global lectureKeyWords
    global path_tesseract
    input_dir = "./input"
    frame_width = 1920
    frame_height = 1080
    output_dir = "./output/"
    emotion_model = EmotionRecognition(device='cpu', gpu_id=0)
    time_to_leave_in_sec = 5
    with open("./keywords.txt", "r") as keywords_file:
        lectureKeyWords = keywords_file.read().split(";")
    path_tesseract = "C:/Program Files (x86)/Tesseract-OCR/tesseract.exe"
    pytesseract.tesseract_cmd = path_tesseract  # assumes "from pytesseract import pytesseract"; reuse the path set above
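
These globals suggest the script later OCRs captured lecture slides and matches the text against the keyword list; a hedged sketch of such a step (the frame_contains_keywords helper is hypothetical, and init() is assumed to have run) might look like:

import cv2
import pytesseract

def frame_contains_keywords(frame):
    # Hypothetical helper: OCR a captured slide and check for any lecture keyword
    gray = cv2.cvtColor(frame, cv2.COLOR_BGR2GRAY)
    text = pytesseract.image_to_string(gray).lower()
    return any(keyword.strip().lower() in text for keyword in lectureKeyWords)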
Example #3
from facial_emotion_recognition import EmotionRecognition

import cv2

er = EmotionRecognition(device='cpu')

cam = cv2.VideoCapture(0)

while True:
    success, frame = cam.read()
    frame = er.recognise_emotion(frame, return_type='BGR')
    cv2.imshow('frame', frame)
    key = cv2.waitKey(1)
    if key == 27:  # Esc key
        break
cam.release()
cv2.destroyAllWindows()
Example #4
pip install might work.

'''

from facenet_pytorch import MTCNN
import torch

'''
these two libraries are normally used inside the facial_emotion_recognition library itself, but they are imported here because they help with the face counter
'''

import numpy as np
import cv2 as cv


er = EmotionRecognition(device='gpu', gpu_id=0) 
gpu_id = 0  # GPU index, used by the face-counting code below
device = torch.device(f'cuda:{gpu_id}')  # use the GPU if your machine supports it; otherwise comment this line out and pass device='cpu' below
cam = cv.VideoCapture(r'C:\Users\komsi\Desktop\Misk - GeStream\AI project - face detection\Data for emotion\Studenlecture04.mov')  # video to be tested; when using a webcam the argument is 0
mtcnn = MTCNN(keep_all=True, device=device)  # also used for face counting; on a machine without a GPU, set device='cpu' here and comment out the torch.device line above
font = cv.FONT_HERSHEY_SIMPLEX  # font for the students' attendance text
total_students = 50  # default value; to be supplied by the user later


while True:
    success, frame = cam.read()  # read the video frame by frame
    faces, _ = mtcnn.detect(frame)  # bounding boxes come back as a numpy array, one row per face; None if no face is found
    if isinstance(faces, np.ndarray):  # one or more faces were detected
        cv.putText(frame, 'present students = {number}'.format(number=len(faces)), (10, 50), font, 1, (0, 255, 255), 1, cv.LINE_4)
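
The source listing truncates this example here; a plausible continuation of the loop under the same assumptions (annotate the frame with the emotion model, display it, and stop on Esc) could look like this:

    frame = er.recognise_emotion(frame, return_type='BGR')  # overlay the recognised emotions
    cv.imshow('frame', frame)
    if cv.waitKey(1) == 27:  # Esc key stops the stream
        break

cam.release()
cv.destroyAllWindows()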
Example #5
from facial_emotion_recognition import EmotionRecognition
import cv2
er = EmotionRecognition(
    device='gpu')  # use device='cpu' if you don't have a GPU
cam = cv2.VideoCapture(0)

while True:
    _, img = cam.read()
    img = er.recognise_emotion(img, return_type='BGR')
    cv2.imshow('frame', img)
    if cv2.waitKey(1) == 27:  #esc
        break
cam.release()
cv2.destroyAllWindows()
Example #6
    def __init__(self):
        self.cuadro = Cuadro()
        self.er = EmotionRecognition(device='gpu', gpu_id=0)
Example #7
def run_alexa():
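    """Listen for a single voice command and dispatch it to the matching action."""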
    command = take_command()
    print(command)
    if "play" in command:
        song = command.replace('play', '')
        talk('playing' + song)
        pywhatkit.playonyt(song)
    elif "search" in command:
        se = command.replace("search", " ")
        talk("great" + se)
        pywhatkit.search(se)
    elif 'time' in command:
        time = datetime.datetime.now().strftime('%I:%M %p')
        talk('current time is ' + time)
    elif 'who is ' in command:
        person = command.replace('who is', '')
        info = wikipedia.summary(person, 5)
        print(info)
        talk(info)
    elif "date" in command:
        talk("sorry, I have a boyfriend")
    elif "are you single" in command:
        talk("I am in a relationship in kuku")
    elif "joke" in command:
        talk(pyjokes.get_joke())
    elif "hello" in command:
        talk("hello who are you" " how may i help you ")
    elif "and you" in command:
        talk("am good" "how may i help you")

    elif "call" in command:
        pywhatkit.sendwhatmsg("datetime.datetime.now()")

    elif "search" in command:

        google = command.replace('search', '')
        print(google)
        talk('search' + google)
        pywhatkit.search(google)
    elif "open camera" in command:
        vs = cv2.VideoCapture(0)
        talk("camera open")
        while True:
            _, img = vs.read()
            cv2.imshow('videostream', img)
            key = cv2.waitKey(2) & 0xff

            if key == ord('q'):
                break
        vs.release()
        cv2.destroyAllWindows()
    elif "open image" in command:
        talk("image open")
        img = cv2.imread(r"G:\IMG.jpg")
        cv2.imshow("original", img)
        cv2.imwrite("IMG.jpg", img)
        cv2.waitKey(0)
        cv2.destroyAllWindows()

    elif "selfie" in command:
        talk("say chese")
        cam = cv2.VideoCapture(0)

        _, img = cam.read()
        cv2.imwrite("imagefromCamera.jpg", img)
        cv2.imshow("imagefromCamera.jpg", img)
        cam.release()
        cv2.waitKey(0)
        cv2.destroyAllWindows()
    elif "detected me" in command:
        cam = cv2.VideoCapture(0)

        firstFrame = None
        area = 500
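        # simple motion detection: blur each frame, diff it against the first
        # captured (background) frame, and look for sufficiently large contours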
        while True:
            _, img = cam.read()
            text = "Normal"
            img = imutils.resize(img, width=500)
            grayImg = cv2.cvtColor(img, cv2.COLOR_BGR2GRAY)
            gaussianImg = cv2.GaussianBlur(grayImg, (21, 21), 0)
            if firstFrame is None:
                firstFrame = gaussianImg
                continue
            imgDiff = cv2.absdiff(firstFrame, gaussianImg)
            "threah img impROVEd pig"
            threahImg = cv2.threshold(imgDiff, 25, 255, cv2.THRESH_BINARY)[1]
            threahImg = cv2.dilate(threahImg, None, iterations=2)
            "cnta = help find never for thing "
            cnts = cv2.findContours(threahImg.copy(), cv2.RETR_EXTERNAL,
                                    cv2.CHAIN_APPROX_SIMPLE)
            cnts = imutils.grab_contours(cnts)
            count = 0

            for c in cnts:
                if cv2.contourArea(c) < area:
                    continue
                (x, y, w, h) = cv2.boundingRect(c)
                cv2.rectangle(img, (x, y), (x + w, y + h), (0, 255, 0), 2)
                count = count + 1
                text = 'moving object detected' + str(count)
                print(text)
            cv2.putText(img, text, (10, 20), cv2.FONT_HERSHEY_SIMPLEX, 0.5,
                        (0, 0, 255), 2)

            cv2.imshow("cameraFeed", img)
            key = cv2.waitKey(1) & 0xFF
            if key == ord("q"):
                break
        cam.release()
        cv2.destroyAllWindows()
    elif "face" in command:
        alg = r'G:\haarcascade_frontalface_default.xml'  # pre-trained Haar cascade face detector
        haar_cascade = cv2.CascadeClassifier(alg)
        cam = cv2.VideoCapture(0)

        while True:
            _, img = cam.read()
            text = 'no person detected'
            grayImg = cv2.cvtColor(img, cv2.COLOR_BGR2GRAY)
            face = haar_cascade.detectMultiScale(grayImg, 1.3, 4)

            for (x, y, w, h) in face:
                cv2.rectangle(img, (x, y), (x + w, y + h), (0, 255, 0), 2)
                text = 'person detected'

            # draw the current status once per frame
            cv2.putText(img, text, (10, 20), cv2.FONT_HERSHEY_SIMPLEX,
                        0.5, (0, 0, 255), 2)

            cv2.imshow('facedetection', img)
            key = cv2.waitKey(10)
            if key == 27 or key == ord('q'):  # Esc or q exits
                break

        cam.release()
        cv2.destroyAllWindows()
    elif "emotion" in command:

        er = EmotionRecognition(device='cpu')
        cam = cv2.VideoCapture(0)
        while True:
            success, frame = cam.read()
            frame = er.recognise_emotion(frame, return_type='BGR')
            cv2.imshow('frame', frame)
            key = cv2.waitKey(1)
            if key == 27:
                break
        cam.release()
        cv2.destroyAllWindows()

    else:
        talk(" sorry could not understand audio")