Example #1
def analyze_post():
    request_data = request.get_json()

    # Validate the id parameter
    if 'id' not in request_data:
        return jsonify(message='El parámetro id es requerido'), 400
    elif not isinstance(request_data['id'], int) or request_data['id'] <= 0:
        return jsonify(message='El parámetro id debe ser un entero mayor a 0'), 400
    else:
        affiliate_path = os.path.join(path, str(request_data['id']))
        if not os.path.exists(affiliate_path):
            os.mkdir(affiliate_path)

    # Validate the is_base64 parameter
    if 'is_base64' not in request_data:
        return jsonify(message='El parámetro is_base64 es requerido'), 400
    elif not isinstance(request_data['is_base64'], (str, bool, int)):
        return jsonify(message='El parámetro is_base64 debe ser booleano'), 400
    else:
        if isinstance(request_data['is_base64'], int):
            if request_data['is_base64'] not in (1, 0):
                return jsonify(
                    message='El parámetro is_base64 debe ser booleano'), 400
            else:
                is_base64 = bool(request_data['is_base64'])
        elif isinstance(request_data['is_base64'], bool):
            is_base64 = request_data['is_base64']
        elif isinstance(request_data['is_base64'], str):
            is_base64 = utils.str2bool(request_data['is_base64'])

    # Validate the image parameter
    if 'image' not in request_data:
        return jsonify(message='El parámetro image es requerido'), 400
    else:
        if is_base64:
            if not isinstance(request_data['image'], str) or len(
                    request_data['image']) < 100:
                return jsonify(
                    message='La cadena de texto en base64 image debe tener al menos 100 caracteres'
                ), 400
            else:
                image_path = os.path.join(affiliate_path,
                                          str(uuid.uuid4().hex) + '.jpg')
        else:
            image_path = os.path.join(affiliate_path, request_data['image'])
            if not os.path.exists(image_path):
                return jsonify(message='Archivo inexistente: ' +
                               image_path), 400

    # Validate the gaze parameter
    if 'gaze' not in request_data:
        get_gaze = True
    else:
        if not isinstance(request_data['gaze'], bool):
            get_gaze = True
        else:
            get_gaze = utils.str2bool(str(request_data['gaze']))

    # Validate the emotion parameter
    if 'emotion' not in request_data:
        get_emotion = True
    else:
        if not isinstance(request_data['emotion'], bool):
            get_emotion = True
        else:
            get_emotion = utils.str2bool(str(request_data['emotion']))

    try:
        if is_base64:
            contains_base64 = request_data['image'].find('base64,')
            if contains_base64 != -1:
                request_data['image'] = request_data['image'][contains_base64 +
                                                              7:]
            img = Image.open(BytesIO(base64.b64decode(request_data['image'])))
            img.save(image_path)
            # PIL decodes the image in RGB order; convert to BGR so this branch
            # matches the cv2.imread branch below
            img = cv2.cvtColor(np.array(img), cv2.COLOR_RGB2BGR)
        else:
            img = cv2.imread(image_path, 1)

        faces = detector(img, 0)
        # faces = face_recognition.face_locations(image)

        if len(faces) == 1:
            if get_gaze:
                face = faces[0]
                shape = predictor(cv2.cvtColor(img, cv2.COLOR_BGR2RGB), face)
                refImgPts = utils.ref2dImagePoints(shape)
                height, width, channel = img.shape
                focalLength = 1 * width
                cameraMatrix = utils.cameraMatrix(focalLength,
                                                  (height / 2, width / 2))

                # Compute the rotation and translation vectors via solvePnP
                success, rotationVector, translationVector = cv2.solvePnP(
                    face3Dmodel, refImgPts, cameraMatrix, mdists)

                # Compute the face angle
                rmat, jac = cv2.Rodrigues(rotationVector)
                angles, mtxR, mtxQ, Qx, Qy, Qz = cv2.RQDecomp3x3(rmat)
                if angles[1] < (-1 * gaze_angle):
                    gaze = 'left'
                elif angles[1] > gaze_angle:
                    gaze = 'right'
                else:
                    gaze = 'forward'
            else:
                gaze = 'undefined'

            # DeepFace analysis
            if get_emotion:
                df_analysis = DeepFace.analyze(
                    image_path,
                    models=model_actions,
                    actions=selected_actions,
                    detector_backend=os.environ.get('DF_ANALYZE_BACKEND'))
            else:
                df_analysis = None

            return jsonify({
                'message': 'Imagen analizada',
                'data': {
                    'file': image_path,
                    'gaze': gaze,
                    'analysis': df_analysis
                }
            })
    except:
        return jsonify({
            'message': 'Imagen inválida',
        }), 400
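
# Note: the endpoint above relies on a utils.str2bool helper that is not shown in
# this example. A minimal sketch of what such a helper might look like (an
# assumption, not the original implementation):
def str2bool(value):
    """Map common truthy strings ('true', '1', 'yes', ...) to a boolean."""
    return str(value).strip().lower() in ('true', '1', 'yes')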
Example #2
import cv2
from deepface import DeepFace
import numpy as np

face_cascade = cv2.CascadeClassifier("haarcascade_frontalface_default.xml")

video = cv2.VideoCapture(0)

while video.isOpened():
    _, frame = video.read()

    gray = cv2.cvtColor(frame, cv2.COLOR_BGR2GRAY)
    face = face_cascade.detectMultiScale(gray, scaleFactor=1.1, minNeighbors=5)

    for x, y, w, h in face:
        img = cv2.rectangle(frame, (x, y), (x + w, y + h), (0, 0, 255), 1)
        try:
            analyze = DeepFace.analyze(frame, actions=['emotion'])
            print(analyze['dominant_emotion'])
        except:
            print("no face")

    cv2.imshow('video', frame)
    key = cv2.waitKey(1)
    if key == ord('q'):
        break

video.release()
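
# A possible refinement (an assumption, not part of the original example): the Haar
# cascade has already located the face, so the detected region could be analyzed
# directly with enforce_detection=False, which avoids the "no face" exception path:
#
#     face_img = frame[y:y + h, x:x + w]
#     analyze = DeepFace.analyze(face_img, actions=['emotion'], enforce_detection=False)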
Example #3
def __init__(self, config):
    obj = DeepFace.analyze(img_path="sample_data/matthew.jpg",
                           actions=['age', 'gender', 'race', 'emotion'])
    print("")
    print("")
    print(
        "**************************************************************************"
    )
    print("Analyzing image source:")
    print(
        "**************************************************************************"
    )
    print("")
    print("Analyzing image source ...")
    print("")

    # Analyzing frame
    facialAnalysisDetection2DResults = DeepFace.analyze(rgb_frame,
                                                        models=models)
    print("")
    print("[INFO] Image source analysis done correctly.")
    print("")

    # Get time Detection
    timeDetection = datetime.datetime.now()

    # Print processed data
    print("")
    print(
        "**************************************************************************"
    )
    print("Results resume:")
    print(
        "**************************************************************************"
Example #5
print(resp_obj)
print(resp_obj["pair_1"]["verified"] == True)
print(resp_obj["pair_2"]["verified"] == True)

print("-----------------------------------------")

print("Bulk facial analysis tests")

dataset = [
	'dataset/img1.jpg',
	'dataset/img2.jpg',
	'dataset/img5.jpg',
	'dataset/img6.jpg'
]

resp_obj = DeepFace.analyze(dataset)
print(resp_obj["instance_1"]["age"]," years old ", resp_obj["instance_1"]["dominant_emotion"], " ",resp_obj["instance_1"]["gender"])
print(resp_obj["instance_2"]["age"]," years old ", resp_obj["instance_2"]["dominant_emotion"], " ",resp_obj["instance_2"]["gender"])
print(resp_obj["instance_3"]["age"]," years old ", resp_obj["instance_3"]["dominant_emotion"], " ",resp_obj["instance_3"]["gender"])
print(resp_obj["instance_4"]["age"]," years old ", resp_obj["instance_4"]["dominant_emotion"], " ",resp_obj["instance_4"]["gender"])


print("-----------------------------------------")

#-----------------------------------------

print("Facial analysis test. Passing nothing as an action")

img = "dataset/img4.jpg"
demography = DeepFace.analyze(img)
print(demography)
Example #6
import os
from deepface import DeepFace
while True:

    p = input("Enter -----   ")

    demography = DeepFace.analyze((p + str(".jpg")),
                                  actions=['age', 'emotion'])
    #demographies = DeepFace.analyze(["img1.jpg", "img2.jpg", "img3.jpg"]) #analyzing multiple faces same time
    print("Age: ", demography["age"])
    ##print("Gender: ", demography["gender"])
    print("Emotion: ", demography["dominant_emotion"])
    ##print("Race: ", demography["dominant_race"])

    os.system(p + str(".jpg"))

##
##from deepface import DeepFace
##DeepFace.stream(r"C:\Users\admin\.deepface\weights")
Example #7
                     max(0, faceBox[0] -
                         padding):min(faceBox[2] + padding, frame.shape[1] -
                                      1)]

        cv2.imwrite('data/test.jpg', face)

        # display the adjusted image with the added face box and text: font, BGR colour (blue, green, red ~ 0-255), thickness
        cv2.putText(resultImg, "Ready to analyze!",
                    (faceBox[0], faceBox[1] - 10), cv2.FONT_HERSHEY_SIMPLEX,
                    0.8, (0, 255, 255), 2, cv2.LINE_4)
        cv2.imshow("BiasedAI POC", resultImg)

#demography = DeepFace.analyze("img4.jpg", ['age', 'gender', 'race', 'emotion'])
#identical to the line above passing nothing as 2nd argument will find everything
try:
    demography = DeepFace.analyze("data/test.jpg", ['age', 'gender', 'race'])
    print(
        f'{demography["age"]}, {demography["gender"]},{demography["dominant_race"]}'
    )
except Exception as e:
    print(e)

# Post data
try:
    api = "http://localhost:8083/PersonData"
    params = {
        "age": demography["age"],
        "gender": demography["gender"],
        "race": demography["dominant_race"],
    }
    r = requests.post(url=api, data=params)
Example #8
from deepface import DeepFace

from PIL import Image

import cv2

import matplotlib.pyplot as plt
import speech_recognition as sr
from textblob import TextBlob

img_path = 'happy1.jpeg'

img = cv2.imread(img_path)

demography = DeepFace.analyze(img_path)

r = sr.Recognizer()

mic = sr.Microphone(device_index=1)

with sr.Microphone() as source:

    r.adjust_for_ambient_noise(source, duration=1)
    print()
    print("For which product you want to give review: ")
    print("listening...")
    audio = r.listen(source, timeout=3)
    print("Time's up, Thanks!!")

try:
Example #9
from deepface import DeepFace
import os

path = input(' Enter path:  ')
images = os.listdir(path)

attributes = ['race']

output_file = open('output.txt', 'a')

for img in images:
    try:
        print(img)
        dct = DeepFace.analyze(f"{path}/{img}", attributes)
        asian = dct['race']['asian']
        indian = dct['race']['indian']
        latino = dct['race']['latino hispanic']
        black = dct['race']['black']
        white = dct['race']['white']
        middle_eastern = dct['race']['middle eastern']
        dominant_race = dct['dominant_race']

        output_file.write(img)
        output_file.write(', ')
        output_file.write('asian, ')
        output_file.write(str(asian))
        output_file.write(', ')
        output_file.write('indian, ')
        output_file.write(str(indian))
        output_file.write(', ')
        output_file.write('black, ')
Example #10
from deepface import DeepFace
import json

#-----------------------------------------

print("Facial analysis tests")

img = "dataset/img4.jpg"
demography = DeepFace.analyze(img, ['age', 'gender', 'race', 'emotion'])

print("Demography:")
print(demography)
demography = json.loads(demography)

#check response is a valid json
print("Age: ", demography["age"])
print("Gender: ", demography["gender"])
print("Race: ", demography["dominant_race"])
print("Emotion: ", demography["dominant_emotion"])

print("-----------------------------------------")

print("Face recognition tests")

dataset = [
    ['dataset/img1.jpg', 'dataset/img2.jpg', True],
    ['dataset/img1.jpg', 'dataset/img3.jpg', False],
    ['dataset/img2.jpg', 'dataset/img3.jpg', False],
]

models = ['VGG-Face', 'Facenet', 'OpenFace']
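
# The verification loop itself is cut off in this example. A minimal sketch of how
# these pairs could be checked, assuming the classic
# DeepFace.verify(img1_path, img2_path, model_name=...) signature:
for model in models:
    for img1, img2, expected in dataset:
        result = DeepFace.verify(img1, img2, model_name=model)
        print(model, img1, img2, 'verified:', result['verified'], 'expected:', expected)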
from cv2 import cv2
import matplotlib.pyplot as plt
from deepface import DeepFace


cap = cv2.VideoCapture(0)

face_haar = cv2.CascadeClassifier("haarcascade_frontalface_default.xml")

while True:

    ret, frame = cap.read()
    res = DeepFace.analyze(frame, actions=["race"])

    face_rect = face_haar.detectMultiScale(frame)
    for x, y, w, h in face_rect:
        cv2.rectangle(frame, (x, y), (x + w, y + h), (0, 255, 0), 2)
        # label each detected face inside the loop; this also avoids a NameError when no face is found
        cv2.putText(frame, res["dominant_race"], (x, y - 20),
                    cv2.FONT_HERSHEY_COMPLEX, 1, (0, 255, 0), 1)
    cv2.imshow("Emotion Detector", frame)

    if cv2.waitKey(1) & 0xFF == ord('q'):
        break

cap.release()
cv2.destroyAllWindows()
from deepface import DeepFace
import cv2
import matplotlib.pyplot as plt

img = cv2.imread("")
plt.imshow(img[:, :, ::-1])
plt.show()

result = DeepFace.analyze(img, actions=['emotion'])

print(result)
from deepface_model import *
import warnings
import cv2
from deepface import DeepFace

if __name__ == "__main__":
    pre_models = {}
    pre_models['emotion'] = loadModel_emotion()
    pre_models['age'] = loadModel_age()
    pre_models['gender'] = loadModel_gender()
    pre_models['race'] = loadModel_race()

    src = cv2.imread('res/test-img/FinalData/001.jpg')
    demography = DeepFace.analyze('res/test-img/FinalData/001.jpg',
                                  actions=['age', 'gender', 'race', 'emotion'],
                                  models=pre_models)
    # demographies = DeepFace.analyze(["img1.jpg", "img2.jpg", "img3.jpg"]) #analyzing multiple faces same time
    print("Age: ", demography["age"])
    print("Gender: ", demography["gender"])
    print("Emotion: ", demography["dominant_emotion"])
    print("Race: ", demography["dominant_race"])
import cv2
import numpy as np
import matplotlib.pyplot as plt
from deepface import DeepFace


video = cv2.VideoCapture(0)
if not video.isOpened():
    print("Unable to open webcam...")

while True:
    ret, frame = video.read()  # ret indicates whether a frame was read; frame holds the captured image
    height, width, channels = frame.shape

    try:
        predictions = DeepFace.analyze(frame)

        start_point = (predictions['region']['x'], predictions['region']['y'])
        end_point = (predictions['region']['x'] + predictions['region']['w'], predictions['region']['y'] + predictions['region']['h'])
        box_colour = (0,255,0)
        text_colour = (255,0,0)
        thickness = 8
        font = cv2.FONT_HERSHEY_DUPLEX

        gender_start_point = (int((predictions['region']['x']/3)),int(((predictions['region']['y']*0.75)*0.25)))
        age_start_point = (int((predictions['region']['x']/3)),int(((predictions['region']['y']*0.75)*0.5)))
        race_start_point = (int((predictions['region']['x']/3)),int(((predictions['region']['y']*0.75)*0.75)))
        emotion_start_point = (int((predictions['region']['x']/3)),int(((predictions['region']['y']*0.75)/1)))

        frame = cv2.rectangle(frame, start_point, end_point, box_colour, thickness)
        cv2.putText(frame, "Emotion: " + predictions['dominant_emotion'], emotion_start_point, font, 0.5, text_colour, 1)
                current_element_for_speech_output = element  #one / first name
            else:  #already at least one name in list
                current_element_for_speech_output = current_element_for_speech_output + " and " + element

    #Face Analysis including Emotion Detection
    #**********************************************
    if face_analysis == True:
        system_counter = system_counter + 1  # increment frame counter

        # don't run the analysis on every frame, because that wastes a lot of resources
        if ((system_counter %
             frequence_face_analysis) == 0):  #only do every 8th time etc.

            # detect emotion and other parameters
            img_analysis = DeepFace.analyze(
                r"images_to_detect\unknown_person\frame%d.jpg" %
                imgcounter)  #is doing analysis with one person (which one?)

            emotion = img_analysis["emotion"]
            age = img_analysis["age"]
            gender = img_analysis["gender"]
            ethnicity = img_analysis["race"]
            dominant_emotion = img_analysis["dominant_emotion"]
            emotion_rounded = {k: round(v, 2) for k, v in emotion.items()}
            ethnicity_rounded = {k: round(v, 2) for k, v in ethnicity.items()}

            #check whether emotion is repetitive: use ringbuffer: if emotion is same several times in a row then use that information
            emotion_ringbuffer.append(
                dominant_emotion)  #add the dominant emotion to ringbuffer
            print("Emotion Ringbuffer (just for debugging)\n")
            print(emotion_ringbuffer)
Example #16
#######################
# MAIN LOOP
#######################
while True:
    # STEP 1: TAKE OFF AND GET THE TELLO IMG FRAME
    if startContour == 0:
        myDrone.takeoff()
        startContour = 1
    img = getTelloFrame(myDrone, w, h)

    # STEP 2: FIND ALL FACES
    img, info = findFace(img)

    # STEP 3: ANALYZE EMOTIONS
    emotions = DeepFace.analyze(img, actions=["emotion"])
    status = emotions["dominant_emotion"]

    # STEP 4: RECOGNIZE FACES
    img = faceRecognition(img, status, encodeListKnown, classNames)

    # STEP 5: TRACK FACES
    pYaw_error, pUp_down_error = trackFaces(myDrone, info, w, h, pid,
                                            pYaw_error, pUp_down_error)

    # pError = trackFaces(myDrone, info, w, pid, pError)
    cv2.imshow("Tello Video", img)

    if cv2.waitKey(1) & 0xFF == ord("q"):
        myDrone.land()
        break
Example #17
models["race"] = Race.loadModel()


# VideoCapture lets us grab frames from the video one after another
vidcap = cv2.VideoCapture(testurl+'video.mp4')
count = 1

emotion_list=[]

while(vidcap.isOpened()):
    ret, image = vidcap.read()
    if(int(vidcap.get(1)) % vidcap.get(cv2.CAP_PROP_FPS)  == 0):
        # cv2.imwrite("./frame_image/frame%d.jpg" % count, image)
        # image_data = open("./frame_image/frame%d.jpg" % count, "rb")
        try:
            emotion = DeepFace.analyze(image, models=models)
        except Exception:
            print("error")
            count += 1
            continue
        pi_graph_dataset(emotion["dominant_emotion"])
        emlist=list(emotion['emotion'].values())
        emlist.insert(0,count)
        if(log_judge(emlist)):
            log=[]
            if(emotion["dominant_emotion"] != "happy" and emotion["dominant_emotion"] != "neutral"):
                log.append(emotion["dominant_emotion"])
                log.insert(0,count)
                emotion_log_list.append(log)    
        emotion_list.append(emlist)
        count += 1
    # if(count == 10):
Example #18
from deepface import DeepFace

obj = DeepFace.analyze(img_path='michael.jpg', actions = ['race'])

print(obj)
# In[16]:

import matplotlib.pyplot as plt

# In[17]:

plt.imshow(img)

# In[18]:

plt.imshow(cv2.cvtColor(img, cv2.COLOR_BGR2RGB))

# In[21]:

predictions = DeepFace.analyze(img)

# In[22]:

predictions

# In[23]:

type(predictions)

# In[24]:

predictions['dominant_emotion']

# In[25]:
Example #20
age_group_2 = []
age_group_3 = []

gender_group = []

race_group = []

emotion_group = []

for img in images:

    imgs = cv2.imread(img)
    plt.imshow(imgs[:, :, ::-1])
    plt.show()

    age_result = DeepFace.analyze(imgs, actions=["age"])
    gender_result = DeepFace.analyze(imgs, actions=['gender'])
    race_result = DeepFace.analyze(imgs, actions=['race'])
    emotion_result = DeepFace.analyze(imgs, actions=['emotion'])

    #print(type(age_result))
    print("")
    print("predicted age is : ", age_result["age"])
    print("Predicted gender is :", gender_result['gender'])
    print("Predicted race is :", race_result['race'])
    print("Predicted emotion is :", emotion_result['emotion'])

    if age_result["age"] > 0 and age_result["age"] < 30:
        age_group_1.append(age_result["age"])

    if age_result["age"] > 30 and age_result["age"] < 60:
import cv2
from deepface import DeepFace
import json

cap = cv2.VideoCapture(0)
attributes = ['age', 'gender', 'race', 'emotion']

while True:
    # Read the frame
    _, img = cap.read()
    demography = DeepFace.analyze(img, attributes)
    print(demography)
from cv2 import cv2
import matplotlib.pyplot as plt
from deepface import DeepFace


cap = cv2.VideoCapture(0)

face_haar = cv2.CascadeClassifier("haarcascade_frontalface_default.xml")

while True:

    ret, frame = cap.read()
    res = DeepFace.analyze(frame, actions=["emotion"])

    face_rect = face_haar.detectMultiScale(frame)
    for x, y, w, h in face_rect:
        cv2.rectangle(frame, (x, y), (x + w, y + h), (0, 255, 0), 2)
        # label each detected face inside the loop; this also avoids a NameError when no face is found
        cv2.putText(frame, res["dominant_emotion"], (x, y - 20),
                    cv2.FONT_HERSHEY_COMPLEX, 1, (0, 255, 0), 1)
    cv2.imshow("Emotion Detector", frame)

    if cv2.waitKey(1) & 0xFF == ord('q'):
        break

cap.release()
cv2.destroyAllWindows()
Example #23
def eye_counter(filename):
    # define two constants, one for the eye aspect ratio to indicate
    # blink and then a second constant for the number of consecutive
    # frames the eye must be below the threshold
    EYE_AR_THRESH = 0.35
    EYE_AR_CONSEC_FRAMES = 3

    # initialize the frame counters and the total number of blinks
    COUNTER = 0
    TOTAL = 0

    final_df = pd.DataFrame(columns=[
        'video_time_stamp', 'blink_counter', 'Emotion', 'list_of_emotions'
    ])

    # grab the indexes of the facial landmarks for the left and
    # right eye, respectively
    (lStart, lEnd) = face_utils.FACIAL_LANDMARKS_IDXS["left_eye"]
    (rStart, rEnd) = face_utils.FACIAL_LANDMARKS_IDXS["right_eye"]

    # start the video stream thread
    print("[INFO] starting video stream thread...")
    # vs = FileVideoStream(args["video"]).start()
    fileStream = True
    vs = cv2.VideoCapture(filename)
    # fileStream = False
    time.sleep(1.0)
    total_frames_video = 0

    fps = vs.get(cv2.CAP_PROP_FPS)
    print(fps)

    # loop over frames from the video stream
    while True:
        # if this is a file video stream, then we need to check if
        # there any more frames left in the buffer to process
        # if fileStream and not vs.more():
        # 	break

        # grab the frame from the threaded video file stream, resize
        # it, and convert it to grayscale
        # channels)
        ret, frame = vs.read()
        if ret == False:
            break
        height, width = frame.shape[0], frame.shape[1]
        size = (width, height)
        # frame = imutils.resize(frame, width=450)
        gray = cv2.cvtColor(frame, cv2.COLOR_BGR2GRAY)

        # detect faces in the grayscale frame
        rects = detector(gray, 0)

        # loop over the face detections
        for rect in rects:
            # determine the facial landmarks for the face region, then
            # convert the facial landmark (x, y)-coordinates to a NumPy
            # array
            shape = predictor(gray, rect)
            shape = face_utils.shape_to_np(shape)

            # extract the left and right eye coordinates, then use the
            # coordinates to compute the eye aspect ratio for both eyes
            leftEye = shape[lStart:lEnd]
            rightEye = shape[rStart:rEnd]
            leftEAR = eye_aspect_ratio(leftEye)
            rightEAR = eye_aspect_ratio(rightEye)

            # average the eye aspect ratio together for both eyes
            ear = (leftEAR + rightEAR) / 2.0

            # compute the convex hull for the left and right eye, then
            # visualize each of the eyes
            leftEyeHull = cv2.convexHull(leftEye)
            rightEyeHull = cv2.convexHull(rightEye)
            cv2.drawContours(frame, [leftEyeHull], -1, (0, 255, 0), 1)
            cv2.drawContours(frame, [rightEyeHull], -1, (0, 255, 0), 1)

            # check to see if the eye aspect ratio is below the blink
            # threshold, and if so, increment the blink frame counter
            if ear < EYE_AR_THRESH:
                COUNTER += 1

            # otherwise, the eye aspect ratio is not below the blink
            # threshold
            else:
                # if the eyes were closed for a sufficient number of
                # then increment the total number of blinks
                if COUNTER >= EYE_AR_CONSEC_FRAMES:
                    TOTAL += 1

                # reset the eye frame counter
                COUNTER = 0

            if total_frames_video % round(fps) == 0:
                print('true')
                emotion = DeepFace.analyze(frame, ['emotion'])
                video_time = (total_frames_video / round(fps))
                video_time = time.strftime('%H:%M:%S', time.gmtime(video_time))
                print(video_time)
                final_df = final_df.append(
                    {
                        "video_time_stamp": video_time,
                        "blink_counter": TOTAL,
                        "Emotion": emotion["dominant_emotion"],
                        "list_of_emotions": emotion
                    },
                    ignore_index=True)

            # draw the total number of blinks on the frame along with
            # the computed eye aspect ratio for the frame
            cv2.putText(frame, "Blinks: {}".format(TOTAL), (10, 30),
                        cv2.FONT_HERSHEY_SIMPLEX, 0.7, (0, 0, 255), 2)
            cv2.putText(frame, "EAR: {:.2f}".format(ear), (300, 30),
                        cv2.FONT_HERSHEY_SIMPLEX, 0.7, (0, 0, 255), 2)

        # # show the frame
        # cv2.imshow("Frame", frame)
        # key = cv2.waitKey(1) & 0xFF
        #
        # # if the `q` key was pressed, break from the loop
        # if key == ord("q"):
        # 	break

        total_frames_video += 1

        final_df.to_csv('eye_blinker.csv')

    # do a bit of cleanup
    cv2.destroyAllWindows()
    vs.release()
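
# Example usage of the function above (hypothetical file name):
# eye_counter("sample_interview.mp4")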
def add_demographic_features(image_path, models):
    try:
        demography = DeepFace.analyze(image_path, ['age', 'race', 'emotion'],
                                      models=models)
        age = int(round(demography["age"]))
        emotion = emotion_index[demography["dominant_emotion"]]
        race = race_index[demography["dominant_race"]]
        print("Demographic Features Added.")
        return age, race, emotion
    except:
        print(
            "No Face detected, please ensure that the image doesn't include picture without a face or a partially covered Face"
        )
        print(
            "Press Y/y to add these features manually or press any other key to exit"
        )
        choice = input()
        if choice == "Y" or choice == "y":
            age = input("Enter your age:")
            try:
                age = int(age)
            except:
                print(
                    "Error: Age value entered is not a numeric value. Exiting."
                )
                sys.exit(2)

            print(
                "Following are the supported race classes. Please enter the appropriate number which best matches the person in the image"
            )
            print(
                "0: black, 1: latino hispanic, 2: middle eastern, 3: asian, 4: white, 5: indian"
            )
            race = input("Enter the race class: ")
            try:
                race = int(race)
                if not (race >= 0 and race <= 5):
                    sys.exit(2)
            except:
                print(
                    "Error: Please enter a numerical value between 0 and 5 as per the index. Exiting"
                )
                sys.exit(2)

            print(
                "Following are the supported emotion classes. Please enter the appropriate number which best matches the person in the image"
            )
            print(
                "0: neutral, 1: sad, 2: happy, 3: surprise, 4: angry, 5: fear, 6: disgust"
            )
            emotion = input("Enter the emotion class: ")
            try:
                emotion = int(emotion)
                if not (emotion >= 0 and emotion <= 6):
                    sys.exit(2)
            except:
                print(
                    "Error: Please enter a numerical value between 0 and 6 as per the index. Exiting"
                )
                sys.exit(2)

            return age, race, emotion

        else:
            print("Exiting")
            sys.exit()
Example #25
# #image = cv2.imread("hanx.jpeg")
# image = cv2.imread("dicaprio.jpeg")
# #image = cv2.imread("brad.jpg")
# #image = cv2.imread("brad2.jpg")
# #image = cv2.imread("me.jpg")
# #image = cv2.imread("mamud.jpg")
image = cv2.imread(args["image"])
# #image = cv2.imread("tnzshownm.jpg")
# #image = cv2.imread("jan.jpg")
# #image = cv2.imread("emam.jpg")
# #image = cv2.imread("najm.jpg")
# #image = cv2.imread("trump.jpg")

#ALL ABOUT FACE
attributes = ['age', 'gender', 'race', 'emotion']
analysis = DeepFace.analyze(image, attributes)

print("AGE:", int(analysis['age']), ", GENDER:", analysis['gender'], ", RACE:",
      analysis['dominant_race'], ", EMOTION:", analysis['dominant_emotion'],
      ",")

#RESIZE AND CONVERT TO GRAYSCALE IMAGE
image = imutils.resize(image, width=2512)
gray = cv2.cvtColor(image, cv2.COLOR_BGR2GRAY)

# detect faces in the grayscale image
rects = detector(gray, 1)

# loop over the face detections
for (i, rect) in enumerate(rects):
    # determine the facial landmarks for the face region, then
Example #26
from deepface import DeepFace
demography = DeepFace.analyze("kimyoujung.jpg",
                              actions=['age', 'gender', 'race', 'emotion'])
#demographies = DeepFace.analyze(["img1.jpg", "img2.jpg", "img3.jpg"]) #analyzing multiple faces same time
print("Age: ", demography["age"])
print("Gender: ", demography["gender"])
print("Emotion: ", demography["dominant_emotion"])
print("Race: ", demography["dominant_race"])
Example #27
    for (x, y, w, h) in faces:
        detected_face = img[int(y):int(y + h), int(x):int(x + w)]
        margin_rate = 30
        #get some area around face to match position of face on images the model is trained on
        try:
            margin_x = int(w * margin_rate / 100)
            margin_y = int(h * margin_rate / 100)
            detected_face = img[int(y - margin_y):int(y + h + margin_y),
                                int(x - margin_x):int(x + w + margin_x)]
            detected_face = cv2.resize(detected_face,
                                       (224, 224))  #resize to 224x224
        except Exception as err:
            detected_face = img[int(y):int(y + h), int(x):int(x + w)]
            detected_face = cv2.resize(detected_face, (224, 224))
        obj = DeepFace.analyze(detected_face,
                               actions=['age'],
                               enforce_detection=False)  #feed to model
        cv2.rectangle(img, (x, y), (x + w, y + h), (36, 255, 12),
                      1)  #draw rectangle
        display_string = str(round(obj["age"]))
        cv2.putText(img, display_string, (x, y), cv2.FONT_HERSHEY_SIMPLEX, 1,
                    (36, 255, 12), 1, cv2.LINE_AA)  #add text

    cv2.imshow('img', img)
    k = cv2.waitKey(30) & 0xff
    if k == 27:
        break

cap.release()
cv2.destroyAllWindows()
Example #28
from deepface import DeepFace
demography = DeepFace.analyze("juan.jpg", actions = ['age', 'gender', 'race', 'emotion'])
#demographies = DeepFace.analyze(["img1.jpg", "img2.jpg", "img3.jpg"]) #analyzing multiple faces same time
print("Age: ", demography["age"])
print("Gender: ", demography["gender"])
print("Emotion: ", demography["dominant_emotion"])
print("Race: ", demography["dominant_race"])


Example #29
def predict(self, image):
    print(image)
    return DeepFace.analyze(np.array(image),
                            actions=['emotion'],
                            enforce_detection=False)
Example #30
from deepface import DeepFace
import cv2 as cv
import matplotlib.pyplot as plt
import PySimpleGUI as sg
from os import startfile

#Gui for choosing a file
sg.theme('DarkAmber')
fname = sg.popup_get_file('Select a video file')
startfile(fname)

#Deepface Emotional Analysis
cap = cv.VideoCapture(0)
result_new = []

while (True):
    _, img = cap.read()
    result_new.append(DeepFace.analyze(img, actions=['emotion']))
    cv.imshow('img', img)
    if (cv.waitKey(1) & 0xFF == ord('q')):
        break

cap.release()

#determining captured emotions
emotion_set = [i['dominant_emotion'] for i in result_new]
dominant_emotions = dict()
fear = 0
neutral = 0
sad = 0
happy = 0
angry = 0
surprise = 0