# pip install deepface

import cv2
from deepface import DeepFace
import numpy as np

imgpath = "D:\\1.program\\python\\python2\\python-practise\\I know python\\how to with python\\Real time emotion detection\\1.jpg"
image = cv2.imread(imgpath)

analyze = DeepFace.analyze(image, actions=['emotion'])
print(analyze)
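
# A minimal sketch of reading the result above, hedged for deepface version
# differences: recent releases return a list of per-face dicts from analyze(),
# while older releases return a single dict.
faces = analyze if isinstance(analyze, list) else [analyze]
for face in faces:
    print(face["dominant_emotion"], face["emotion"])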
Example #2
import tensorflow as tf
from PIL import Image
from io import BytesIO
from deepface import DeepFace

with open("main/test_image.png", "rb") as image:
    f = image.read()
    b = bytearray(f)
    img = Image.open(BytesIO(b))
image_array = tf.keras.preprocessing.image.img_to_array(img)

# demography = DeepFace.analyze(image_array, actions=["emotion"])
demography = DeepFace.analyze(image_array,
                              actions=["age", "gender", "race", "emotion"])

result = DeepFace.verify(image_array, image_array, model_name="Facenet")
print("Is verified: ", result["verified"])
Example #3
def analyze_face_image(image_path):
    obj = DeepFace.analyze(image_path,
                           actions=['age', 'gender', 'race', 'emotion'])
    print(obj["age"], " years old ", obj["dominant_race"], " ",
          obj["dominant_emotion"], " ", obj["dominant_gender"])
Example #4
import logging
tf.get_logger().setLevel(logging.ERROR)

print("Running unit tests for TF ", tf.__version__)

#-----------------------------------------

dataset = [['dataset/img1.jpg', 'dataset/img2.jpg', True],
           ['dataset/img1.jpg', 'dataset/img6.jpg', True]]

print("-----------------------------------------")

print("Face detectors test")

print("ssd detector")
res = DeepFace.verify(dataset, detector_backend='ssd')
print(res)

print("opencv detector")
res = DeepFace.verify(dataset, detector_backend='opencv')
print(res)

print("dlib detector")
res = DeepFace.verify(dataset, detector_backend='dlib')
print(res)

print("mtcnn detector")
res = DeepFace.verify(dataset, detector_backend='mtcnn')
print(res)
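
# Hedged version note: passing a list of pairs to verify() (bulk mode, as above) is an
# older-API convenience; recent deepface releases verify a single pair per call, e.g.:
res = DeepFace.verify('dataset/img1.jpg', 'dataset/img2.jpg', detector_backend='mtcnn')
print(res)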

print("-----------------------------------------")
pretrained_models["DeepFace"] = FbDeepFace.loadModel()
print("FbDeepFace loaded")
pretrained_models["DeepID"] = DeepID.loadModel()
print("DeepID loaded")

instances = df[["file_x", "file_y"]].values.tolist()

models = ['VGG-Face']
metrics = ["cosine"]

if True:
    for model in models:
        for metric in metrics:

            resp_obj = DeepFace.verify(instances,
                                       model_name=model,
                                       model=pretrained_models[model],
                                       distance_metric=metric)

            distances = []

            for i in range(0, len(instances)):
                distance = round(resp_obj["pair_%s" % (i + 1)]["distance"], 4)
                distances.append(distance)

            df['%s_%s' % (model, metric)] = distances

    df.to_csv("face-recognition-pivot.csv", index=False)
else:
    df = pd.read_csv("face-recognition-pivot.csv")

df_raw = df.copy()
    # Save the output of the function (tail of the face_detect helper, whose definition is truncated in this snippet).
    filename = output_name.format(os.getpid())
    cv2.imwrite(filename, processed_img)

# Check the output of face_detect function.
img_path = '/content/drive/My Drive/IMG_20201007_200043.jpg'
output_name = 'pan_img.jpg'
pan = face_detect(img_path, output_name)

# Check the output of face_detect function.
img_path = '/content/drive/My Drive/IMG20201010221637.jpg'
output_name = 'clicked_img.jpg'
orig = face_detect(img_path, output_name)

#### Resize the images: the output of face_detect is scaled to the original image,
#### but for verification both images should be the same size (the PAN card image
#### is slightly squeezed; resizing fixes that).
def resize_img(path, size):
    img = cv2.imread(path)
    img = cv2.resize(img, (size, size))
    return img

clicked_img = resize_img('/content/clicked_img.jpg', 512)
pan_img = resize_img('/content/pan_img.jpg', 512)

# Set enforce_detection=False, otherwise DeepFace raises: "Face could not be detected.
# Please confirm that the picture is a face photo or consider to set enforce_detection
# param to False."
result = DeepFace.verify(clicked_img, pan_img, enforce_detection=False)

print("Is verified: ", result["verified"])
Example #7
from flask import Flask
import time
from tqdm import tqdm
from deepface import DeepFace

app = Flask(__name__)

#------------------------------

tic = time.time()

print("Loading Face Recognition Models...")

pbar = tqdm(range(0, 6), desc='Loading Face Recognition Models...')

for index in pbar:
	
	if index == 0:
		pbar.set_description("Loading VGG-Face")
		vggface_model = DeepFace.build_model("VGG-Face")
	elif index == 1:
		pbar.set_description("Loading OpenFace")
		openface_model = DeepFace.build_model("OpenFace")
	elif index == 2:
		pbar.set_description("Loading Google FaceNet")
		facenet_model = DeepFace.build_model("Facenet")
	elif index == 3:
		pbar.set_description("Loading Facebook DeepFace")
		deepface_model = DeepFace.build_model("DeepFace")
	elif index == 4:
		pbar.set_description("Loading DeepID DeepFace")
		deepid_model = DeepFace.build_model("DeepID")
	elif index == 5:
		pbar.set_description("Loading ArcFace DeepFace")
		arcface_model = DeepFace.build_model("ArcFace")
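
# A hedged convenience sketch (the name `loaded_models` is illustrative, not from the
# original snippet): keeping the models built above in a dict avoids long if/elif
# chains when picking one per request later on.
loaded_models = {
    "VGG-Face": vggface_model,
    "OpenFace": openface_model,
    "Facenet": facenet_model,
    "DeepFace": deepface_model,
    "DeepID": deepid_model,
    "ArcFace": arcface_model,
}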
Example #8
#! /usr/bin/python3

from deepface import DeepFace
import time

tik = time.time()
result = DeepFace.verify("katya1.jpg",
                         "katya2.jpg",
                         model_name='ArcFace',
                         detector_backend='retinaface')  # 'retinaface'
print("Is verified: ", result["verified"], time.time() - tik)
Example #9
from deepface import DeepFace

# result = DeepFace.stream("face_images/Trump/4.jpg")
# note: stream() takes a folder of face images (a database path), not a single file
DeepFace.stream("face_images/Gates")

# Detect your emotions in real time via webcam using the deepface library

import cv2
import matplotlib.pyplot as plt
from deepface import DeepFace
# download pretrained CNN weights -> https://drive.google.com/uc?id=1YCox_4kJ-BYeXq27uUbasu--yz28zUMV
# save the .h5 file at 'C:\Users\Steven_Verkest/.deepface/weights/age_model_weights.h5'
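# A minimal sketch of fetching those weights programmatically (assumes the optional
# gdown package is installed; deepface can also download model weights on demand):
import os
import gdown
weights_dir = os.path.join(os.path.expanduser("~"), ".deepface", "weights")
os.makedirs(weights_dir, exist_ok=True)
gdown.download("https://drive.google.com/uc?id=1YCox_4kJ-BYeXq27uUbasu--yz28zUMV",
               os.path.join(weights_dir, "age_model_weights.h5"), quiet=False)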

faceCascade = cv2.CascadeClassifier(cv2.data.haarcascades + 'haarcascade_frontalface_default.xml')

cap = cv2.VideoCapture(0)

while True:
    ret, frame = cap.read()  #read one image from a video (1 frame)
    result = DeepFace.analyze(frame, actions=['emotion'], enforce_detection=False)  # avoid raising when no face is in the frame

    gray = cv2.cvtColor(frame, cv2.COLOR_BGR2GRAY)
    faces = faceCascade.detectMultiScale(gray, 1.1, 4)
    # draw rectangle around the face
    for (x, y, w, h) in faces:
        cv2.rectangle(frame, (x, y), (x + w, y + h), (255, 0, 0), 2)

    # draw text
    font = cv2.FONT_HERSHEY_SIMPLEX
    cv2.putText(frame, result['dominant_emotion'], (50, 50), font, 3,
                (0, 255, 0), 2, cv2.LINE_4)
    cv2.imshow('original video', frame)

    if cv2.waitKey(2) & 0xff == ord('q'):
        break

cap.release()
cv2.destroyAllWindows()
Example #11
from deepface import DeepFace

# DeepFace.verify compares two faces, so a second image path is required
# ("ujung2.jpg" below is a placeholder; the original snippet passed only one path)
result = DeepFace.verify(
    r"C:\Users\bitcamp\Desktop\opencv_dnn_202005\image\ujung.jpg",
    r"C:\Users\bitcamp\Desktop\opencv_dnn_202005\image\ujung2.jpg")

print("Is verified: ", result["verified"])
print()
Example #12
def test_cases():

    print("DeepFace.detectFace test")

    for detector in detectors:
        img = DeepFace.detectFace("dataset/img11.jpg",
                                  detector_backend=detector)
        evaluate(img.shape[0] > 0 and img.shape[1] > 0)
        print(detector, " test is done")

    print("-----------------------------------------")

    img_path = "dataset/img1.jpg"
    embedding = DeepFace.represent(img_path)
    print("Function returned ", len(embedding), "dimensional vector")
    evaluate(len(embedding) > 0)

    print("-----------------------------------------")

    print("Face detectors test")

    for detector in detectors:
        print(detector + " detector")
        res = DeepFace.verify(dataset[0][0],
                              dataset[0][1],
                              detector_backend=detector)
        print(res)
        assert res["verified"] == dataset[0][2]

    print("-----------------------------------------")

    print("Find function test")

    df = DeepFace.find(img_path="dataset/img1.jpg", db_path="dataset")
    print(df.head())
    evaluate(df.shape[0] > 0)

    print("-----------------------------------------")

    print("Facial analysis test. Passing nothing as an action")

    img = "dataset/img4.jpg"
    demography = DeepFace.analyze(img)
    print(demography)

    evaluate(demography["age"] > 20 and demography["age"] < 40)
    evaluate(demography["dominant_gender"] == "Woman")

    print("-----------------------------------------")

    print("Facial analysis test. Passing all to the action")
    demography = DeepFace.analyze(img, ['age', 'gender', 'race', 'emotion'])

    print("Demography:")
    print(demography)

    #check response is a valid json
    print("Age: ", demography["age"])
    print("Gender: ", demography["dominant_gender"])
    print("Race: ", demography["dominant_race"])
    print("Emotion: ", demography["dominant_emotion"])

    evaluate(demography.get("age") is not None)
    evaluate(demography.get("dominant_gender") is not None)
    evaluate(demography.get("dominant_race") is not None)
    evaluate(demography.get("dominant_emotion") is not None)

    print("-----------------------------------------")

    print(
        "Facial analysis test 2. Remove some actions and check they are not computed"
    )
    demography = DeepFace.analyze(img, ['age', 'gender'])

    print("Age: ", demography.get("age"))
    print("Gender: ", demography.get("dominant_gender"))
    print("Race: ", demography.get("dominant_race"))
    print("Emotion: ", demography.get("dominant_emotion"))

    evaluate(demography.get("age") is not None)
    evaluate(demography.get("dominant_gender") is not None)
    evaluate(demography.get("dominant_race") is None)
    evaluate(demography.get("dominant_emotion") is None)

    print("-----------------------------------------")

    print("Facial recognition tests")

    for model in models:
        for metric in metrics:
            for instance in dataset:
                img1 = instance[0]
                img2 = instance[1]
                result = instance[2]

                resp_obj = DeepFace.verify(img1,
                                           img2,
                                           model_name=model,
                                           distance_metric=metric)

                prediction = resp_obj["verified"]
                distance = round(resp_obj["distance"], 2)
                threshold = resp_obj["threshold"]

                passed = prediction == result

                evaluate(passed)

                if passed:
                    test_result_label = "passed"
                else:
                    test_result_label = "failed"

                if prediction == True:
                    classified_label = "verified"
                else:
                    classified_label = "unverified"

                print(
                    img1.split("/")[-1], "-",
                    img2.split("/")[-1], classified_label,
                    "as same person based on", model, "and", metric,
                    ". Distance:", distance, ", Threshold:", threshold, "(",
                    test_result_label, ")")

            print("--------------------------")

    # -----------------------------------------

    print("Passing numpy array to analyze function")

    img = cv2.imread("dataset/img1.jpg")
    resp_obj = DeepFace.analyze(img)
    print(resp_obj)

    evaluate(resp_obj["age"] > 20 and resp_obj["age"] < 40)
    evaluate(resp_obj["gender"] == "Woman")

    print("--------------------------")

    print("Passing numpy array to verify function")

    img1 = cv2.imread("dataset/img1.jpg")
    img2 = cv2.imread("dataset/img2.jpg")

    res = DeepFace.verify(img1, img2)
    print(res)

    evaluate(res["verified"] == True)

    print("--------------------------")

    print("Passing numpy array to find function")

    img1 = cv2.imread("dataset/img1.jpg")

    df = DeepFace.find(img1, db_path="dataset")

    print(df.head())

    evaluate(df.shape[0] > 0)

    print("--------------------------")

    print("non-binary gender tests")

    #interface validation - no need to call evaluate here

    for img1_path, img2_path, verified in dataset:
        for detector in detectors:
            result = DeepFace.analyze(img1_path,
                                      actions=('gender', ),
                                      detector_backend=detector,
                                      enforce_detection=False)

            print(result)

            assert 'gender' in result.keys()
            assert 'dominant_gender' in result.keys()
            assert result["dominant_gender"] in ["Man", "Woman"]

            if result["dominant_gender"] == "Man":
                assert result["gender"]["Man"] > result["gender"]["Woman"]
            else:
                assert result["gender"]["Man"] < result["gender"]["Woman"]
Example #13
import os

os.environ['TF_CPP_MIN_LOG_LEVEL'] = '2'
#-----------------------------------------

print("Bulk tests")

print("-----------------------------------------")

print("Bulk face recognition tests")

dataset = [['dataset/img1.jpg', 'dataset/img2.jpg', True],
           ['dataset/img5.jpg', 'dataset/img6.jpg', True]]

resp_obj = DeepFace.verify(dataset)
print(resp_obj["pair_1"]["verified"] == True)
print(resp_obj["pair_2"]["verified"] == True)

print("-----------------------------------------")

print("Bulk facial analysis tests")

dataset = [
    'dataset/img1.jpg', 'dataset/img2.jpg', 'dataset/img5.jpg',
    'dataset/img6.jpg'
]

resp_obj = DeepFace.analyze(dataset)
print(resp_obj["instance_1"]["age"], " years old ",
      resp_obj["instance_1"]["dominant_emotion"], " ",
Example #14
from deepface import DeepFace

result = DeepFace.verify("face_images/Gates/1.jpg", "face_images/Gates/2.jpg")
print(result)
Example #15
# (tail of the extract_face_from_image helper; its opening lines are truncated in this snippet)
        # resize pixels to the model size
        face_image = Image.fromarray(face_boundary)
        face_image = face_image.resize(required_size)
        face_array = asarray(face_image)
        face_images.append(face_array)

    return face_images


extracted_face = extract_face_from_image('kaushiki2.jpg')

#Display the face from the extracted faces

plt.imshow(extracted_face[0])
plt.show()

# Compare single/multiple faces across two images

from deepface import DeepFace
import cv2
from matplotlib import pyplot as plt
img1 = cv2.imread("women.jpg")
plt.imshow(img1[:, :, ::-1])
plt.show()

img2 = cv2.imread("5 women.jpg")
plt.imshow(img2[:, :, ::-1])
plt.show()
result = DeepFace.verify(img1, img2)
print("Is same face", result["verified"])
Example #16
from deepface import DeepFace

nCustomers = 0
nMale = 0
nFemale = 0
nHappy = 0
nSad = 0

#taking image input
image_path = [
    'image1.jpeg', 'image2.jpeg', 'image3.jpeg', 'image4.jpeg', 'image5.jpeg',
    'image6.jpeg'
]

for imgPath in image_path:
    data = DeepFace.analyze(imgPath)
    if (data["gender"] == 'Man'):
        nMale += 1
    elif (data["gender"] == 'Woman'):
        nFemale += 1

    if (data["dominant_emotion"] == 'happy'):
        nHappy += 1
    elif (data["dominant_emotion"] == 'sad'):
        nSad += 1
    nCustomers += 1

print("\nNumber of customers visited:", nCustomers)
print("Number of males customer:", nMale)
print("Number of females customer:", nFemale)
print("Number of happy customer:", nHappy)
Example #17
from deepface import DeepFace
import os
import cv2

img1_path = 'image/ping_1.png'
img2_path = 'image/Oak_3.png'

img_1 = cv2.imread(img1_path)
img_2 = cv2.imread(img2_path)

result = DeepFace.verify(img1_path, img2_path, model_name = 'ArcFace')
print(result)
# (tail of a PySimpleGUI sg.Window(...) call; the opening of the call is truncated in this snippet)
                   auto_size_text=True,
                   font='helvetica',
                   element_justification='center',
                   icon=icon_logo64).Layout(layout)

# Event Loop
while True:
    event, values = window.Read()
    if event in (None, 'Exit'):
        break

    if event == 'Submit':
        # Update the "output" text element to be the value of "input" element
        window['-OUTPUT-'].update(values['-INPUT-'])

    elif event == 'Start Human Emotion Detection':
        DeepFace.stream("DATABASE")
        text_input = values['-INPUT-']

    elif event == "Upload Subject's Image":
        sg.popup_get_file('Select Image:',
                          "Upload Subject's Image",
                          icon=icon_logo64)

    elif event == 'Onboard Subject':
        text_input = values['-INPUT-']
        subject_onboarding(text_input)

# Close Window
window.Close()
Example #19
from deepface import DeepFace

obj = DeepFace.analyze(r"static\uploads\sai_3.jpg")

print(type(obj))
#objs = DeepFace.analyze(["img1.jpg", "img2.jpg", "img3.jpg"]) #analyzing multiple faces same time
print(int(obj["age"]), " years old ", obj["dominant_race"], " ",
      obj["dominant_emotion"], " ", obj["gender"])
Example #20
def extract_align_face(img):  # returns the aligned face
    result = DeepFace.detectFace(img)
    # print(result)
    return result
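
# Hypothetical usage of the helper above (the path is a placeholder): detectFace
# returns an RGB float array, so it can be shown directly with matplotlib.
import matplotlib.pyplot as plt
aligned = extract_align_face("dataset/img1.jpg")
plt.imshow(aligned)
plt.show()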
Example #21
def verifyWrapper(req, trx_id = 0):
	
	resp_obj = jsonify({'success': False})
	
	model_name = "VGG-Face"; distance_metric = "cosine"
	if "model_name" in list(req.keys()):
		model_name = req["model_name"]
	if "distance_metric" in list(req.keys()):
		distance_metric = req["distance_metric"]
	
	#----------------------
	
	instances = []
	if "img" in list(req.keys()):
		raw_content = req["img"] #list

		for item in raw_content: #item is in type of dict
			instance = []
			img1 = item["img1"]; img2 = item["img2"]

			validate_img1 = False
			if len(img1) > 11 and img1[0:11] == "data:image/":
				validate_img1 = True
			
			validate_img2 = False
			if len(img2) > 11 and img2[0:11] == "data:image/":
				validate_img2 = True

			if validate_img1 != True or validate_img2 != True:
				return jsonify({'success': False, 'error': 'you must pass both img1 and img2 as base64 encoded string'}), 205

			instance.append(img1); instance.append(img2)
			instances.append(instance)
		
	#--------------------------

	if len(instances) == 0:
		return jsonify({'success': False, 'error': 'you must pass at least one img object in your request'}), 205
	
	print("Input request of ", trx_id, " has ",len(instances)," pairs to verify")
	
	#--------------------------
	
	if model_name == "VGG-Face":
		resp_obj = DeepFace.verify(instances, model_name = model_name, distance_metric = distance_metric, model = vggface_model)
	elif model_name == "Facenet":
		resp_obj = DeepFace.verify(instances, model_name = model_name, distance_metric = distance_metric, model = facenet_model)
	elif model_name == "OpenFace":
		resp_obj = DeepFace.verify(instances, model_name = model_name, distance_metric = distance_metric, model = openface_model)
	elif model_name == "DeepFace":
		resp_obj = DeepFace.verify(instances, model_name = model_name, distance_metric = distance_metric, model = deepface_model)
	elif model_name == "x":
		resp_obj = DeepFace.verify(instances, model_name = modxel_name, distance_metric = distance_metric, model = deepid_model)
	elif model_name == "ArcFace":
		resp_obj = DeepFace.verify(instances, model_name = model_name, distance_metric = distance_metric, model = arcface_model)
	elif model_name == "Ensemble":
		models =  {}
		models["VGG-Face"] = vggface_model
		models["Facenet"] = facenet_model
		models["OpenFace"] = openface_model
		models["DeepFace"] = deepface_model
		resp_obj = DeepFace.verify(instances, model_name = model_name, model = models)
	else:
		resp_obj = jsonify({'success': False, 'error': 'You must pass a valid model name. You passed %s' % (model_name)}), 205
	
	return resp_obj
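
# A hedged sketch of the request payload shape verifyWrapper() parses (the base64
# data URIs below are truncated placeholders, not real image data):
example_req = {
    "model_name": "VGG-Face",
    "distance_metric": "cosine",
    "img": [
        {"img1": "data:image/jpeg;base64,...", "img2": "data:image/jpeg;base64,..."},
    ],
}
# resp = verifyWrapper(example_req)  # would need the preloaded *_model globals above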
Example #22
def get_face_dict(img_path: str) -> dict:
    try:
        return DeepFace.analyze(img_path)
    except Exception as e:
        msg = LogTool.pp_exception(e)
        print(msg)
Example #23
from deepface import DeepFace

demography = DeepFace.analyze("face_images/Gates/1.jpg")

print("Age: ", demography["age"])
print("Gender: ", demography["gender"])
print("Emotion: ", demography["dominant_emotion"])
print("Race: ", demography["dominant_race"])
# print(demography)
Example #24
def detect_deepface_cropped(img, person_boxes, old_ballots, original_boxes):
    """
    takes in a list of person bounding boxes and image
    crops the image to the bounding boxes and detects from database
    returns drawn img, person bounding boxes and id-ed name: [(x1, y1), (x2, y2), "ZuoLin"]
    """
    bboxs = []
    person_face_detected = []
    output_img = img.copy()
    for i in range(len(person_boxes)):
        p_bbox = person_boxes[i]
        o_bbox = original_boxes[i]
        (xmin, ymin), (xmax, ymax) = p_bbox
        cropped_img = img[ymin:ymax, xmin:xmax]
        detections = detect_face2(img=cropped_img,
                                  detector_backend=detector_backend,
                                  grayscale=grayscale,
                                  enforce_detection=enforce_detection)
        (oxmin, oymin), (oxmax, oymax) = o_bbox
        o_cropped_img = img[oymin:oymax, oxmin:oxmax]

        face_detected = None
        if len(detections) == 1:  # only keep crops with exactly one detected face

            x, y, w, h = detections[0]["box"]
            y_buffer = int(h * 0.5)
            x_buffer = int(w * 0.5)
            detected_face = cropped_img[
                max(0, int(y - y_buffer)):min(cropped_img.
                                              shape[0], int(y + h + y_buffer)),
                max(0, int(x - x_buffer)):min(cropped_img.
                                              shape[1], int(x + w + x_buffer))]

            try:
                ## calls reid_processor to confirm identity
                # best_body_guess, body_confidence = reid.detect_body_cropped(cropped_img)

                detected_face = detected_face[:, :, ::-1]
                new_im = Image.fromarray(detected_face)
                tmp_image = str(int(time.time())) + ".jpg"
                new_im.save(tmp_image)
                df = DeepFace.find(img_path=tmp_image,
                                   db_path=db_folder,
                                   enforce_detection=False)
                os.remove(tmp_image)
                df.sort_values('VGG-Face_cosine', inplace=True, ascending=True)
                face_detected = re.split(r' |/|\\', df['identity'].iloc[0])[1]
                dist = float(df['VGG-Face_cosine'].iloc[0])

                ## need code when face id is not confident, use reid to confirm/dispute
                if dist > 0.035:
                    face_detected = None
                    pass
                else:
                    print('p detected by face:', face_detected)

            except Exception as err:
                print("ERROR:", err)

        if face_detected is None:
            best_identity, best_confidence = reid.detect_body_cropped(
                o_cropped_img[:, :, ::-1])
            face_detected = best_identity
            print('p detected by body:', face_detected)
            bboxs.append(
                [None, None, None, None, best_confidence, face_detected])
        else:
            bboxs.append([
                xmin + x, ymin + y, w, h, detections[0]['confidence'],
                face_detected
            ])
        person_face_detected.append([(xmin, ymin), (xmax, ymax),
                                     face_detected])

    old_ballots = assign_new_to_old(person_face_detected, old_ballots, bboxs,
                                    original_boxes)
    for face_detected in old_ballots:
        print(f"person detected: {face_detected.name}",
              f"confidence: {face_detected.best_count}")
        if face_detected.info[0] is not None:
            output_img = drawBBox(output_img, face_detected.info, col_dict,
                                  "yellow")
        cc = face_detected.o_coords
        output_img = drawBBox2(output_img, cc[0][0], cc[1][0], cc[0][1],
                               cc[1][1], face_detected.name, col_dict,
                               "yellow")

    person_detected = [[x.coords[0], x.coords[1], x.name] for x in old_ballots
                       if x.name]
    return output_img, person_detected, old_ballots
Example #25
# this fragment assumes imports along these lines (older deepface layout):
# import re, time, cv2, pandas as pd
# from deepface import DeepFace
# from deepface.commons import functions, distance as dst
def analysis(db_path,
             model_name,
             distance_metric,
             enable_face_analysis=True,
             source=0,
             time_threshold=1,
             frame_threshold=30):

    input_shape = (224, 224)
    input_shape_x = input_shape[0]
    input_shape_y = input_shape[1]

    text_color = (255, 255, 255)

    # employees = []
    # #check passed db folder exists
    # if os.path.isdir(db_path) == True:
    # 	for r, d, f in os.walk(db_path): # r=root, d=directories, f = files
    # 		for file in f:
    # 			if ('.jpg' in file):
    # 				#exact_path = os.path.join(r, file)
    # 				exact_path = r + "/" + file
    # 				#print(exact_path)
    # 				employees.append(exact_path)
    #
    # if len(employees) == 0:
    # 	print("WARNING: There is no image in this path ( ", db_path,") . Face recognition will not be performed.")
    #
    # #------------------------
    #
    # if len(employees) > 0:
    #
    # 	model = DeepFace.build_model(model_name)
    # 	print(model_name," is built")
    #
    # 	#------------------------
    #
    # 	input_shape = functions.find_input_shape(model)
    # 	input_shape_x = input_shape[0]
    # 	input_shape_y = input_shape[1]
    #
    # 	#tuned thresholds for model and metric pair
    # 	threshold = dst.findThreshold(model_name, distance_metric)
    #
    # #------------------------
    # #facial attribute analysis models
    df8 = pd.DataFrame(columns=['label', 'score'])

    if enable_face_analysis == True:

        tic = time.time()

        emotion_model = DeepFace.build_model('Emotion')
        print("Emotion model loaded")

        # age_model = DeepFace.build_model('Age')
        # print("Age model loaded")
        #
        # gender_model = DeepFace.build_model('Gender')
        # print("Gender model loaded")

        toc = time.time()

        print("Facial attibute analysis models loaded in ", toc - tic,
              " seconds")

    #------------------------

    #find embeddings for employee list

    #tic = time.time()

    # pbar = tqdm(range(0, len(employees)), desc='Finding embeddings')

    embeddings = []
    # #for employee in employees:
    # for index in pbar:
    # 	employee = employees[index]
    # 	pbar.set_description("Finding embedding for %s" % (employee.split("/")[-1]))
    # 	embedding = []
    # 	img = functions.preprocess_face(img = employee, target_size = (input_shape_y, input_shape_x), enforce_detection = False)
    # 	img_representation = model.predict(img)[0,:]
    #
    # 	embedding.append(employee)
    # 	embedding.append(img_representation)
    # 	embeddings.append(embedding)
    #
    df = pd.DataFrame(embeddings, columns=['employee', 'embedding'])
    df['distance_metric'] = distance_metric
    #
    # toc = time.time()
    #
    # print("Embeddings found for given data set in ", toc-tic," seconds")
    #
    # #-----------------------

    pivot_img_size = 112  #face recognition result image

    #-----------------------

    opencv_path = functions.get_opencv_path()
    face_detector_path = opencv_path + "haarcascade_frontalface_default.xml"
    face_cascade = cv2.CascadeClassifier(cv2.data.haarcascades +
                                         'haarcascade_frontalface_default.xml')

    #-----------------------

    freeze = False
    face_detected = False
    face_included_frames = 0  #freeze the screen once a face is detected in frame_threshold consecutive frames
    freezed_frame = 0
    tic = time.time()

    cap = cv2.VideoCapture(source)  #webcam

    while (True):
        ret, img = cap.read()

        #cv2.namedWindow('img', cv2.WINDOW_FREERATIO)
        #cv2.setWindowProperty('img', cv2.WND_PROP_FULLSCREEN, cv2.WINDOW_FULLSCREEN)

        raw_img = img.copy()
        resolution = img.shape

        resolution_x = img.shape[1]
        resolution_y = img.shape[0]

        if freeze == False:
            faces = face_cascade.detectMultiScale(img, 1.3, 5)

            if len(faces) == 0:
                face_included_frames = 0
        else:
            faces = []

        detected_faces = []
        face_index = 0
        for (x, y, w, h) in faces:
            if w > 60:  #discard small detected faces

                face_detected = True
                if face_index == 0:
                    face_included_frames = face_included_frames + 1  #increase frame for a single face

                cv2.rectangle(img, (x, y), (x + w, y + h), (67, 67, 67),
                              1)  #draw rectangle to main image

                #cv2.putText(img, str(frame_threshold - face_included_frames), (int(x+w/4),int(y+h/1.5)), cv2.FONT_HERSHEY_SIMPLEX, 4, (255, 255, 255), 2)

                detected_face = img[int(y):int(y + h),
                                    int(x):int(x + w)]  #crop detected face

                #-------------------------------------

                detected_faces.append((x, y, w, h))
                face_index = face_index + 1

                #-------------------------------------

        if face_detected == True and face_included_frames == frame_threshold and freeze == False:
            freeze = True
            #base_img = img.copy()
            base_img = raw_img.copy()
            detected_faces_final = detected_faces.copy()
            tic = time.time()

        if freeze == True:

            toc = time.time()
            if (toc - tic) < time_threshold:

                if freezed_frame == 0:
                    freeze_img = base_img.copy()
                    #freeze_img = np.zeros(resolution, np.uint8) #here, np.uint8 handles showing white area issue

                    for detected_face in detected_faces_final:
                        x = detected_face[0]
                        y = detected_face[1]
                        w = detected_face[2]
                        h = detected_face[3]

                        cv2.rectangle(freeze_img, (x, y), (x + w, y + h),
                                      (67, 67, 67),
                                      1)  #draw rectangle to main image

                        #-------------------------------

                        #apply deep learning for custom_face

                        custom_face = base_img[y:y + h, x:x + w]

                        #-------------------------------
                        #facial attribute analysis

                        if enable_face_analysis == True:

                            gray_img = functions.preprocess_face(
                                img=custom_face,
                                target_size=(48, 48),
                                grayscale=True,
                                enforce_detection=False)
                            emotion_labels = [
                                'Angry', 'Disgust', 'Fear', 'Happy', 'Sad',
                                'Surprise', 'Neutral'
                            ]
                            emotion_predictions = emotion_model.predict(
                                gray_img)[0, :]
                            sum_of_predictions = emotion_predictions.sum()

                            mood_items = []
                            for i in range(0, len(emotion_labels)):
                                mood_item = []
                                emotion_label = emotion_labels[i]
                                emotion_prediction = 100 * emotion_predictions[
                                    i] / sum_of_predictions
                                mood_item.append(emotion_label)
                                mood_item.append(emotion_prediction)
                                mood_items.append(mood_item)

                            emotion_df = pd.DataFrame(
                                mood_items, columns=["emotion", "score"])
                            emotion_df = emotion_df.sort_values(
                                by=["score"],
                                ascending=False).reset_index(drop=True)

                            #background of mood box

                            #transparency
                            overlay = freeze_img.copy()
                            opacity = 0.4

                            if x + w + pivot_img_size < resolution_x:
                                #right
                                cv2.rectangle(
                                    freeze_img
                                    #, (x+w,y+20)
                                    ,
                                    (x + w, y),
                                    (x + w + pivot_img_size, y + h),
                                    (64, 64, 64),
                                    cv2.FILLED)

                                cv2.addWeighted(overlay, opacity, freeze_img,
                                                1 - opacity, 0, freeze_img)

                            elif x - pivot_img_size > 0:
                                #left
                                cv2.rectangle(
                                    freeze_img
                                    #, (x-pivot_img_size,y+20)
                                    ,
                                    (x - pivot_img_size, y),
                                    (x, y + h),
                                    (64, 64, 64),
                                    cv2.FILLED)

                                cv2.addWeighted(overlay, opacity, freeze_img,
                                                1 - opacity, 0, freeze_img)

                            for index, instance in emotion_df.iterrows():
                                emotion_label = "%s " % (instance['emotion'])
                                emotion_score = instance['score'] / 100

                                bar_x = 35  #bar length when an emotion scores 100%

                                bar_x = int(bar_x * emotion_score)
                                labelx = {
                                    'label': emotion_label,
                                    'score': emotion_score
                                }

                                # DataFrame.append was removed in pandas 2.x; use pd.concat there
                                df8 = df8.append(labelx, ignore_index=True)

                                ser = pd.Series(data=labelx,
                                                index=['label', 'score'])

                                if x + w + pivot_img_size < resolution_x:

                                    text_location_y = y + 20 + (index + 1) * 20
                                    text_location_x = x + w

                                    if text_location_y < y + h:
                                        cv2.putText(
                                            freeze_img, emotion_label,
                                            (text_location_x, text_location_y),
                                            cv2.FONT_HERSHEY_SIMPLEX, 0.5,
                                            (255, 255, 255), 1)
                                        cv2.rectangle(
                                            freeze_img, (x + w + 70, y + 13 +
                                                         (index + 1) * 20),
                                            (x + w + 70 + bar_x, y + 13 +
                                             (index + 1) * 20 + 5),
                                            (255, 255, 255), cv2.FILLED)

                                elif x - pivot_img_size > 0:

                                    text_location_y = y + 20 + (index + 1) * 20
                                    text_location_x = x - pivot_img_size

                                    if text_location_y <= y + h:
                                        cv2.putText(
                                            freeze_img, emotion_label,
                                            (text_location_x, text_location_y),
                                            cv2.FONT_HERSHEY_SIMPLEX, 0.5,
                                            (255, 255, 255), 1)

                                        cv2.rectangle(
                                            freeze_img,
                                            (x - pivot_img_size + 70, y + 13 +
                                             (index + 1) * 20),
                                            (x - pivot_img_size + 70 + bar_x,
                                             y + 13 + (index + 1) * 20 + 5),
                                            (255, 255, 255), cv2.FILLED)
                                        if emotion_label != 'Happy':
                                            cv2.putText(
                                                freeze_img, "Smile", (40, 70),
                                                cv2.FONT_HERSHEY_SIMPLEX, 3,
                                                (255, 255, 255), 1)

                            #-------------------------------

                            # face_224 = functions.preprocess_face(img = custom_face, target_size = (224, 224), grayscale = False, enforce_detection = False)
                            #
                            # age_predictions = awebcamge_model.predict(face_224)[0,:]
                            # apparent_age = Age.findApparentAge(age_predictions)

                            #-------------------------------

                            # gender_prediction = gender_model.predict(face_224)[0,:]
                            #
                            # if np.argmax(gender_prediction) == 0:
                            # 	gender = "W"
                            # elif np.argmax(gender_prediction) == 1:
                            # 	gender = "M"
                            #
                            # #print(str(int(apparent_age))," years old ", dominant_emotion, " ", gender)
                            #
                            # analysis_report = str(int(apparent_age))+" "+gender

                            #-------------------------------

                            info_box_color = (46, 200, 255)

                            #top
                            # if y - pivot_img_size + int(pivot_img_size/5) > 0:
                            #
                            # 	triangle_coordinates = np.array( [
                            # 		(x+int(w/2), y)
                            # 		, (x+int(w/2)-int(w/10), y-int(pivot_img_size/3))
                            # 		, (x+int(w/2)+int(w/10), y-int(pivot_img_size/3))
                            # 	] )
                            #
                            # 	cv2.drawContours(freeze_img, [triangle_coordinates], 0, info_box_color, -1)
                            #
                            # 	cv2.rectangle(freeze_img, (x+int(w/5), y-pivot_img_size+int(pivot_img_size/5)), (x+w-int(w/5), y-int(pivot_img_size/3)), info_box_color, cv2.FILLED)
                            #
                            # 	cv2.putText(freeze_img, analysis_report, (x+int(w/3.5), y - int(pivot_img_size/2.1)), cv2.FONT_HERSHEY_SIMPLEX, 1, (0, 111, 255), 2)

                            #bottom
                            # elif y + h + pivot_img_size - int(pivot_img_size/5) < resolution_y:
                            #
                            # 	triangle_coordinates = np.array( [
                            # 		(x+int(w/2), y+h)
                            # 		, (x+int(w/2)-int(w/10), y+h+int(pivot_img_size/3))
                            # 		, (x+int(w/2)+int(w/10), y+h+int(pivot_img_size/3))
                            # 	] )
                            #
                            # 	cv2.drawContours(freeze_img, [triangle_coordinates], 0, info_box_color, -1)
                            #
                            # 	cv2.rectangle(freeze_img, (x+int(w/5), y + h + int(pivot_img_size/3)), (x+w-int(w/5), y+h+pivot_img_size-int(pivot_img_size/5)), info_box_color, cv2.FILLED)
                            #
                            # 	cv2.putText(freeze_img, analysis_report, (x+int(w/3.5), y + h + int(pivot_img_size/1.5)), cv2.FONT_HERSHEY_SIMPLEX, 1, (0, 111, 255), 2)

                        #-------------------------------
                        #face recognition

                        custom_face = functions.preprocess_face(
                            img=custom_face,
                            target_size=(input_shape_y, input_shape_x),
                            enforce_detection=False)

                        #check preprocess_face function handled
                        #note: the branch below needs the `model` and `threshold` that are set up
                        #in the commented-out block at the top of analysis()
                        if custom_face.shape[1:3] == input_shape:
                            if df.shape[0] > 0:  #if there are images to verify, apply face recognition
                                img1_representation = model.predict(
                                    custom_face)[0, :]

                                #print(freezed_frame," - ",img1_representation[0:5])

                                def findDistance(row):
                                    distance_metric = row['distance_metric']
                                    img2_representation = row['embedding']

                                    distance = 1000  #initialize very large value
                                    if distance_metric == 'cosine':
                                        distance = dst.findCosineDistance(
                                            img1_representation,
                                            img2_representation)
                                    elif distance_metric == 'euclidean':
                                        distance = dst.findEuclideanDistance(
                                            img1_representation,
                                            img2_representation)
                                    elif distance_metric == 'euclidean_l2':
                                        distance = dst.findEuclideanDistance(
                                            dst.l2_normalize(
                                                img1_representation),
                                            dst.l2_normalize(
                                                img2_representation))

                                    return distance

                                df['distance'] = df.apply(findDistance, axis=1)
                                df = df.sort_values(by=["distance"])

                                candidate = df.iloc[0]
                                employee_name = candidate['employee']
                                best_distance = candidate['distance']

                                #print(candidate[['employee', 'distance']].values)

                                #if True:
                                if best_distance <= threshold:
                                    #print(employee_name)
                                    display_img = cv2.imread(employee_name)

                                    display_img = cv2.resize(
                                        display_img,
                                        (pivot_img_size, pivot_img_size))

                                    label = employee_name.split(
                                        "/")[-1].replace(".jpg", "")
                                    label = re.sub('[0-9]', '', label)

                                    try:
                                        if y - pivot_img_size > 0 and x + w + pivot_img_size < resolution_x:
                                            #top right
                                            freeze_img[
                                                y - pivot_img_size:y,
                                                x + w:x + w +
                                                pivot_img_size] = display_img

                                            overlay = freeze_img.copy()
                                            opacity = 0.4
                                            cv2.rectangle(
                                                freeze_img, (x + w, y),
                                                (x + w + pivot_img_size,
                                                 y + 20), (46, 200, 255),
                                                cv2.FILLED)
                                            cv2.addWeighted(
                                                overlay, opacity, freeze_img,
                                                1 - opacity, 0, freeze_img)

                                            cv2.putText(
                                                freeze_img, label,
                                                (x + w, y + 10),
                                                cv2.FONT_HERSHEY_SIMPLEX, 0.5,
                                                text_color, 1)

                                            #connect face and text
                                            cv2.line(freeze_img,
                                                     (x + int(w / 2), y),
                                                     (x + 3 * int(w / 4), y -
                                                      int(pivot_img_size / 2)),
                                                     (67, 67, 67), 1)
                                            cv2.line(freeze_img,
                                                     (x + 3 * int(w / 4), y -
                                                      int(pivot_img_size / 2)),
                                                     (x + w, y -
                                                      int(pivot_img_size / 2)),
                                                     (67, 67, 67), 1)

                                        elif y + h + pivot_img_size < resolution_y and x - pivot_img_size > 0:
                                            #bottom left
                                            freeze_img[
                                                y + h:y + h + pivot_img_size,
                                                x -
                                                pivot_img_size:x] = display_img

                                            overlay = freeze_img.copy()
                                            opacity = 0.4
                                            cv2.rectangle(
                                                freeze_img,
                                                (x - pivot_img_size,
                                                 y + h - 20), (x, y + h),
                                                (46, 200, 255), cv2.FILLED)
                                            cv2.addWeighted(
                                                overlay, opacity, freeze_img,
                                                1 - opacity, 0, freeze_img)

                                            cv2.putText(
                                                freeze_img, label,
                                                (x - pivot_img_size,
                                                 y + h - 10),
                                                cv2.FONT_HERSHEY_SIMPLEX, 0.5,
                                                text_color, 1)

                                            #connect face and text
                                            cv2.line(freeze_img,
                                                     (x + int(w / 2), y + h),
                                                     (x + int(w / 2) -
                                                      int(w / 4), y + h +
                                                      int(pivot_img_size / 2)),
                                                     (67, 67, 67), 1)
                                            cv2.line(freeze_img,
                                                     (x + int(w / 2) -
                                                      int(w / 4), y + h +
                                                      int(pivot_img_size / 2)),
                                                     (x, y + h +
                                                      int(pivot_img_size / 2)),
                                                     (67, 67, 67), 1)

                                        elif y - pivot_img_size > 0 and x - pivot_img_size > 0:
                                            #top left
                                            freeze_img[
                                                y - pivot_img_size:y, x -
                                                pivot_img_size:x] = display_img

                                            overlay = freeze_img.copy()
                                            opacity = 0.4
                                            cv2.rectangle(
                                                freeze_img,
                                                (x - pivot_img_size, y),
                                                (x, y + 20), (46, 200, 255),
                                                cv2.FILLED)
                                            cv2.addWeighted(
                                                overlay, opacity, freeze_img,
                                                1 - opacity, 0, freeze_img)

                                            cv2.putText(
                                                freeze_img, label,
                                                (x - pivot_img_size, y + 10),
                                                cv2.FONT_HERSHEY_SIMPLEX, 0.5,
                                                text_color, 1)

                                            #connect face and text
                                            cv2.line(
                                                freeze_img,
                                                (x + int(w / 2), y),
                                                (x + int(w / 2) - int(w / 4),
                                                 y - int(pivot_img_size / 2)),
                                                (67, 67, 67), 1)
                                            cv2.line(
                                                freeze_img,
                                                (x + int(w / 2) - int(w / 4),
                                                 y - int(pivot_img_size / 2)),
                                                (x,
                                                 y - int(pivot_img_size / 2)),
                                                (67, 67, 67), 1)

                                        elif x + w + pivot_img_size < resolution_x and y + h + pivot_img_size < resolution_y:
                                            #bottom right
                                            freeze_img[
                                                y + h:y + h + pivot_img_size,
                                                x + w:x + w +
                                                pivot_img_size] = display_img

                                            overlay = freeze_img.copy()
                                            opacity = 0.4
                                            cv2.rectangle(
                                                freeze_img,
                                                (x + w, y + h - 20),
                                                (x + w + pivot_img_size,
                                                 y + h), (46, 200, 255),
                                                cv2.FILLED)
                                            cv2.addWeighted(
                                                overlay, opacity, freeze_img,
                                                1 - opacity, 0, freeze_img)

                                            cv2.putText(
                                                freeze_img, label,
                                                (x + w, y + h - 10),
                                                cv2.FONT_HERSHEY_SIMPLEX, 0.5,
                                                text_color, 1)

                                            #connect face and text
                                            cv2.line(freeze_img,
                                                     (x + int(w / 2), y + h),
                                                     (x + int(w / 2) +
                                                      int(w / 4), y + h +
                                                      int(pivot_img_size / 2)),
                                                     (67, 67, 67), 1)
                                            cv2.line(freeze_img,
                                                     (x + int(w / 2) +
                                                      int(w / 4), y + h +
                                                      int(pivot_img_size / 2)),
                                                     (x + w, y + h +
                                                      int(pivot_img_size / 2)),
                                                     (67, 67, 67), 1)
                                    except Exception as err:
                                        print(str(err))

                        tic = time.time()  #reset the timer so the frozen frame stays visible for time_threshold seconds

                        #-------------------------------

                time_left = int(time_threshold - (toc - tic) + 1)

                cv2.rectangle(freeze_img, (10, 10), (90, 50), (67, 67, 67),
                              -10)
                cv2.putText(freeze_img, str(time_left), (40, 40),
                            cv2.FONT_HERSHEY_SIMPLEX, 1, (255, 255, 255), 1)
                df9 = df8.tail(7)

                cv2.imshow('Title Of The Project', freeze_img)

                df9.to_csv('out.csv')
                df8.to_csv('history.csv')
                df_final = pd.read_csv('out.csv')

                freezed_frame = freezed_frame + 1
            else:
                face_detected = False
                face_included_frames = 0
                freeze = False
                freezed_frame = 0

        else:
            cv2.imshow('Title Of The Project', img)

        if cv2.waitKey(1) & 0xFF == ord('q'):  #press q to quit
            break

    #kill open cv things
    cap.release()
    cv2.destroyAllWindows()
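
# Hypothetical invocation of the analysis() loop above (the database path and
# thresholds are placeholders; source=0 means the default webcam):
if __name__ == "__main__":
    analysis(db_path="my_face_db", model_name="VGG-Face", distance_metric="cosine",
             enable_face_analysis=True, source=0, time_threshold=5, frame_threshold=5)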
Example #26
# assumes a Flask app context with: from flask import request, jsonify; import time, uuid
def verify():

    global graph

    tic = time.time()
    req = request.get_json()
    trx_id = uuid.uuid4()

    resp_obj = jsonify({'success': False})

    with graph.as_default():

        model_name = "VGG-Face"
        distance_metric = "cosine"
        if "model_name" in list(req.keys()):
            model_name = req["model_name"]
        if "distance_metric" in list(req.keys()):
            distance_metric = req["distance_metric"]

        #----------------------

        instances = []
        if "img" in list(req.keys()):
            raw_content = req["img"]  #list

            for item in raw_content:  #item is in type of dict
                instance = []
                img1 = item["img1"]
                img2 = item["img2"]

                validate_img1 = False
                if len(img1) > 11 and img1[0:11] == "data:image/":
                    validate_img1 = True

                validate_img2 = False
                if len(img2) > 11 and img2[0:11] == "data:image/":
                    validate_img2 = True

                if validate_img1 != True or validate_img2 != True:
                    return jsonify({
                        'success':
                        False,
                        'error':
                        'you must pass both img1 and img2 as base64 encoded string'
                    }), 205

                instance.append(img1)
                instance.append(img2)
                instances.append(instance)

        #--------------------------

        if len(instances) == 0:
            return jsonify({
                'success':
                False,
                'error':
                'you must pass at least one img object in your request'
            }), 205

        print("Input request of ", trx_id, " has ", len(instances),
              " pairs to verify")

        #--------------------------

        if model_name == "VGG-Face":
            resp_obj = DeepFace.verify(instances,
                                       model_name=model_name,
                                       distance_metric=distance_metric,
                                       model=vggface_model)
        elif model_name == "Facenet":
            resp_obj = DeepFace.verify(instances,
                                       model_name=model_name,
                                       distance_metric=distance_metric,
                                       model=facenet_model)
        elif model_name == "OpenFace":
            resp_obj = DeepFace.verify(instances,
                                       model_name=model_name,
                                       distance_metric=distance_metric,
                                       model=openface_model)
        elif model_name == "DeepFace":
            resp_obj = DeepFace.verify(instances,
                                       model_name=model_name,
                                       distance_metric=distance_metric,
                                       model=deepface_model)
        elif model_name == "Ensemble":
            models = {}
            models["VGG-Face"] = vggface_model
            models["Facenet"] = facenet_model
            models["OpenFace"] = openface_model
            models["DeepFace"] = deepface_model

            resp_obj = DeepFace.verify(instances,
                                       model_name=model_name,
                                       model=models)

        else:
            return jsonify({
                'success':
                False,
                'error':
                'You must pass a valid model name. Available models are VGG-Face, Facenet, OpenFace, DeepFace but you passed %s'
                % (model_name)
            }), 205

    #--------------------------

    toc = time.time()

    resp_obj["trx_id"] = trx_id
    resp_obj["seconds"] = toc - tic

    return resp_obj, 200
Example #27
if __name__ == "__main__":
    dspth = 'res/test-img/FinalData'

    pre_models = {}
    pre_models['emotion'] = loadModel_emotion()
    pre_models['age'] = loadModel_age()
    pre_models['gender'] = loadModel_gender()
    pre_models['race'] = loadModel_race()
    if not writeFile:
        image_name = '101.jpeg'
        src = cv2.imread(osp.join(dspth, image_name))
        all_contour(src)

        demography = DeepFace.analyze(
            osp.join(dspth, image_name),
            actions=['age', 'gender', 'race', 'emotion'],
            models=pre_models)
        race_judge = 'asian'
        for key, value in demography['race'].items():
            if value == max(demography['race'].values()):
                race_judge = key
        if race_judge == 'asian':
            feature['race'] = 1
        elif race_judge == 'black':
            feature['race'] = 0
        elif race_judge == 'white':
            feature['race'] = 2
        else:
            feature['race'] = 1

        feature['eye'] = 1 if description['glasses'] == 1 else 0
Example #28
#pip install deepface
from deepface import DeepFace
DeepFace.stream('dataset')
#             pred = []
#     pair = all_pairs[i]
#     print(len(pair))
#     """result = DeepFace.verify([pair], enforce_detection = False)
#     pred.append(result["pair_1"]["verified"])"""
#     try:
#         result = DeepFace.verify([pair])
#         pred.append(result["pair_1"]["verified"])

#     except ValueError:
#         pred.append(2)

### The first (commented) line runs deepface with the default VGG-Face model; the second runs it with the Facenet model. Models are downloaded automatically.
# result = DeepFace.verify(all_pairs, enforce_detection= False)
result = DeepFace.verify(all_pairs,
                         enforce_detection=False,
                         model_name="Facenet")

#print(result)

### write results to txt file
with open("facenet_model_output.txt", "w") as file:
    for i in range(0, 4000):
        # for i in range(0,len(all_pairs[0:10])):
        file.write(str(result["pair_" + str(i + 1)]["verified"]) + "\n")
    # i = 1
    # for res in result:
    #     print(res)
    #     file.write(str(res["pair_" + str(i)]["verified"]) + "\n")
    #     i += 1
""""try:
Example #30
def get_emotion():
    DATAFRAMESOUT = os.getenv("DATA_FRAMES_OUT")
    PATHIN = os.getenv("PATH_IN")
    PATHOUT = os.getenv("PATH_OUT")
    img_array = []
    dfs = []
    video_paths = []
    video_names = []
    person_folder_names = []

    # added the video counter here so it does not depend on the previous function
    video_counter = 0  # how many videos there are
    for i in os.listdir(PATHIN):
        person_folder = PATHIN + i
        for vid_folder in os.listdir(person_folder):
            vid_folder = person_folder + "/" + vid_folder
            for video in os.listdir(vid_folder):
                video_names.append(
                    video)  # saving just the name of the video into an array
                video = vid_folder + "/" + video
                video_paths.append(
                    video
                )  # saving the whole path of the video into another array
                person_folder_names.append(
                    i)  # add an instance of 'folder name' for each video
                video_counter += 1
    for i in range(0, video_counter, 1):
        for filename in glob.glob(PATHOUT + 'video%d' % i + 'frame*.jpg'):
            # Read in the relevant images
            img = cv2.imread(filename)
            height, width, layers = img.shape
            size = (width, height)
            img_array.append(img)
        # Pass them through deepface
        face_FER = DeepFace.analyze(img_path=img_array,
                                    actions=['emotion'],
                                    enforce_detection=False)
        img_array = []  # reset image array to be blank
        data = face_FER
        # Turning arrays into pandas dataframes and labelling emotions
        emotions = set()
        # First we need to find out all unique emotions
        for key, value in data.items():
            for emotion in value['emotion'].keys():
                emotions.add(emotion)
        rows = []
        columns = ['instance'] + list(emotions)
        for key, value in data.items():
            rows.append([0] *
                        len(columns))  # Start creating a new row with zeros
            key = key.split('_')[1]  # Get the 1,2,3 out of the instance
            rows[-1][0] = key
            for emotion, emotion_value in value['emotion'].items():
                rows[-1][columns.index(
                    emotion
                )] = emotion_value  # place the emotion in the correct index
        df = pd.DataFrame(rows, columns=columns)
        df.set_index('instance', inplace=True)
        dfs.append(df)  # TODO: need to index this with 'video names'
    #
    index_arrays = (person_folder_names, video_names)
    dfs_index = pd.MultiIndex.from_arrays(index_arrays,
                                          names=["person", "video"])
    dfs_frame = pd.DataFrame(dfs, index=dfs_index, columns=[
        'video_emotion'
    ])  #dataframe indexed by folder name and video name (two levels)
    dfs_frame.to_pickle(DATAFRAMESOUT + 'dfs_frame.pkl')