def create_model(db_path, model_name):

    if model_name == 'VGG-Face':
        print("Using VGG-Face model backend")
        model = VGGFace.loadModel()
        input_shape = (224, 224)
    elif model_name == 'OpenFace':
        print("Using OpenFace model backend")
        model = OpenFace.loadModel()
        input_shape = (96, 96)
    elif model_name == 'Facenet':
        print("Using Facenet model backend")
        model = Facenet.loadModel()
        input_shape = (160, 160)
    elif model_name == 'DeepFace':
        print("Using FB DeepFace model backend")
        model = FbDeepFace.loadModel()
        input_shape = (152, 152)
    elif model_name == 'DeepID':
        print("Using DeepID model backend")
        model = DeepID.loadModel()
        input_shape = (55, 47)
    elif model_name == 'Dlib':
        print("Using Dlib model backend")
        from deepface.basemodels.DlibResNet import DlibResNet  #lazy import: dlib is an optional dependency
        model = DlibResNet()
        input_shape = (150, 150)
    else:
        raise ValueError("Invalid model_name passed - ", model_name)

    return model, input_shape
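# Usage sketch (illustrative comment, not part of the original source). create_model
# builds the requested backbone once so it can be reused across verify/find calls;
# note that the db_path argument is accepted but not used inside this function.
#
#   model, input_shape = create_model("my_db", "Facenet")   # "my_db" is a hypothetical folder
#   print(input_shape)                                      # -> (160, 160)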
def find(img_path,
         db_path,
         model_name='VGG-Face',
         distance_metric='cosine',
         model=None,
         enforce_detection=True,
         detector_backend='opencv'):

    model_names = ['VGG-Face', 'Facenet', 'OpenFace', 'DeepFace']
    metric_names = ['cosine', 'euclidean', 'euclidean_l2']

    tic = time.time()

    if type(img_path) == list:
        bulkProcess = True
        img_paths = img_path.copy()
    else:
        bulkProcess = False
        img_paths = [img_path]

    if os.path.isdir(db_path) == True:

        #---------------------------------------

        if model == None:
            if model_name == 'VGG-Face':
                print("Using VGG-Face model backend and", distance_metric, "distance.")
                model = VGGFace.loadModel()
            elif model_name == 'OpenFace':
                print("Using OpenFace model backend", distance_metric, "distance.")
                model = OpenFace.loadModel()
            elif model_name == 'Facenet':
                print("Using Facenet model backend", distance_metric, "distance.")
                model = Facenet.loadModel()
            elif model_name == 'DeepFace':
                print("Using FB DeepFace model backend", distance_metric, "distance.")
                model = FbDeepFace.loadModel()
            elif model_name == 'DeepID':
                print("Using DeepID model backend", distance_metric, "distance.")
                model = DeepID.loadModel()
            elif model_name == 'Dlib':
                print("Using Dlib ResNet model backend", distance_metric, "distance.")
                from deepface.basemodels.DlibResNet import DlibResNet  #lazy import as in create_model
                model = DlibResNet()
            elif model_name == 'Ensemble':
                print("Ensemble learning enabled")
                #TODO: include DeepID in ensemble method

                import lightgbm as lgb  #lightgbm==2.3.1

                models = {}

                pbar = tqdm(range(0, len(model_names)), desc='Face recognition models')

                for index in pbar:
                    if index == 0:
                        pbar.set_description("Loading VGG-Face")
                        models['VGG-Face'] = VGGFace.loadModel()
                    elif index == 1:
                        pbar.set_description("Loading FaceNet")
                        models['Facenet'] = Facenet.loadModel()
                    elif index == 2:
                        pbar.set_description("Loading OpenFace")
                        models['OpenFace'] = OpenFace.loadModel()
                    elif index == 3:
                        pbar.set_description("Loading DeepFace")
                        models['DeepFace'] = FbDeepFace.loadModel()
            else:
                raise ValueError("Invalid model_name passed - ", model_name)
        else:  #model != None
            print("Already built model is passed")

            if model_name == 'Ensemble':

                import lightgbm as lgb  #lightgbm==2.3.1

                #validate model dictionary because it might be passed from input as pre-trained
                found_models = []
                for key, value in model.items():
                    found_models.append(key)

                if ('VGG-Face' in found_models) and ('Facenet' in found_models) \
                        and ('OpenFace' in found_models) and ('DeepFace' in found_models):
                    print("Ensemble learning will be applied for ", found_models, " models")
                else:
                    raise ValueError("Ensemble learning requires pre-built models for [VGG-Face, Facenet, OpenFace, DeepFace] but you passed " + str(found_models))

                models = model.copy()

        #threshold = functions.findThreshold(model_name, distance_metric)

        #---------------------------------------

        file_name = "representations_%s.pkl" % (model_name)
        file_name = file_name.replace("-", "_").lower()

        if path.exists(db_path + "/" + file_name):

            print("WARNING: Representations for images in ", db_path, " folder were previously stored in ", file_name,
                  ". If you added new instances after this file was created, then please delete this file and call the find function again. It will be re-created.")

            f = open(db_path + '/' + file_name, 'rb')
            representations = pickle.load(f)

            print("There are ", len(representations), " representations found in ", file_name)

        else:
            employees = []

            for r, d, f in os.walk(db_path):  # r=root, d=directories, f=files
                for file in f:
                    if '.jpg' in file:
                        exact_path = r + "/" + file
                        employees.append(exact_path)

            if len(employees) == 0:
                raise ValueError("There is no image in ", db_path, " folder!")

            #------------------------
            #find representations for db images

            representations = []

            pbar = tqdm(range(0, len(employees)), desc='Finding representations')

            #for employee in employees:
            for index in pbar:
                employee = employees[index]

                if model_name != 'Ensemble':

                    if model_name == 'Dlib':  #non-keras model
                        input_shape = (150, 150, 3)
                    else:
                        #some Keras versions return (None, 224, 224, 3), others [(None, 224, 224, 3)]
                        input_shape = model.layers[0].input_shape

                        if type(input_shape) == list:
                            input_shape = input_shape[0][1:3]
                        else:
                            input_shape = input_shape[1:3]

                    #---------------------

                    input_shape_x = input_shape[0]
                    input_shape_y = input_shape[1]

                    img = functions.preprocess_face(img=employee,
                                                    target_size=(input_shape_y, input_shape_x),
                                                    enforce_detection=enforce_detection,
                                                    detector_backend=detector_backend)

                    representation = model.predict(img)[0, :]

                    instance = []
                    instance.append(employee)
                    instance.append(representation)

                else:  #ensemble learning

                    instance = []
                    instance.append(employee)

                    for j in model_names:
                        ensemble_model = models[j]

                        #some Keras versions return (None, 224, 224, 3), others [(None, 224, 224, 3)]
                        input_shape = ensemble_model.layers[0].input_shape

                        if type(input_shape) == list:
                            input_shape = input_shape[0][1:3]
                        else:
                            input_shape = input_shape[1:3]

                        input_shape_x = input_shape[0]
                        input_shape_y = input_shape[1]

                        img = functions.preprocess_face(img=employee,
                                                        target_size=(input_shape_y, input_shape_x),
                                                        enforce_detection=enforce_detection,
                                                        detector_backend=detector_backend)

                        representation = ensemble_model.predict(img)[0, :]
                        instance.append(representation)

                #-------------------------------

                representations.append(instance)

            f = open(db_path + '/' + file_name, "wb")
            pickle.dump(representations, f)
            f.close()

            print("Representations stored in ", db_path, "/", file_name,
                  " file. Please delete this file when you add new identities to your database.")

        #----------------------------
        #we got representations for the database

        if model_name != 'Ensemble':
            df = pd.DataFrame(representations, columns=["identity", "representation"])
        else:  #ensemble learning
            df = pd.DataFrame(representations,
                              columns=["identity",
                                       "VGG-Face_representation",
                                       "Facenet_representation",
                                       "OpenFace_representation",
                                       "DeepFace_representation"])

        df_base = df.copy()

        resp_obj = []

        global_pbar = tqdm(range(0, len(img_paths)), desc='Analyzing')
        for j in global_pbar:
            img_path = img_paths[j]

            #find representation for the passed image

            if model_name == 'Ensemble':
                for j in model_names:
                    ensemble_model = models[j]

                    #some Keras versions return (None, 224, 224, 3), others [(None, 224, 224, 3)]
                    input_shape = ensemble_model.layers[0].input_shape

                    if type(input_shape) == list:
                        input_shape = input_shape[0][1:3]
                    else:
                        input_shape = input_shape[1:3]

                    img = functions.preprocess_face(img=img_path,
                                                    target_size=input_shape,
                                                    enforce_detection=enforce_detection,
                                                    detector_backend=detector_backend)

                    target_representation = ensemble_model.predict(img)[0, :]

                    for k in metric_names:
                        distances = []
                        for index, instance in df.iterrows():
                            source_representation = instance["%s_representation" % (j)]

                            if k == 'cosine':
                                distance = dst.findCosineDistance(source_representation, target_representation)
                            elif k == 'euclidean':
                                distance = dst.findEuclideanDistance(source_representation, target_representation)
                            elif k == 'euclidean_l2':
                                distance = dst.findEuclideanDistance(dst.l2_normalize(source_representation),
                                                                     dst.l2_normalize(target_representation))

                            distances.append(distance)

                        if j == 'OpenFace' and k == 'euclidean':
                            continue
                        else:
                            df["%s_%s" % (j, k)] = distances

                #----------------------------------

                feature_names = []
                for j in model_names:
                    for k in metric_names:
                        if j == 'OpenFace' and k == 'euclidean':
                            continue
                        else:
                            feature = '%s_%s' % (j, k)
                            feature_names.append(feature)

                #print(df[feature_names].head())

                x = df[feature_names].values

                #----------------------------------
                #lightgbm model

                home = str(Path.home())

                if os.path.isfile(home + '/.deepface/weights/face-recognition-ensemble-model.txt') != True:
                    print("face-recognition-ensemble-model.txt will be downloaded...")
                    url = 'https://raw.githubusercontent.com/serengil/deepface/master/deepface/models/face-recognition-ensemble-model.txt'
                    output = home + '/.deepface/weights/face-recognition-ensemble-model.txt'
                    gdown.download(url, output, quiet=False)

                ensemble_model_path = home + '/.deepface/weights/face-recognition-ensemble-model.txt'

                deepface_ensemble = lgb.Booster(model_file=ensemble_model_path)

                y = deepface_ensemble.predict(x)

                verified_labels = []
                scores = []
                for i in y:
                    verified = np.argmax(i) == 1
                    score = i[np.argmax(i)]

                    verified_labels.append(verified)
                    scores.append(score)

                df['verified'] = verified_labels
                df['score'] = scores

                df = df[df.verified == True]
                #df = df[df.score > 0.99] #confidence score
                df = df.sort_values(by=["score"], ascending=False).reset_index(drop=True)
                df = df[['identity', 'verified', 'score']]

                resp_obj.append(df)
                df = df_base.copy()  #restore df for the next iteration

            #----------------------------------

            if model_name != 'Ensemble':

                if model_name == 'Dlib':  #non-keras model
                    input_shape = (150, 150, 3)
                else:
                    #some Keras versions return (None, 224, 224, 3), others [(None, 224, 224, 3)]
                    input_shape = model.layers[0].input_shape

                    if type(input_shape) == list:
                        input_shape = input_shape[0][1:3]
                    else:
                        input_shape = input_shape[1:3]

                #------------------------

                input_shape_x = input_shape[0]
                input_shape_y = input_shape[1]

                img = functions.preprocess_face(img=img_path,
                                                target_size=(input_shape_y, input_shape_x),
                                                enforce_detection=enforce_detection,
                                                detector_backend=detector_backend)

                target_representation = model.predict(img)[0, :]

                distances = []
                for index, instance in df.iterrows():
                    source_representation = instance["representation"]

                    if distance_metric == 'cosine':
                        distance = dst.findCosineDistance(source_representation, target_representation)
                    elif distance_metric == 'euclidean':
                        distance = dst.findEuclideanDistance(source_representation, target_representation)
                    elif distance_metric == 'euclidean_l2':
                        distance = dst.findEuclideanDistance(dst.l2_normalize(source_representation),
                                                             dst.l2_normalize(target_representation))
                    else:
                        raise ValueError("Invalid distance_metric passed - ", distance_metric)

                    distances.append(distance)

                threshold = functions.findThreshold(model_name, distance_metric)

                df["distance"] = distances
                df = df.drop(columns=["representation"])
                df = df[df.distance <= threshold]

                df = df.sort_values(by=["distance"], ascending=True).reset_index(drop=True)

                resp_obj.append(df)
                df = df_base.copy()  #restore df for the next iteration

        toc = time.time()

        print("find function lasts ", toc - tic, " seconds")

        if len(resp_obj) == 1:
            return resp_obj[0]

        return resp_obj

    else:
        raise ValueError("Passed db_path does not exist!")
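# Usage sketch (illustrative comment, not part of the original source). find() looks a
# target face up in a folder of .jpg images and returns a pandas DataFrame filtered by
# the tuned threshold and sorted by distance; a list input returns a list of DataFrames.
# "img1.jpg" and "my_db" are hypothetical paths.
#
#   df = find(img_path="img1.jpg", db_path="my_db",
#             model_name="VGG-Face", distance_metric="cosine")
#   print(df.head())   # columns: identity, distance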
def verify(img1_path,
           img2_path='',
           model_name='VGG-Face',
           distance_metric='cosine',
           model=None,
           enforce_detection=True,
           detector_backend='opencv'):

    tic = time.time()

    if type(img1_path) == list:
        bulkProcess = True
        img_list = img1_path.copy()
    else:
        bulkProcess = False
        img_list = [[img1_path, img2_path]]

    #------------------------------

    resp_objects = []

    if model_name == 'Ensemble':
        print("Ensemble learning enabled")

        import lightgbm as lgb  #lightgbm==2.3.1

        if model == None:
            model = {}

            model_pbar = tqdm(range(0, 4), desc='Face recognition models')

            for index in model_pbar:
                if index == 0:
                    model_pbar.set_description("Loading VGG-Face")
                    model["VGG-Face"] = VGGFace.loadModel()
                elif index == 1:
                    model_pbar.set_description("Loading Google FaceNet")
                    model["Facenet"] = Facenet.loadModel()
                elif index == 2:
                    model_pbar.set_description("Loading OpenFace")
                    model["OpenFace"] = OpenFace.loadModel()
                elif index == 3:
                    model_pbar.set_description("Loading Facebook DeepFace")
                    model["DeepFace"] = FbDeepFace.loadModel()

        #--------------------------
        #validate model dictionary because it might be passed from input as pre-trained

        found_models = []
        for key, value in model.items():
            found_models.append(key)

        if ('VGG-Face' in found_models) and ('Facenet' in found_models) \
                and ('OpenFace' in found_models) and ('DeepFace' in found_models):
            print("Ensemble learning will be applied for ", found_models, " models")
        else:
            raise ValueError("Ensemble learning requires pre-built models for [VGG-Face, Facenet, OpenFace, DeepFace] but you passed " + str(found_models))

        #--------------------------

        model_names = ["VGG-Face", "Facenet", "OpenFace", "DeepFace"]
        metrics = ["cosine", "euclidean", "euclidean_l2"]

        pbar = tqdm(range(0, len(img_list)), desc='Verification')

        #for instance in img_list:
        for index in pbar:
            instance = img_list[index]

            if type(instance) == list and len(instance) >= 2:
                img1_path = instance[0]
                img2_path = instance[1]

                ensemble_features = []
                ensemble_features_string = "["

                for i in model_names:
                    custom_model = model[i]

                    #some Keras versions return (None, 224, 224, 3), others [(None, 224, 224, 3)]
                    input_shape = custom_model.layers[0].input_shape

                    if type(input_shape) == list:
                        input_shape = input_shape[0][1:3]
                    else:
                        input_shape = input_shape[1:3]

                    img1 = functions.preprocess_face(img=img1_path,
                                                     target_size=input_shape,
                                                     enforce_detection=enforce_detection,
                                                     detector_backend=detector_backend)

                    img2 = functions.preprocess_face(img=img2_path,
                                                     target_size=input_shape,
                                                     enforce_detection=enforce_detection,
                                                     detector_backend=detector_backend)

                    img1_representation = custom_model.predict(img1)[0, :]
                    img2_representation = custom_model.predict(img2)[0, :]

                    for j in metrics:
                        if j == 'cosine':
                            distance = dst.findCosineDistance(img1_representation, img2_representation)
                        elif j == 'euclidean':
                            distance = dst.findEuclideanDistance(img1_representation, img2_representation)
                        elif j == 'euclidean_l2':
                            distance = dst.findEuclideanDistance(dst.l2_normalize(img1_representation),
                                                                 dst.l2_normalize(img2_representation))

                        if i == 'OpenFace' and j == 'euclidean':  #this returns the same as OpenFace - euclidean_l2
                            continue
                        else:
                            ensemble_features.append(distance)

                            if len(ensemble_features) > 1:
                                ensemble_features_string += ", "
                            ensemble_features_string += str(distance)

                #print("ensemble_features: ", ensemble_features)

                ensemble_features_string += "]"

                #-------------------------------
                #find deepface path

                home = str(Path.home())

                if os.path.isfile(home + '/.deepface/weights/face-recognition-ensemble-model.txt') != True:
                    print("face-recognition-ensemble-model.txt will be downloaded...")
                    url = 'https://raw.githubusercontent.com/serengil/deepface/master/deepface/models/face-recognition-ensemble-model.txt'
                    output = home + '/.deepface/weights/face-recognition-ensemble-model.txt'
                    gdown.download(url, output, quiet=False)

                ensemble_model_path = home + '/.deepface/weights/face-recognition-ensemble-model.txt'
                #print(ensemble_model_path)

                #-------------------------------

                deepface_ensemble = lgb.Booster(model_file=ensemble_model_path)

                prediction = deepface_ensemble.predict(np.expand_dims(np.array(ensemble_features), axis=0))[0]

                verified = np.argmax(prediction) == 1
                if verified:
                    identified = "true"
                else:
                    identified = "false"

                score = prediction[np.argmax(prediction)]

                #print("verified: ", verified, ", score: ", score)

                resp_obj = "{"
                resp_obj += "\"verified\": " + identified
                resp_obj += ", \"score\": " + str(score)
                resp_obj += ", \"distance\": " + ensemble_features_string
                resp_obj += ", \"model\": [\"VGG-Face\", \"Facenet\", \"OpenFace\", \"DeepFace\"]"
                resp_obj += ", \"similarity_metric\": [\"cosine\", \"euclidean\", \"euclidean_l2\"]"
                resp_obj += "}"

                #print(resp_obj)

                resp_obj = json.loads(resp_obj)  #string to json

                if bulkProcess == True:
                    resp_objects.append(resp_obj)
                else:
                    return resp_obj

            #-------------------------------

        if bulkProcess == True:
            resp_obj = "{"

            for i in range(0, len(resp_objects)):
                resp_item = json.dumps(resp_objects[i])

                if i > 0:
                    resp_obj += ", "

                resp_obj += "\"pair_" + str(i + 1) + "\": " + resp_item

            resp_obj += "}"
            resp_obj = json.loads(resp_obj)
            return resp_obj

        return None  #ensemble learning block end

    #--------------------------------
    #ensemble learning disabled

    if model == None:
        if model_name == 'VGG-Face':
            print("Using VGG-Face model backend and", distance_metric, "distance.")
            model = VGGFace.loadModel()
        elif model_name == 'OpenFace':
            print("Using OpenFace model backend", distance_metric, "distance.")
            model = OpenFace.loadModel()
        elif model_name == 'Facenet':
            print("Using Facenet model backend", distance_metric, "distance.")
            model = Facenet.loadModel()
        elif model_name == 'DeepFace':
            print("Using FB DeepFace model backend", distance_metric, "distance.")
            model = FbDeepFace.loadModel()
        elif model_name == 'DeepID':
            print("Using DeepID2 model backend", distance_metric, "distance.")
            model = DeepID.loadModel()
        elif model_name == 'Dlib':
            print("Using Dlib ResNet model backend", distance_metric, "distance.")
            from deepface.basemodels.DlibResNet import DlibResNet  #lazy import as in create_model
            model = DlibResNet()
        else:
            raise ValueError("Invalid model_name passed - ", model_name)
    else:  #model != None
        print("Already built model is passed")

    #------------------------------
    #face recognition models have different input sizes
    #some Keras versions return (None, 224, 224, 3), others [(None, 224, 224, 3)]

    if model_name == 'Dlib':  #this is not a regular keras model
        input_shape = (150, 150, 3)
    else:  #keras based models
        input_shape = model.layers[0].input_shape

        if type(input_shape) == list:
            input_shape = input_shape[0][1:3]
        else:
            input_shape = input_shape[1:3]

    input_shape_x = input_shape[0]
    input_shape_y = input_shape[1]

    #------------------------------

    #tuned thresholds for model and metric pair
    threshold = functions.findThreshold(model_name, distance_metric)

    #------------------------------

    #calling deepface in a for loop causes lots of progress bars; this block prevents that
    if len(img_list) > 1:
        disable_option = False
    else:
        disable_option = True

    pbar = tqdm(range(0, len(img_list)), desc='Verification', disable=disable_option)

    #for instance in img_list:
    for index in pbar:
        instance = img_list[index]

        if type(instance) == list and len(instance) >= 2:
            img1_path = instance[0]
            img2_path = instance[1]

            #----------------------
            #crop and align faces

            img1 = functions.preprocess_face(img=img1_path,
                                             target_size=(input_shape_y, input_shape_x),
                                             enforce_detection=enforce_detection,
                                             detector_backend=detector_backend)

            img2 = functions.preprocess_face(img=img2_path,
                                             target_size=(input_shape_y, input_shape_x),
                                             enforce_detection=enforce_detection,
                                             detector_backend=detector_backend)

            #----------------------
            #find embeddings

            img1_representation = model.predict(img1)[0, :]
            img2_representation = model.predict(img2)[0, :]

            #----------------------
            #find distances between embeddings

            if distance_metric == 'cosine':
                distance = dst.findCosineDistance(img1_representation, img2_representation)
            elif distance_metric == 'euclidean':
                distance = dst.findEuclideanDistance(img1_representation, img2_representation)
            elif distance_metric == 'euclidean_l2':
                distance = dst.findEuclideanDistance(dst.l2_normalize(img1_representation),
                                                     dst.l2_normalize(img2_representation))
            else:
                raise ValueError("Invalid distance_metric passed - ", distance_metric)

            #----------------------
            #decision

            if distance <= threshold:
                identified = "true"
            else:
                identified = "false"

            #----------------------
            #response object

            resp_obj = "{"
            resp_obj += "\"verified\": " + identified
            resp_obj += ", \"distance\": " + str(distance)
            resp_obj += ", \"max_threshold_to_verify\": " + str(threshold)
            resp_obj += ", \"model\": \"" + model_name + "\""
            resp_obj += ", \"similarity_metric\": \"" + distance_metric + "\""
            resp_obj += "}"

            resp_obj = json.loads(resp_obj)  #string to json

            if bulkProcess == True:
                resp_objects.append(resp_obj)
            else:
                #K.clear_session()
                return resp_obj

            #----------------------

        else:
            raise ValueError("Invalid arguments passed to verify function: ", instance)

    #-------------------------

    toc = time.time()

    #print("identification lasts ", toc - tic, " seconds")

    if bulkProcess == True:
        resp_obj = "{"

        for i in range(0, len(resp_objects)):
            resp_item = json.dumps(resp_objects[i])

            if i > 0:
                resp_obj += ", "

            resp_obj += "\"pair_" + str(i + 1) + "\": " + resp_item

        resp_obj += "}"
        resp_obj = json.loads(resp_obj)
        return resp_obj
def loadModel():
    return DlibResNet()
def DlibResNet_():
    from deepface.basemodels.DlibResNet import DlibResNet
    return DlibResNet()
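# Usage sketch (illustrative comment, not part of the original source). Both helpers wrap
# the dlib-based ResNet so that it exposes the same loadModel() convention as the
# Keras-based basemodels:
#
#   model = loadModel()
#   # embedding = model.predict(preprocessed_face)[0, :]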
def analysis(db_path, model_name, distance_metric, enable_face_analysis=True):

    input_shape = (224, 224)
    text_color = (255, 255, 255)

    employees = []
    #check passed db folder exists
    if os.path.isdir(db_path) == True:
        for r, d, f in os.walk(db_path):  # r=root, d=directories, f=files
            for file in f:
                if '.jpg' in file:
                    #exact_path = os.path.join(r, file)
                    exact_path = r + "/" + file
                    #print(exact_path)
                    employees.append(exact_path)

    #------------------------

    if len(employees) > 0:
        if model_name == 'VGG-Face':
            print("Using VGG-Face model backend and", distance_metric, "distance.")
            model = VGGFace.loadModel()
            input_shape = (224, 224)
        elif model_name == 'OpenFace':
            print("Using OpenFace model backend", distance_metric, "distance.")
            model = OpenFace.loadModel()
            input_shape = (96, 96)
        elif model_name == 'Facenet':
            print("Using Facenet model backend", distance_metric, "distance.")
            model = Facenet.loadModel()
            input_shape = (160, 160)
        elif model_name == 'DeepFace':
            print("Using FB DeepFace model backend", distance_metric, "distance.")
            model = FbDeepFace.loadModel()
            input_shape = (152, 152)
        elif model_name == 'DeepID':
            print("Using DeepID model backend", distance_metric, "distance.")
            model = DeepID.loadModel()
            input_shape = (55, 47)
        elif model_name == 'Dlib':
            print("Using Dlib model backend", distance_metric, "distance.")
            from deepface.basemodels.DlibResNet import DlibResNet  #lazy import as in create_model
            model = DlibResNet()
            input_shape = (150, 150)
        else:
            raise ValueError("Invalid model_name passed - ", model_name)

        #------------------------

        input_shape_x = input_shape[0]
        input_shape_y = input_shape[1]

        #tuned thresholds for model and metric pair
        threshold = functions.findThreshold(model_name, distance_metric)

        #------------------------
        #facial attribute analysis models

        if enable_face_analysis == True:
            tic = time.time()

            emotion_model = Emotion.loadModel()
            print("Emotion model loaded")

            age_model = Age.loadModel()
            print("Age model loaded")

            gender_model = Gender.loadModel()
            print("Gender model loaded")

            toc = time.time()
            print("Facial attribute analysis models loaded in ", toc - tic, " seconds")

        #------------------------
        #find embeddings for employee list

        tic = time.time()

        pbar = tqdm(range(0, len(employees)), desc='Finding embeddings')

        embeddings = []
        #for employee in employees:
        for index in pbar:
            employee = employees[index]
            pbar.set_description("Finding embedding for %s" % (employee.split("/")[-1]))
            embedding = []
            img = functions.detectFace(employee, (input_shape_y, input_shape_x))
            img_representation = model.predict(img)[0, :]

            embedding.append(employee)
            embedding.append(img_representation)
            embeddings.append(embedding)

        df = pd.DataFrame(embeddings, columns=['employee', 'embedding'])
        df['distance_metric'] = distance_metric

        toc = time.time()

        print("Embeddings found for given data set in ", toc - tic, " seconds")

    #-----------------------

    time_threshold = 5
    frame_threshold = 5
    pivot_img_size = 112  #face recognition result image

    #-----------------------

    opencv_path = functions.get_opencv_path()
    face_detector_path = opencv_path + "haarcascade_frontalface_default.xml"
    face_cascade = cv2.CascadeClassifier(face_detector_path)

    #-----------------------

    freeze = False
    face_detected = False
    face_included_frames = 0  #freeze screen if a face is detected sequentially for 5 frames
    freezed_frame = 0
    tic = time.time()

    cap = cv2.VideoCapture(0)  #webcam
    #cap = cv2.VideoCapture("C:/Users/IS96273/Desktop/skype-video-1.mp4") #video

    while True:
        ret, img = cap.read()

        #cv2.namedWindow('img', cv2.WINDOW_FREERATIO)
        #cv2.setWindowProperty('img', cv2.WND_PROP_FULLSCREEN, cv2.WINDOW_FULLSCREEN)

        raw_img = img.copy()
        resolution = img.shape

        resolution_x = img.shape[1]
        resolution_y = img.shape[0]

        if freeze == False:
            faces = face_cascade.detectMultiScale(img, 1.3, 5)

            if len(faces) == 0:
                face_included_frames = 0
        else:
            faces = []

        detected_faces = []
        face_index = 0
        for (x, y, w, h) in faces:
            if w > 130:  #discard small detected faces

                face_detected = True
                if face_index == 0:
                    face_included_frames = face_included_frames + 1  #increase frame count for a single face

                cv2.rectangle(img, (x, y), (x + w, y + h), (67, 67, 67), 1)  #draw rectangle to main image

                cv2.putText(img, str(frame_threshold - face_included_frames),
                            (int(x + w / 4), int(y + h / 1.5)),
                            cv2.FONT_HERSHEY_SIMPLEX, 4, (255, 255, 255), 2)

                detected_face = img[int(y):int(y + h), int(x):int(x + w)]  #crop detected face

                #-------------------------------------

                detected_faces.append((x, y, w, h))
                face_index = face_index + 1

                #-------------------------------------

        if face_detected == True and face_included_frames == frame_threshold and freeze == False:
            freeze = True
            #base_img = img.copy()
            base_img = raw_img.copy()
            detected_faces_final = detected_faces.copy()
            tic = time.time()

        if freeze == True:

            toc = time.time()
            if (toc - tic) < time_threshold:

                if freezed_frame == 0:
                    freeze_img = base_img.copy()
                    #freeze_img = np.zeros(resolution, np.uint8) #here, np.uint8 handles showing white area issue

                    for detected_face in detected_faces_final:
                        x = detected_face[0]
                        y = detected_face[1]
                        w = detected_face[2]
                        h = detected_face[3]

                        cv2.rectangle(freeze_img, (x, y), (x + w, y + h), (67, 67, 67), 1)  #draw rectangle to main image

                        #-------------------------------
                        #apply deep learning for custom_face

                        custom_face = base_img[y:y + h, x:x + w]

                        #-------------------------------
                        #facial attribute analysis

                        if enable_face_analysis == True:

                            gray_img = functions.detectFace(custom_face, (48, 48), True)
                            emotion_labels = ['Angry', 'Disgust', 'Fear', 'Happy', 'Sad', 'Surprise', 'Neutral']
                            emotion_predictions = emotion_model.predict(gray_img)[0, :]
                            sum_of_predictions = emotion_predictions.sum()

                            mood_items = []
                            for i in range(0, len(emotion_labels)):
                                mood_item = []
                                emotion_label = emotion_labels[i]
                                emotion_prediction = 100 * emotion_predictions[i] / sum_of_predictions
                                mood_item.append(emotion_label)
                                mood_item.append(emotion_prediction)
                                mood_items.append(mood_item)

                            emotion_df = pd.DataFrame(mood_items, columns=["emotion", "score"])
                            emotion_df = emotion_df.sort_values(by=["score"], ascending=False).reset_index(drop=True)

                            #background of mood box

                            #transparency
                            overlay = freeze_img.copy()
                            opacity = 0.4

                            if x + w + pivot_img_size < resolution_x:
                                #right
                                cv2.rectangle(freeze_img, (x + w, y),
                                              (x + w + pivot_img_size, y + h),
                                              (64, 64, 64), cv2.FILLED)

                                cv2.addWeighted(overlay, opacity, freeze_img, 1 - opacity, 0, freeze_img)

                            elif x - pivot_img_size > 0:
                                #left
                                cv2.rectangle(freeze_img, (x - pivot_img_size, y),
                                              (x, y + h),
                                              (64, 64, 64), cv2.FILLED)

                                cv2.addWeighted(overlay, opacity, freeze_img, 1 - opacity, 0, freeze_img)

                            for index, instance in emotion_df.iterrows():
                                emotion_label = "%s " % (instance['emotion'])
                                emotion_score = instance['score'] / 100

                                bar_x = 35  #bar size if an emotion were 100%
                                bar_x = int(bar_x * emotion_score)

                                if x + w + pivot_img_size < resolution_x:

                                    text_location_y = y + 20 + (index + 1) * 20
                                    text_location_x = x + w

                                    if text_location_y < y + h:
                                        cv2.putText(freeze_img, emotion_label,
                                                    (text_location_x, text_location_y),
                                                    cv2.FONT_HERSHEY_SIMPLEX, 0.5, (255, 255, 255), 1)

                                        cv2.rectangle(freeze_img,
                                                      (x + w + 70, y + 13 + (index + 1) * 20),
                                                      (x + w + 70 + bar_x, y + 13 + (index + 1) * 20 + 5),
                                                      (255, 255, 255), cv2.FILLED)

                                elif x - pivot_img_size > 0:

                                    text_location_y = y + 20 + (index + 1) * 20
                                    text_location_x = x - pivot_img_size

                                    if text_location_y <= y + h:
                                        cv2.putText(freeze_img, emotion_label,
                                                    (text_location_x, text_location_y),
                                                    cv2.FONT_HERSHEY_SIMPLEX, 0.5, (255, 255, 255), 1)

                                        cv2.rectangle(freeze_img,
                                                      (x - pivot_img_size + 70, y + 13 + (index + 1) * 20),
                                                      (x - pivot_img_size + 70 + bar_x, y + 13 + (index + 1) * 20 + 5),
                                                      (255, 255, 255), cv2.FILLED)

                            #-------------------------------

                            face_224 = functions.detectFace(custom_face, (224, 224), False)

                            age_predictions = age_model.predict(face_224)[0, :]
                            apparent_age = Age.findApparentAge(age_predictions)

                            #-------------------------------

                            gender_prediction = gender_model.predict(face_224)[0, :]

                            if np.argmax(gender_prediction) == 0:
                                gender = "W"
                            elif np.argmax(gender_prediction) == 1:
                                gender = "M"

                            #print(str(int(apparent_age)), " years old ", gender)

                            analysis_report = str(int(apparent_age)) + " " + gender

                            #-------------------------------

                            info_box_color = (46, 200, 255)

                            #top
                            if y - pivot_img_size + int(pivot_img_size / 5) > 0:

                                triangle_coordinates = np.array([
                                    (x + int(w / 2), y),
                                    (x + int(w / 2) - int(w / 10), y - int(pivot_img_size / 3)),
                                    (x + int(w / 2) + int(w / 10), y - int(pivot_img_size / 3))
                                ])

                                cv2.drawContours(freeze_img, [triangle_coordinates], 0, info_box_color, -1)

                                cv2.rectangle(freeze_img,
                                              (x + int(w / 5), y - pivot_img_size + int(pivot_img_size / 5)),
                                              (x + w - int(w / 5), y - int(pivot_img_size / 3)),
                                              info_box_color, cv2.FILLED)

                                cv2.putText(freeze_img, analysis_report,
                                            (x + int(w / 3.5), y - int(pivot_img_size / 2.1)),
                                            cv2.FONT_HERSHEY_SIMPLEX, 1, (0, 111, 255), 2)

                            #bottom
                            elif y + h + pivot_img_size - int(pivot_img_size / 5) < resolution_y:

                                triangle_coordinates = np.array([
                                    (x + int(w / 2), y + h),
                                    (x + int(w / 2) - int(w / 10), y + h + int(pivot_img_size / 3)),
                                    (x + int(w / 2) + int(w / 10), y + h + int(pivot_img_size / 3))
                                ])

                                cv2.drawContours(freeze_img, [triangle_coordinates], 0, info_box_color, -1)

                                cv2.rectangle(freeze_img,
                                              (x + int(w / 5), y + h + int(pivot_img_size / 3)),
                                              (x + w - int(w / 5), y + h + pivot_img_size - int(pivot_img_size / 5)),
                                              info_box_color, cv2.FILLED)

                                cv2.putText(freeze_img, analysis_report,
                                            (x + int(w / 3.5), y + h + int(pivot_img_size / 1.5)),
                                            cv2.FONT_HERSHEY_SIMPLEX, 1, (0, 111, 255), 2)

                        #-------------------------------
                        #face recognition

                        custom_face = functions.detectFace(custom_face, (input_shape_y, input_shape_x))

                        #check detectFace function handled
                        if custom_face.shape[1:3] == input_shape:
                            if df.shape[0] > 0:  #if there are images to verify, apply face recognition
                                img1_representation = model.predict(custom_face)[0, :]

                                #print(freezed_frame, " - ", img1_representation[0:5])

                                def findDistance(row):
                                    distance_metric = row['distance_metric']
                                    img2_representation = row['embedding']

                                    distance = 1000  #initialize with a very large value
                                    if distance_metric == 'cosine':
                                        distance = dst.findCosineDistance(img1_representation, img2_representation)
                                    elif distance_metric == 'euclidean':
                                        distance = dst.findEuclideanDistance(img1_representation, img2_representation)
                                    elif distance_metric == 'euclidean_l2':
                                        distance = dst.findEuclideanDistance(dst.l2_normalize(img1_representation),
                                                                             dst.l2_normalize(img2_representation))

                                    return distance

                                df['distance'] = df.apply(findDistance, axis=1)
                                df = df.sort_values(by=["distance"])

                                candidate = df.iloc[0]
                                employee_name = candidate['employee']
                                best_distance = candidate['distance']

                                #print(candidate[['employee', 'distance']].values)

                                #if True:
                                if best_distance <= threshold:
                                    #print(employee_name)
                                    display_img = cv2.imread(employee_name)

                                    display_img = cv2.resize(display_img, (pivot_img_size, pivot_img_size))

                                    label = employee_name.split("/")[-1].replace(".jpg", "")
                                    label = re.sub('[0-9]', '', label)

                                    try:
                                        if y - pivot_img_size > 0 and x + w + pivot_img_size < resolution_x:
                                            #top right
                                            freeze_img[y - pivot_img_size:y, x + w:x + w + pivot_img_size] = display_img

                                            overlay = freeze_img.copy()
                                            opacity = 0.4
                                            cv2.rectangle(freeze_img, (x + w, y), (x + w + pivot_img_size, y + 20), (46, 200, 255), cv2.FILLED)
                                            cv2.addWeighted(overlay, opacity, freeze_img, 1 - opacity, 0, freeze_img)

                                            cv2.putText(freeze_img, label, (x + w, y + 10), cv2.FONT_HERSHEY_SIMPLEX, 0.5, text_color, 1)

                                            #connect face and text
                                            cv2.line(freeze_img, (x + int(w / 2), y), (x + 3 * int(w / 4), y - int(pivot_img_size / 2)), (67, 67, 67), 1)
                                            cv2.line(freeze_img, (x + 3 * int(w / 4), y - int(pivot_img_size / 2)), (x + w, y - int(pivot_img_size / 2)), (67, 67, 67), 1)

                                        elif y + h + pivot_img_size < resolution_y and x - pivot_img_size > 0:
                                            #bottom left
                                            freeze_img[y + h:y + h + pivot_img_size, x - pivot_img_size:x] = display_img

                                            overlay = freeze_img.copy()
                                            opacity = 0.4
                                            cv2.rectangle(freeze_img, (x - pivot_img_size, y + h - 20), (x, y + h), (46, 200, 255), cv2.FILLED)
                                            cv2.addWeighted(overlay, opacity, freeze_img, 1 - opacity, 0, freeze_img)

                                            cv2.putText(freeze_img, label, (x - pivot_img_size, y + h - 10), cv2.FONT_HERSHEY_SIMPLEX, 0.5, text_color, 1)

                                            #connect face and text
                                            cv2.line(freeze_img, (x + int(w / 2), y + h), (x + int(w / 2) - int(w / 4), y + h + int(pivot_img_size / 2)), (67, 67, 67), 1)
                                            cv2.line(freeze_img, (x + int(w / 2) - int(w / 4), y + h + int(pivot_img_size / 2)), (x, y + h + int(pivot_img_size / 2)), (67, 67, 67), 1)

                                        elif y - pivot_img_size > 0 and x - pivot_img_size > 0:
                                            #top left
                                            freeze_img[y - pivot_img_size:y, x - pivot_img_size:x] = display_img

                                            overlay = freeze_img.copy()
                                            opacity = 0.4
                                            cv2.rectangle(freeze_img, (x - pivot_img_size, y), (x, y + 20), (46, 200, 255), cv2.FILLED)
                                            cv2.addWeighted(overlay, opacity, freeze_img, 1 - opacity, 0, freeze_img)

                                            cv2.putText(freeze_img, label, (x - pivot_img_size, y + 10), cv2.FONT_HERSHEY_SIMPLEX, 0.5, text_color, 1)

                                            #connect face and text
                                            cv2.line(freeze_img, (x + int(w / 2), y), (x + int(w / 2) - int(w / 4), y - int(pivot_img_size / 2)), (67, 67, 67), 1)
                                            cv2.line(freeze_img, (x + int(w / 2) - int(w / 4), y - int(pivot_img_size / 2)), (x, y - int(pivot_img_size / 2)), (67, 67, 67), 1)

                                        elif x + w + pivot_img_size < resolution_x and y + h + pivot_img_size < resolution_y:
                                            #bottom right
                                            freeze_img[y + h:y + h + pivot_img_size, x + w:x + w + pivot_img_size] = display_img

                                            overlay = freeze_img.copy()
                                            opacity = 0.4
                                            cv2.rectangle(freeze_img, (x + w, y + h - 20), (x + w + pivot_img_size, y + h), (46, 200, 255), cv2.FILLED)
                                            cv2.addWeighted(overlay, opacity, freeze_img, 1 - opacity, 0, freeze_img)

                                            cv2.putText(freeze_img, label, (x + w, y + h - 10), cv2.FONT_HERSHEY_SIMPLEX, 0.5, text_color, 1)

                                            #connect face and text
                                            cv2.line(freeze_img, (x + int(w / 2), y + h), (x + int(w / 2) + int(w / 4), y + h + int(pivot_img_size / 2)), (67, 67, 67), 1)
                                            cv2.line(freeze_img, (x + int(w / 2) + int(w / 4), y + h + int(pivot_img_size / 2)), (x + w, y + h + int(pivot_img_size / 2)), (67, 67, 67), 1)
                                    except Exception as err:
                                        print(str(err))

                        tic = time.time()  #in this way, the frozen image can be shown for 5 seconds

                        #-------------------------------

                time_left = int(time_threshold - (toc - tic) + 1)

                cv2.rectangle(freeze_img, (10, 10), (90, 50), (67, 67, 67), -10)
                cv2.putText(freeze_img, str(time_left), (40, 40), cv2.FONT_HERSHEY_SIMPLEX, 1, (255, 255, 255), 1)

                cv2.imshow('img', freeze_img)

                freezed_frame = freezed_frame + 1
            else:
                face_detected = False
                face_included_frames = 0
                freeze = False
                freezed_frame = 0

        else:
            cv2.imshow('img', img)

        if cv2.waitKey(1) & 0xFF == ord('q'):  #press q to quit
            break

    #kill open cv things
    cap.release()
    cv2.destroyAllWindows()
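# Usage sketch (illustrative comment, not part of the original source). analysis() opens
# the default webcam (device 0) and runs until 'q' is pressed, so it needs camera and GUI
# access; "my_db" is a hypothetical folder of .jpg images.
#
#   analysis(db_path="my_db", model_name="VGG-Face",
#            distance_metric="cosine", enable_face_analysis=True)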
            vggface_model = VGGFace.loadModel()
        elif index == 1:
            pbar.set_description("Loading OpenFace")
            openface_model = OpenFace.loadModel()
        elif index == 2:
            pbar.set_description("Loading Google FaceNet")
            facenet_model = Facenet.loadModel()
        elif index == 3:
            pbar.set_description("Loading Facebook DeepFace")
            deepface_model = FbDeepFace.loadModel()
        elif index == 4:
            pbar.set_description("Loading DeepID DeepFace")
            deepid_model = DeepID.loadModel()
        elif index == 5:
            pbar.set_description("Loading Dlib ResNet DeepFace")
            dlib_model = DlibResNet()

    toc = time.time()

    print("Face recognition models are built in ", toc - tic, " seconds")

    #------------------------------

    tic = time.time()

    print("Loading Facial Attribute Analysis Models...")

    pbar = tqdm(range(0, 4), desc='Loading Facial Attribute Analysis Models...')

    for index in pbar:
        if index == 0: