Example 1
def create_model(db_path, model_name):
    if model_name == 'VGG-Face':
        print("Using VGG-Face model backend and")
        model = VGGFace.loadModel()
        input_shape = (224, 224)

    elif model_name == 'OpenFace':
        print("Using OpenFace model backend")
        model = OpenFace.loadModel()
        input_shape = (96, 96)

    elif model_name == 'Facenet':
        print("Using Facenet model backend")
        model = Facenet.loadModel()
        input_shape = (160, 160)

    elif model_name == 'DeepFace':
        print("Using FB DeepFace model backend")
        model = FbDeepFace.loadModel()
        input_shape = (152, 152)

    elif model_name == 'DeepID':
        print("Using DeepID model backend")
        model = DeepID.loadModel()
        input_shape = (55, 47)

    elif model_name == 'Dlib':
        print("Using Dlib model backend")
        from deepface.basemodels.DlibResNet import DlibResNet
        model = DlibResNet()
        input_shape = (150, 150)

    else:
        raise ValueError("Invalid model_name passed - ", model_name)
    return model, input_shape
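A minimal call sketch for the factory above. The db_path argument is accepted but unused by this function, and the path below is hypothetical:

model, input_shape = create_model("dataset/", "Facenet")
print("model expects inputs of shape", input_shape)  #(160, 160)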
Example 2
def loadModel():

    model = VGGFace.baseModel()

    #--------------------------

    classes = 101
    base_model_output = Sequential()
    base_model_output = Convolution2D(classes, (1, 1), name='predictions')(
        model.layers[-4].output)
    base_model_output = Flatten()(base_model_output)
    base_model_output = Activation('softmax')(base_model_output)

    #--------------------------

    age_model = Model(inputs=model.input, outputs=base_model_output)

    #--------------------------

    #load weights

    home = str(Path.home())

    if os.path.isfile(home +
                      '/.deepface/weights/age_model_weights.h5') != True:
        print("age_model_weights.h5 will be downloaded...")

        url = 'https://drive.google.com/uc?id=1YCox_4kJ-BYeXq27uUbasu--yz28zUMV'
        output = home + '/.deepface/weights/age_model_weights.h5'
        gdown.download(url, output, quiet=False)

    age_model.load_weights(home + '/.deepface/weights/age_model_weights.h5')

    return age_model
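The 101 softmax outputs correspond to ages 0 through 100; deepface reduces them to a single apparent age by taking the expected value over that range. A sketch, assuming img is a preprocessed (1, 224, 224, 3) face array:

import numpy as np

age_model = loadModel()
predictions = age_model.predict(img)[0, :]  #101 class probabilities
apparent_age = np.sum(predictions * np.arange(0, 101))  #expected value over ages 0..100
print("apparent age:", int(apparent_age))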
Example 3
def loadModel():
	
	model = VGGFace.baseModel()
	
	#--------------------------
	
	classes = 6
	base_model_output = Sequential()
	base_model_output = Convolution2D(classes, (1, 1), name='predictions')(model.layers[-4].output)
	base_model_output = Flatten()(base_model_output)
	base_model_output = Activation('softmax')(base_model_output)
	
	#--------------------------

	race_model = Model(inputs=model.input, outputs=base_model_output)
	
	#--------------------------
	
	#load weights (expects race_model_single_batch.h5 in the current working directory)

	race_model.load_weights('race_model_single_batch.h5')
	
	return race_model
Example 4
def loadModel(url = 'https://github.com/serengil/deepface_models/releases/download/v1.0/gender_model_weights.h5'):

	model = VGGFace.baseModel()

	#--------------------------

	classes = 2
	base_model_output = Sequential()
	base_model_output = Convolution2D(classes, (1, 1), name='predictions')(model.layers[-4].output)
	base_model_output = Flatten()(base_model_output)
	base_model_output = Activation('softmax')(base_model_output)

	#--------------------------

	gender_model = Model(inputs=model.input, outputs=base_model_output)

	#--------------------------

	#load weights

	home = str(Path.home())

	if os.path.isfile(home+'/.deepface/weights/gender_model_weights.h5') != True:
		print("gender_model_weights.h5 will be downloaded...")

		output = home+'/.deepface/weights/gender_model_weights.h5'
		gdown.download(url, output, quiet=False)

	gender_model.load_weights(home+'/.deepface/weights/gender_model_weights.h5')

	return gender_model
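A usage sketch for the two-class gender model; index 0 maps to woman and index 1 to man, matching the argmax convention used in Example 19 below. face is assumed to be a preprocessed (1, 224, 224, 3) array:

import numpy as np

gender_model = loadModel()
prediction = gender_model.predict(face)[0, :]
gender = "Woman" if np.argmax(prediction) == 0 else "Man"
print(gender)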
Example 5
def loadModel():

    model = VGGFace.baseModel()

    #--------------------------

    classes = 101
    base_model_output = Sequential()
    base_model_output = Convolution2D(classes, (1, 1), name='predictions')(
        model.layers[-4].output)
    base_model_output = Flatten()(base_model_output)
    base_model_output = Activation('softmax')(base_model_output)

    #--------------------------

    age_model = Model(inputs=model.input, outputs=base_model_output)

    #--------------------------

    #load weights

    home = str(Path.home())

    if os.path.isfile(home +
                      '/.deepface/weights/age_model_weights.h5') != True:
        print("age_model_weights.h5 will be downloaded...")

        # https://drive.google.com/file/d/1_yy137mGjpPU2rpqT1sSsUEPkfq90jbk/view?usp=sharing
        url = 'https://drive.google.com/uc?id=1_yy137mGjpPU2rpqT1sSsUEPkfq90jbk'
        output = home + '/.deepface/weights/age_model_weights.h5'
        gdown.download(url, output, quiet=False)

    age_model.load_weights(home + '/.deepface/weights/age_model_weights.h5')

    return age_model
Example 6
def loadModel():

    model = VGGFace.baseModel()

    #--------------------------

    classes = 6
    base_model_output = Sequential()
    base_model_output = Convolution2D(classes, (1, 1), name='predictions')(
        model.layers[-4].output)
    base_model_output = Flatten()(base_model_output)
    base_model_output = Activation('softmax')(base_model_output)

    #--------------------------

    race_model = Model(inputs=model.input, outputs=base_model_output)

    #--------------------------

    #load weights

    # home = str(Path.home())

    # if os.path.isfile(home+'/.deepface/weights/race_model_single_batch.h5') != True:
    # 	print("race_model_single_batch.h5 will be downloaded...")

    # 	#zip
    # 	url = 'https://drive.google.com/uc?id=1nz-WDhghGQBC4biwShQ9kYjvQMpO6smj'
    # 	output = home+'/.deepface/weights/race_model_single_batch.zip'
    # 	gdown.download(url, output, quiet=False)

    # 	#unzip race_model_single_batch.zip
    # 	with zipfile.ZipFile(output, 'r') as zip_ref:
    # 		zip_ref.extractall(home+'/.deepface/weights/')

    # race_model.load_weights(home+'/.deepface/weights/race_model_single_batch.h5')

    home = str(Path(__file__).parent.absolute())
    #print(home)
    if os.path.isfile(home + '/weights/race_model_single_batch.h5') != True:
        print("race_model_single_batch.h5 will be downloaded...")

        #zip
        url = 'https://drive.google.com/uc?id=1nz-WDhghGQBC4biwShQ9kYjvQMpO6smj'
        output = home + '/weights/race_model_single_batch.zip'
        gdown.download(url, output, quiet=False)

        #unzip race_model_single_batch.zip
        with zipfile.ZipFile(output, 'r') as zip_ref:
            zip_ref.extractall(home + '/weights/')

    race_model.load_weights(home + '/weights/race_model_single_batch.h5')

    return race_model
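A usage sketch for the six-class race model above; the label order below is the one deepface's analyze function uses, and face is assumed to be a preprocessed (1, 224, 224, 3) array:

import numpy as np

race_model = loadModel()
race_labels = ['asian', 'indian', 'black', 'white', 'middle eastern', 'latino hispanic']
prediction = race_model.predict(face)[0, :]
print("dominant race:", race_labels[np.argmax(prediction)])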
Example 7
def loadModel_age():
    model = VGGFace.baseModel()
    # --------------------------
    classes = 101
    base_model_output = Sequential()
    base_model_output = Convolution2D(classes, (1, 1), name='predictions')(
        model.layers[-4].output)
    base_model_output = Flatten()(base_model_output)
    base_model_output = Activation('softmax')(base_model_output)
    # --------------------------
    age_model = Model(inputs=model.input, outputs=base_model_output)
    # --------------------------
    # load weights
    age_model.load_weights('network/age_model_weights.h5')

    return age_model
Example 8
def loadModel(
    url='https://github.com/serengil/deepface_models/releases/download/v1.0/race_model_single_batch.h5'
):

    model = VGGFace.baseModel()

    #--------------------------

    classes = 6
    base_model_output = Sequential()
    base_model_output = Convolution2D(classes, (1, 1), name='predictions')(
        model.layers[-4].output)
    base_model_output = Flatten()(base_model_output)
    base_model_output = Activation('softmax')(base_model_output)

    #--------------------------

    race_model = Model(inputs=model.input, outputs=base_model_output)

    #--------------------------

    #load weights

    home = str(Path.home())

    if os.path.isfile(home +
                      '/.deepface/weights/race_model_single_batch.h5') != True:
        print("race_model_single_batch.h5 will be downloaded...")

        output = home + '/.deepface/weights/race_model_single_batch.h5'
        gdown.download(url, output, quiet=False)
        """
		#google drive source downloads zip
		output = home+'/.deepface/weights/race_model_single_batch.zip'
		gdown.download(url, output, quiet=False)

		#unzip race_model_single_batch.zip
		with zipfile.ZipFile(output, 'r') as zip_ref:
			zip_ref.extractall(home+'/.deepface/weights/')
		"""

    race_model.load_weights(home +
                            '/.deepface/weights/race_model_single_batch.h5')

    return race_model
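Because the weight source is a keyword argument here, it can be overridden without editing the function; the mirror URL below is hypothetical:

race_model = loadModel()  #default GitHub release asset
race_model = loadModel(url='https://example.com/mirror/race_model_single_batch.h5')  #any direct .h5 link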
Example 9
def loadModel(self):
    model = VGGFace.baseModel()
    #--------------------------
    classes = 2
    base_model_output = Sequential()
    base_model_output = Convolution2D(classes, (1, 1), name='predictions')(model.layers[-4].output)
    base_model_output = Flatten()(base_model_output)
    base_model_output = Activation('softmax')(base_model_output)
    #--------------------------
    gender_model = Model(inputs=model.input, outputs=base_model_output)
    #--------------------------
    #load weights
    home = str(Path.home())
    if os.path.isfile(home+'/.deepface/weights/gender_model_weights.h5') != True:
        print("gender_model_weights.h5 will be downloaded...")
        url = 'https://drive.google.com/uc?id=1wUXRVlbsni2FN9-jkS_f4UTUrm1bRLyk'
        output = home+'/.deepface/weights/gender_model_weights.h5'
        gdown.download(url, output, quiet=False)
    gender_model.load_weights(home+'/.deepface/weights/gender_model_weights.h5')
    return gender_model
Example 10
def loadModel():

    model = VGGFace.baseModel()

    #--------------------------

    classes = 6
    base_model_output = Sequential()
    base_model_output = Convolution2D(classes, (1, 1), name='predictions')(
        model.layers[-4].output)
    base_model_output = Flatten()(base_model_output)
    base_model_output = Activation('softmax')(base_model_output)

    #--------------------------

    race_model = Model(inputs=model.input, outputs=base_model_output)

    #--------------------------

    #load weights

    home = str(Path.home())

    if os.path.isfile(home +
                      '/.deepface/weights/race_model_single_batch.h5') != True:
        print("race_model_single_batch.h5 will be downloaded...")

        #zip https://drive.google.com/file/d/18BRraHOmlmXPYA5PiFIjeFNUkFDO69sn/view?usp=sharing
        url = 'https://drive.google.com/uc?id=18BRraHOmlmXPYA5PiFIjeFNUkFDO69sn'
        output = home + '/.deepface/weights/race_model_single_batch.zip'
        gdown.download(url, output, quiet=False)

        #unzip race_model_single_batch.zip
        with zipfile.ZipFile(output, 'r') as zip_ref:
            zip_ref.extractall(home + '/.deepface/weights/')

    race_model.load_weights(home +
                            '/.deepface/weights/race_model_single_batch.h5')

    return race_model
Example 11
def loadModel():

    model = VGGFace.baseModel()

    #--------------------------

    classes = 2
    base_model_output = Sequential()
    base_model_output = Convolution2D(classes, (1, 1), name='predictions')(
        model.layers[-4].output)
    base_model_output = Flatten()(base_model_output)
    base_model_output = Activation('softmax')(base_model_output)

    #--------------------------

    gender_model = Model(inputs=model.input, outputs=base_model_output)

    #--------------------------

    #load weights

    home = str(Path.home())

    if os.path.isfile(home +
                      '/.deepface/weights/gender_model_weights.h5') != True:
        print("gender_model_weights.h5 will be downloaded...")

        # https://drive.google.com/file/d/1T5qnV4s-au24MAkjZ53SZtCk8vup8Wi-/view?usp=sharing
        url = 'https://drive.google.com/uc?id=1T5qnV4s-au24MAkjZ53SZtCk8vup8Wi-'
        output = home + '/.deepface/weights/gender_model_weights.h5'
        gdown.download(url, output, quiet=False)

    gender_model.load_weights(home +
                              '/.deepface/weights/gender_model_weights.h5')

    return gender_model

Example 12

#--------------------------
# Merge positive and negative ones

df = pd.concat([positives, negatives]).reset_index(drop=True)

print(df.decision.value_counts())
df.file_x = "deepface/tests/dataset/" + df.file_x
df.file_y = "deepface/tests/dataset/" + df.file_y
# --------------------------
# DeepFace

from deepface import DeepFace
from deepface.basemodels import VGGFace, OpenFace, Facenet, FbDeepFace, DeepID

pretrained_models = {}

pretrained_models["VGG-Face"] = VGGFace.loadModel()
print("VGG-Face loaded")
pretrained_models["Facenet"] = Facenet.loadModel()
print("Facenet loaded")
pretrained_models["OpenFace"] = OpenFace.loadModel()
print("OpenFace loaded")
pretrained_models["DeepFace"] = FbDeepFace.loadModel()
print("FbDeepFace loaded")
pretrained_models["DeepID"] = DeepID.loadModel()
print("DeepID loaded")

instances = df[["file_x", "file_y"]].values.tolist()

models = ['VGG-Face']
metrics = ["cosine"]
Example 13
#------------------------------

app = Flask(__name__)

#------------------------------

tic = time.time()

print("Loading Face Recognition Models...")

pbar = tqdm(range(0, 4), desc='Loading Face Recognition Models...')

for index in pbar:
    if index == 0:
        pbar.set_description("Loading VGG-Face")
        vggface_model = VGGFace.loadModel()
    elif index == 1:
        pbar.set_description("Loading OpenFace")
        openface_model = OpenFace.loadModel()
    elif index == 2:
        pbar.set_description("Loading Google FaceNet")
        facenet_model = Facenet.loadModel()
    elif index == 3:
        pbar.set_description("Loading Facebook DeepFace")
        deepface_model = FbDeepFace.loadModel()

toc = time.time()

print("Face recognition models are built in ", toc - tic, " seconds")

#------------------------------
Example 14
def verify(img1_path, img2_path
	, model_name ='VGG-Face', distance_metric = 'cosine'):
	
	tic = time.time()
	
	if os.path.isfile(img1_path) != True:
		raise ValueError("Confirm that ",img1_path," exists")
	
	if os.path.isfile(img2_path) != True:
		raise ValueError("Confirm that ",img2_path," exists")
		
	#-------------------------
	
	#print("Face verification will be applied on ",model_name," model and ",distance_metric," metric")
	
	functions.validateInputs(model_name, distance_metric)
	
	#-------------------------
	
	#tuned thresholds for model and metric pair
	threshold = functions.findThreshold(model_name, distance_metric)
	
	#-------------------------
	
	if model_name == 'VGG-Face':
		model = VGGFace.loadModel()
		input_shape = (224, 224)	
	
	elif model_name == 'OpenFace':
		model = OpenFace.loadModel()
		input_shape = (96, 96)
	
	elif model_name == 'Facenet':
		model = Facenet.loadModel()
		input_shape = (160, 160)
	
	#-------------------------
	#crop face
	
	img1 = functions.detectFace(img1_path, input_shape)
	img2 = functions.detectFace(img2_path, input_shape)
	
	#-------------------------
	#TO-DO: Apply face alignment here. Experiments show that alignment increases accuracy by 1%.
	
	#-------------------------
	#find embeddings
	
	img1_representation = model.predict(img1)[0,:]
	img2_representation = model.predict(img2)[0,:]
	
	#-------------------------
	#find distances between embeddings
	
	if distance_metric == 'cosine':
		distance = dst.findCosineDistance(img1_representation, img2_representation)
	elif distance_metric == 'euclidean':
		distance = dst.findEuclideanDistance(img1_representation, img2_representation)
	elif distance_metric == 'euclidean_l2':
		distance = dst.findEuclideanDistance(dst.l2_normalize(img1_representation), dst.l2_normalize(img2_representation))
	
	#-------------------------
	#decision
	
	if distance <= threshold:
		identified = True
		message = "Both face photos are of the same person."
	else:
		identified = False
		message = "The face photos are not of the same person!"
	
	#-------------------------
	
	plot = False
	
	if plot:
		label = "Distance is "+str(round(distance, 2))+"\nwhereas max threshold is "+ str(threshold)+ ".\n"+ message
		
		fig = plt.figure()
		fig.add_subplot(1,2, 1)
		plt.imshow(img1[0][:, :, ::-1])
		plt.xticks([]); plt.yticks([])
		fig.add_subplot(1,2, 2)
		plt.imshow(img2[0][:, :, ::-1])
		plt.xticks([]); plt.yticks([])
		fig.suptitle(label, fontsize=17)
		plt.show(block=True)
	
	#-------------------------
	
	toc = time.time()
	
	#print("identification lasts ",toc-tic," seconds")
	
	#Return a tuple. The first item is the verification result based on the tuned threshold,
	#the second is the distance and the third is the threshold. You might want to customize this threshold to identify faces.
	return (identified, distance, threshold)
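A call sketch for this early verify variant, which returns a plain tuple rather than a dict; the image paths are hypothetical:

identified, distance, threshold = verify("dataset/img1.jpg", "dataset/img2.jpg",
                                         model_name='Facenet',
                                         distance_metric='euclidean_l2')
print("same person:", identified, "- distance", round(distance, 4), "vs threshold", threshold)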
Example 15
#!pip install deepface
from deepface.basemodels import VGGFace, OpenFace, Facenet, FbDeepFace
from deepface.commons import functions

import matplotlib.pyplot as plt
import numpy as np

#----------------------------------------------
#build face recognition model

model = VGGFace.loadModel()
#model = Facenet.loadModel()
#model = OpenFace.loadModel()
#model = FbDeepFace.loadModel()

input_shape = model.layers[0].input_shape[1:3]

print("model input shape: ", model.layers[0].input_shape[1:])
print("model output shape: ", model.layers[-1].input_shape[-1])

#----------------------------------------------
#load images and find embeddings

#img1 = functions.detectFace("dataset/img1.jpg", input_shape)
img1 = functions.preprocess_face("dataset/img1.jpg", input_shape)
img1_representation = model.predict(img1)[0, :]

#img2 = functions.detectFace("dataset/img3.jpg", input_shape)
img2 = functions.preprocess_face("dataset/img3.jpg", input_shape)
img2_representation = model.predict(img2)[0, :]
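With both representations in hand, the distance can be computed directly; this numpy sketch mirrors what deepface's dst.findCosineDistance returns:

def cosine_distance(a, b):
    a, b = np.asarray(a), np.asarray(b)
    return 1 - np.dot(a, b) / (np.linalg.norm(a) * np.linalg.norm(b))

print("cosine distance:", cosine_distance(img1_representation, img2_representation))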
Example 16
for i in range(0, len(dataset)):
    item = resp_obj['pair_%s' % (i + 1)]
    verified = item["verified"]
    score = item["score"]
    print(verified)

#-----------------------------------
print("--------------------------")

print("Pre-trained ensemble method - find")

from deepface import DeepFace
from deepface.basemodels import VGGFace, OpenFace, Facenet, FbDeepFace

model = {}
model["VGG-Face"] = VGGFace.loadModel()
print("VGG loaded")
model["Facenet"] = Facenet.loadModel()
print("Facenet loaded")
model["OpenFace"] = OpenFace.loadModel()
print("OpenFace loaded")
model["DeepFace"] = FbDeepFace.loadModel()
print("DeepFace loaded")

df = DeepFace.find("dataset/img1.jpg",
                   db_path="dataset",
                   model_name='Ensemble',
                   model=model,
                   enforce_detection=False)

print(df)
Example 17
def find(img_path,
         db_path,
         model_name='VGG-Face',
         distance_metric='cosine',
         model=None,
         enforce_detection=True):

    tic = time.time()

    if type(img_path) == list:
        bulkProcess = True
        img_paths = img_path.copy()
    else:
        bulkProcess = False
        img_paths = [img_path]

    if os.path.isdir(db_path) == True:

        #---------------------------------------

        if model == None:
            if model_name == 'VGG-Face':
                print("Using VGG-Face model backend and", distance_metric,
                      "distance.")
                model = VGGFace.loadModel()
            elif model_name == 'OpenFace':
                print("Using OpenFace model backend", distance_metric,
                      "distance.")
                model = OpenFace.loadModel()
            elif model_name == 'Facenet':
                print("Using Facenet model backend", distance_metric,
                      "distance.")
                model = Facenet.loadModel()
            elif model_name == 'DeepFace':
                print("Using FB DeepFace model backend", distance_metric,
                      "distance.")
                model = FbDeepFace.loadModel()
            else:
                raise ValueError("Invalid model_name passed - ", model_name)
        else:  #model != None
            print("Already built model is passed")

        input_shape = model.layers[0].input_shape[1:3]
        threshold = functions.findThreshold(model_name, distance_metric)

        #---------------------------------------

        file_name = "representations_%s.pkl" % (model_name)
        file_name = file_name.replace("-", "_").lower()

        if path.exists(db_path + "/" + file_name):

            print(
                "WARNING: Representations for images in ", db_path,
                " folder were previously stored in ", file_name,
                ". If you added new instances after this file was created, delete it and call find again; it will be rebuilt."
            )

            f = open(db_path + '/' + file_name, 'rb')
            representations = pickle.load(f)

            print("There are ", len(representations),
                  " representations found in ", file_name)

        else:
            employees = []

            for r, d, f in os.walk(
                    db_path):  # r=root, d=directories, f = files
                for file in f:
                    if ('.jpg' in file):
                        exact_path = r + "/" + file
                        employees.append(exact_path)

            if len(employees) == 0:
                raise ValueError("There is no image in ", db_path, " folder!")

            #------------------------
            #find representations for db images

            representations = []

            pbar = tqdm(range(0, len(employees)),
                        desc='Finding representations')

            #for employee in employees:
            for index in pbar:
                employee = employees[index]
                img = functions.detectFace(employee,
                                           input_shape,
                                           enforce_detection=enforce_detection)
                representation = model.predict(img)[0, :]

                instance = []
                instance.append(employee)
                instance.append(representation)

                representations.append(instance)

            f = open(db_path + '/' + file_name, "wb")
            pickle.dump(representations, f)
            f.close()

            print(
                "Representations stored in ", db_path, "/", file_name,
                " file. Please delete this file when you add new identities to your database."
            )

        #----------------------------
        #we got representations for database
        df = pd.DataFrame(representations,
                          columns=["identity", "representation"])
        df_base = df.copy()

        resp_obj = []

        global_pbar = tqdm(range(0, len(img_paths)), desc='Analyzing')
        for j in global_pbar:
            img_path = img_paths[j]

            #find representation for passed image
            img = functions.detectFace(img_path,
                                       input_shape,
                                       enforce_detection=enforce_detection)
            target_representation = model.predict(img)[0, :]

            distances = []
            for index, instance in df.iterrows():
                source_representation = instance["representation"]

                if distance_metric == 'cosine':
                    distance = dst.findCosineDistance(source_representation,
                                                      target_representation)
                elif distance_metric == 'euclidean':
                    distance = dst.findEuclideanDistance(
                        source_representation, target_representation)
                elif distance_metric == 'euclidean_l2':
                    distance = dst.findEuclideanDistance(
                        dst.l2_normalize(source_representation),
                        dst.l2_normalize(target_representation))
                else:
                    raise ValueError("Invalid distance_metric passed - ",
                                     distance_metric)

                distances.append(distance)

            df["distance"] = distances
            df = df.drop(columns=["representation"])
            df = df[df.distance <= threshold]

            df = df.sort_values(by=["distance"],
                                ascending=True).reset_index(drop=True)
            resp_obj.append(df)
            df = df_base.copy()  #restore df for the next iteration

        toc = time.time()

        print("find function lasts ", toc - tic, " seconds")

        if len(resp_obj) == 1:
            return resp_obj[0]

        return resp_obj

    else:
        raise ValueError("Passed db_path does not exist!")

    return None
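A usage sketch for find; a single image path returns one DataFrame sorted by distance, while a list of paths returns a list of DataFrames. The paths are hypothetical:

df = find("dataset/img1.jpg", db_path="dataset", model_name="VGG-Face")
if df.shape[0] > 0:
    print("closest match:", df.iloc[0]["identity"], "at distance", df.iloc[0]["distance"])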
Example 18
def verify(img1_path,
           img2_path,
           model_name='VGG-Face',
           distance_metric='cosine',
           plot=False):

    tic = time.time()

    if os.path.isfile(img1_path) != True:
        raise ValueError("Confirm that ", img1_path, " exists")

    if os.path.isfile(img2_path) != True:
        raise ValueError("Confirm that ", img2_path, " exists")

    #-------------------------

    #tuned thresholds for model and metric pair
    threshold = functions.findThreshold(model_name, distance_metric)

    #-------------------------

    if model_name == 'VGG-Face':
        print("Using VGG-Face model backend and", distance_metric, "distance.")
        model = VGGFace.loadModel()
        input_shape = (224, 224)

    elif model_name == 'OpenFace':
        print("Using OpenFace model backend", distance_metric, "distance.")
        model = OpenFace.loadModel()
        input_shape = (96, 96)

    elif model_name == 'Facenet':
        print("Using Facenet model backend", distance_metric, "distance.")
        model = Facenet.loadModel()
        input_shape = (160, 160)

    elif model_name == 'DeepFace':
        print("Using FB DeepFace model backend", distance_metric, "distance.")
        model = FbDeepFace.loadModel()
        input_shape = (152, 152)

    else:
        raise ValueError("Invalid model_name passed - ", model_name)

    #-------------------------
    #crop face

    img1 = functions.detectFace(img1_path, input_shape)
    img2 = functions.detectFace(img2_path, input_shape)

    #-------------------------
    #find embeddings

    img1_representation = model.predict(img1)[0, :]
    img2_representation = model.predict(img2)[0, :]

    #-------------------------
    #find distances between embeddings

    if distance_metric == 'cosine':
        distance = dst.findCosineDistance(img1_representation,
                                          img2_representation)
    elif distance_metric == 'euclidean':
        distance = dst.findEuclideanDistance(img1_representation,
                                             img2_representation)
    elif distance_metric == 'euclidean_l2':
        distance = dst.findEuclideanDistance(
            dst.l2_normalize(img1_representation),
            dst.l2_normalize(img2_representation))
    else:
        raise ValueError("Invalid distance_metric passed - ", distance_metric)

    #-------------------------
    #decision

    if distance <= threshold:
        identified = "true"
    else:
        identified = "false"

    #-------------------------

    if plot:
        label = "Verified: " + identified
        label += "\nThreshold: " + str(round(distance, 2))
        label += ", Max Threshold to Verify: " + str(threshold)
        label += "\nModel: " + model_name
        label += ", Similarity metric: " + distance_metric

        fig = plt.figure()
        fig.add_subplot(1, 2, 1)
        plt.imshow(img1[0][:, :, ::-1])
        plt.xticks([])
        plt.yticks([])
        fig.add_subplot(1, 2, 2)
        plt.imshow(img2[0][:, :, ::-1])
        plt.xticks([])
        plt.yticks([])
        fig.suptitle(label, fontsize=17)
        plt.show(block=True)

    #-------------------------

    toc = time.time()

    resp_obj = "{"
    resp_obj += "\"verified\": " + identified
    resp_obj += ", \"distance\": " + str(distance)
    resp_obj += ", \"max_threshold_to_verify\": " + str(threshold)
    resp_obj += ", \"model\": \"" + model_name + "\""
    resp_obj += ", \"similarity_metric\": \"" + distance_metric + "\""
    resp_obj += "}"

    resp_obj = json.loads(resp_obj)  #string to json

    #print("identification lasts ",toc-tic," seconds")

    return resp_obj
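Unlike Example 14, this variant returns a dict; a call sketch with hypothetical paths:

resp = verify("dataset/img1.jpg", "dataset/img2.jpg",
              model_name='VGG-Face', distance_metric='cosine')
print(resp["verified"], resp["distance"], resp["max_threshold_to_verify"])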
Example 19
def analysis(db_path, model_name, distance_metric, enable_face_analysis=True):

    input_shape = (224, 224)
    text_color = (255, 255, 255)

    employees = []
    #check passed db folder exists
    if os.path.isdir(db_path) == True:
        for r, d, f in os.walk(db_path):  # r=root, d=directories, f = files
            for file in f:
                if ('.jpg' in file):
                    #exact_path = os.path.join(r, file)
                    exact_path = r + "/" + file
                    #print(exact_path)
                    employees.append(exact_path)

    #------------------------

    if len(employees) > 0:
        if model_name == 'VGG-Face':
            print("Using VGG-Face model backend and", distance_metric,
                  "distance.")
            model = VGGFace.loadModel()
            input_shape = (224, 224)

        elif model_name == 'OpenFace':
            print("Using OpenFace model backend", distance_metric, "distance.")
            model = OpenFace.loadModel()
            input_shape = (96, 96)

        elif model_name == 'Facenet':
            print("Using Facenet model backend", distance_metric, "distance.")
            model = Facenet.loadModel()
            input_shape = (160, 160)

        elif model_name == 'DeepFace':
            print("Using FB DeepFace model backend", distance_metric,
                  "distance.")
            model = FbDeepFace.loadModel()
            input_shape = (152, 152)

        else:
            raise ValueError("Invalid model_name passed - ", model_name)
        #------------------------

        #tuned thresholds for model and metric pair
        threshold = functions.findThreshold(model_name, distance_metric)

    #------------------------
    #facial attribute analysis models

    if enable_face_analysis == True:

        tic = time.time()

        emotion_model = Emotion.loadModel()
        print("Emotion model loaded")

        age_model = Age.loadModel()
        print("Age model loaded")

        gender_model = Gender.loadModel()
        print("Gender model loaded")

        toc = time.time()

        print("Facial attibute analysis models loaded in ", toc - tic,
              " seconds")

    #------------------------

    #find embeddings for employee list

    tic = time.time()

    pbar = tqdm(range(0, len(employees)), desc='Finding embeddings')

    embeddings = []
    #for employee in employees:
    for index in pbar:
        employee = employees[index]
        pbar.set_description("Finding embedding for %s" %
                             (employee.split("/")[-1]))
        embedding = []
        img = functions.detectFace(employee, input_shape)
        img_representation = model.predict(img)[0, :]

        embedding.append(employee)
        embedding.append(img_representation)
        embeddings.append(embedding)

    df = pd.DataFrame(embeddings, columns=['employee', 'embedding'])
    df['distance_metric'] = distance_metric

    toc = time.time()

    print("Embeddings found for given data set in ", toc - tic, " seconds")

    #-----------------------

    time_threshold = 5
    frame_threshold = 5
    pivot_img_size = 112  #face recognition result image

    #-----------------------

    opencv_path = functions.get_opencv_path()
    face_detector_path = opencv_path + "haarcascade_frontalface_default.xml"
    face_cascade = cv2.CascadeClassifier(face_detector_path)

    #-----------------------

    freeze = False
    face_detected = False
    face_included_frames = 0  #freeze screen if a face is detected sequentially for 5 frames
    freezed_frame = 0
    tic = time.time()

    cap = cv2.VideoCapture(0)  #webcam
    #cap = cv2.VideoCapture("C:/Users/IS96273/Desktop/skype-video-1.mp4") #video

    while (True):
        ret, img = cap.read()

        #cv2.namedWindow('img', cv2.WINDOW_FREERATIO)
        #cv2.setWindowProperty('img', cv2.WND_PROP_FULLSCREEN, cv2.WINDOW_FULLSCREEN)

        raw_img = img.copy()
        resolution = img.shape

        resolution_x = img.shape[1]
        resolution_y = img.shape[0]

        if freeze == False:
            faces = face_cascade.detectMultiScale(img, 1.3, 5)

            if len(faces) == 0:
                face_included_frames = 0
        else:
            faces = []

        detected_faces = []
        face_index = 0
        for (x, y, w, h) in faces:
            if w > 130:  #discard small detected faces

                face_detected = True
                if face_index == 0:
                    face_included_frames = face_included_frames + 1  #increase frame for a single face

                cv2.rectangle(img, (x, y), (x + w, y + h), (67, 67, 67),
                              1)  #draw rectangle to main image

                cv2.putText(img, str(frame_threshold - face_included_frames),
                            (int(x + w / 4), int(y + h / 1.5)),
                            cv2.FONT_HERSHEY_SIMPLEX, 4, (255, 255, 255), 2)

                detected_face = img[int(y):int(y + h),
                                    int(x):int(x + w)]  #crop detected face

                #-------------------------------------

                detected_faces.append((x, y, w, h))
                face_index = face_index + 1

                #-------------------------------------

        if face_detected == True and face_included_frames == frame_threshold and freeze == False:
            freeze = True
            #base_img = img.copy()
            base_img = raw_img.copy()
            detected_faces_final = detected_faces.copy()
            tic = time.time()

        if freeze == True:

            toc = time.time()
            if (toc - tic) < time_threshold:

                if freezed_frame == 0:
                    freeze_img = base_img.copy()
                    #freeze_img = np.zeros(resolution, np.uint8) #here, np.uint8 handles showing white area issue

                    for detected_face in detected_faces_final:
                        x = detected_face[0]
                        y = detected_face[1]
                        w = detected_face[2]
                        h = detected_face[3]

                        cv2.rectangle(freeze_img, (x, y), (x + w, y + h),
                                      (67, 67, 67),
                                      1)  #draw rectangle to main image

                        #-------------------------------

                        #apply deep learning for custom_face

                        custom_face = base_img[y:y + h, x:x + w]

                        #-------------------------------
                        #facial attribute analysis

                        if enable_face_analysis == True:

                            gray_img = functions.detectFace(
                                custom_face, (48, 48), True)
                            emotion_labels = [
                                'Angry', 'Disgust', 'Fear', 'Happy', 'Sad',
                                'Surprise', 'Neutral'
                            ]
                            emotion_predictions = emotion_model.predict(
                                gray_img)[0, :]
                            sum_of_predictions = emotion_predictions.sum()

                            mood_items = []
                            for i in range(0, len(emotion_labels)):
                                mood_item = []
                                emotion_label = emotion_labels[i]
                                emotion_prediction = 100 * emotion_predictions[
                                    i] / sum_of_predictions
                                mood_item.append(emotion_label)
                                mood_item.append(emotion_prediction)
                                mood_items.append(mood_item)

                            emotion_df = pd.DataFrame(
                                mood_items, columns=["emotion", "score"])
                            emotion_df = emotion_df.sort_values(
                                by=["score"],
                                ascending=False).reset_index(drop=True)

                            #background of mood box

                            #transparency
                            overlay = freeze_img.copy()
                            opacity = 0.4

                            if x + w + pivot_img_size < resolution_x:
                                #right
                                cv2.rectangle(
                                    freeze_img
                                    #, (x+w,y+20)
                                    ,
                                    (x + w, y),
                                    (x + w + pivot_img_size, y + h),
                                    (64, 64, 64),
                                    cv2.FILLED)

                                cv2.addWeighted(overlay, opacity, freeze_img,
                                                1 - opacity, 0, freeze_img)

                            elif x - pivot_img_size > 0:
                                #left
                                cv2.rectangle(
                                    freeze_img
                                    #, (x-pivot_img_size,y+20)
                                    ,
                                    (x - pivot_img_size, y),
                                    (x, y + h),
                                    (64, 64, 64),
                                    cv2.FILLED)

                                cv2.addWeighted(overlay, opacity, freeze_img,
                                                1 - opacity, 0, freeze_img)

                            for index, instance in emotion_df.iterrows():
                                emotion_label = "%s " % (instance['emotion'])
                                emotion_score = instance['score'] / 100

                                bar_x = 35  #bar length when an emotion is 100%
                                bar_x = int(bar_x * emotion_score)

                                if x + w + pivot_img_size < resolution_x:

                                    text_location_y = y + 20 + (index + 1) * 20
                                    text_location_x = x + w

                                    if text_location_y < y + h:
                                        cv2.putText(
                                            freeze_img, emotion_label,
                                            (text_location_x, text_location_y),
                                            cv2.FONT_HERSHEY_SIMPLEX, 0.5,
                                            (255, 255, 255), 1)

                                        cv2.rectangle(
                                            freeze_img, (x + w + 70, y + 13 +
                                                         (index + 1) * 20),
                                            (x + w + 70 + bar_x, y + 13 +
                                             (index + 1) * 20 + 5),
                                            (255, 255, 255), cv2.FILLED)

                                elif x - pivot_img_size > 0:

                                    text_location_y = y + 20 + (index + 1) * 20
                                    text_location_x = x - pivot_img_size

                                    if text_location_y <= y + h:
                                        cv2.putText(
                                            freeze_img, emotion_label,
                                            (text_location_x, text_location_y),
                                            cv2.FONT_HERSHEY_SIMPLEX, 0.5,
                                            (255, 255, 255), 1)

                                        cv2.rectangle(
                                            freeze_img,
                                            (x - pivot_img_size + 70, y + 13 +
                                             (index + 1) * 20),
                                            (x - pivot_img_size + 70 + bar_x,
                                             y + 13 + (index + 1) * 20 + 5),
                                            (255, 255, 255), cv2.FILLED)

                            #-------------------------------

                            face_224 = functions.detectFace(
                                custom_face, (224, 224), False)

                            age_predictions = age_model.predict(face_224)[0, :]
                            apparent_age = Age.findApparentAge(age_predictions)

                            #-------------------------------

                            gender_prediction = gender_model.predict(face_224)[
                                0, :]

                            if np.argmax(gender_prediction) == 0:
                                gender = "W"
                            elif np.argmax(gender_prediction) == 1:
                                gender = "M"

                            #print(str(int(apparent_age))," years old ", dominant_emotion, " ", gender)

                            analysis_report = str(
                                int(apparent_age)) + " " + gender

                            #-------------------------------

                            info_box_color = (46, 200, 255)

                            #top
                            if y - pivot_img_size + int(
                                    pivot_img_size / 5) > 0:

                                triangle_coordinates = np.array([
                                    (x + int(w / 2), y),
                                    (x + int(w / 2) - int(w / 10),
                                     y - int(pivot_img_size / 3)),
                                    (x + int(w / 2) + int(w / 10),
                                     y - int(pivot_img_size / 3))
                                ])

                                cv2.drawContours(freeze_img,
                                                 [triangle_coordinates], 0,
                                                 info_box_color, -1)

                                cv2.rectangle(
                                    freeze_img,
                                    (x + int(w / 5), y - pivot_img_size +
                                     int(pivot_img_size / 5)),
                                    (x + w - int(w / 5),
                                     y - int(pivot_img_size / 3)),
                                    info_box_color, cv2.FILLED)

                                cv2.putText(freeze_img, analysis_report,
                                            (x + int(w / 3.5),
                                             y - int(pivot_img_size / 2.1)),
                                            cv2.FONT_HERSHEY_SIMPLEX, 1,
                                            (0, 111, 255), 2)

                            #bottom
                            elif y + h + pivot_img_size - int(
                                    pivot_img_size / 5) < resolution_y:

                                triangle_coordinates = np.array([
                                    (x + int(w / 2), y + h),
                                    (x + int(w / 2) - int(w / 10),
                                     y + h + int(pivot_img_size / 3)),
                                    (x + int(w / 2) + int(w / 10),
                                     y + h + int(pivot_img_size / 3))
                                ])

                                cv2.drawContours(freeze_img,
                                                 [triangle_coordinates], 0,
                                                 info_box_color, -1)

                                cv2.rectangle(
                                    freeze_img,
                                    (x + int(w / 5),
                                     y + h + int(pivot_img_size / 3)),
                                    (x + w - int(w / 5), y + h +
                                     pivot_img_size - int(pivot_img_size / 5)),
                                    info_box_color, cv2.FILLED)

                                cv2.putText(freeze_img, analysis_report,
                                            (x + int(w / 3.5), y + h +
                                             int(pivot_img_size / 1.5)),
                                            cv2.FONT_HERSHEY_SIMPLEX, 1,
                                            (0, 111, 255), 2)

                        #-------------------------------
                        #face recognition

                        custom_face = functions.detectFace(
                            custom_face, input_shape)

                        #check detectFace function handled
                        if custom_face.shape[1:3] == input_shape:
                            if df.shape[
                                    0] > 0:  #if there are images to verify, apply face recognition
                                img1_representation = model.predict(
                                    custom_face)[0, :]

                                #print(freezed_frame," - ",img1_representation[0:5])

                                def findDistance(row):
                                    distance_metric = row['distance_metric']
                                    img2_representation = row['embedding']

                                    distance = 1000  #initialize very large value
                                    if distance_metric == 'cosine':
                                        distance = dst.findCosineDistance(
                                            img1_representation,
                                            img2_representation)
                                    elif distance_metric == 'euclidean':
                                        distance = dst.findEuclideanDistance(
                                            img1_representation,
                                            img2_representation)
                                    elif distance_metric == 'euclidean_l2':
                                        distance = dst.findEuclideanDistance(
                                            dst.l2_normalize(
                                                img1_representation),
                                            dst.l2_normalize(
                                                img2_representation))

                                    return distance

                                df['distance'] = df.apply(findDistance, axis=1)
                                df = df.sort_values(by=["distance"])

                                candidate = df.iloc[0]
                                employee_name = candidate['employee']
                                best_distance = candidate['distance']

                                if best_distance <= threshold:
                                    #print(employee_name)
                                    display_img = cv2.imread(employee_name)

                                    display_img = cv2.resize(
                                        display_img,
                                        (pivot_img_size, pivot_img_size))

                                    label = employee_name.split(
                                        "/")[-1].replace(".jpg", "")
                                    label = re.sub('[0-9]', '', label)

                                    try:
                                        if y - pivot_img_size > 0 and x + w + pivot_img_size < resolution_x:
                                            #top right
                                            freeze_img[
                                                y - pivot_img_size:y,
                                                x + w:x + w +
                                                pivot_img_size] = display_img

                                            overlay = freeze_img.copy()
                                            opacity = 0.4
                                            cv2.rectangle(
                                                freeze_img, (x + w, y),
                                                (x + w + pivot_img_size,
                                                 y + 20), (46, 200, 255),
                                                cv2.FILLED)
                                            cv2.addWeighted(
                                                overlay, opacity, freeze_img,
                                                1 - opacity, 0, freeze_img)

                                            cv2.putText(
                                                freeze_img, label,
                                                (x + w, y + 10),
                                                cv2.FONT_HERSHEY_SIMPLEX, 0.5,
                                                text_color, 1)

                                            #connect face and text
                                            cv2.line(freeze_img,
                                                     (x + int(w / 2), y),
                                                     (x + 3 * int(w / 4), y -
                                                      int(pivot_img_size / 2)),
                                                     (67, 67, 67), 1)
                                            cv2.line(freeze_img,
                                                     (x + 3 * int(w / 4), y -
                                                      int(pivot_img_size / 2)),
                                                     (x + w, y -
                                                      int(pivot_img_size / 2)),
                                                     (67, 67, 67), 1)

                                        elif y + h + pivot_img_size < resolution_y and x - pivot_img_size > 0:
                                            #bottom left
                                            freeze_img[
                                                y + h:y + h + pivot_img_size,
                                                x -
                                                pivot_img_size:x] = display_img

                                            overlay = freeze_img.copy()
                                            opacity = 0.4
                                            cv2.rectangle(
                                                freeze_img,
                                                (x - pivot_img_size,
                                                 y + h - 20), (x, y + h),
                                                (46, 200, 255), cv2.FILLED)
                                            cv2.addWeighted(
                                                overlay, opacity, freeze_img,
                                                1 - opacity, 0, freeze_img)

                                            cv2.putText(
                                                freeze_img, label,
                                                (x - pivot_img_size,
                                                 y + h - 10),
                                                cv2.FONT_HERSHEY_SIMPLEX, 0.5,
                                                text_color, 1)

                                            #connect face and text
                                            cv2.line(freeze_img,
                                                     (x + int(w / 2), y + h),
                                                     (x + int(w / 2) -
                                                      int(w / 4), y + h +
                                                      int(pivot_img_size / 2)),
                                                     (67, 67, 67), 1)
                                            cv2.line(freeze_img,
                                                     (x + int(w / 2) -
                                                      int(w / 4), y + h +
                                                      int(pivot_img_size / 2)),
                                                     (x, y + h +
                                                      int(pivot_img_size / 2)),
                                                     (67, 67, 67), 1)

                                        elif y - pivot_img_size > 0 and x - pivot_img_size > 0:
                                            #top left
                                            freeze_img[
                                                y - pivot_img_size:y, x -
                                                pivot_img_size:x] = display_img

                                            overlay = freeze_img.copy()
                                            opacity = 0.4
                                            cv2.rectangle(
                                                freeze_img,
                                                (x - pivot_img_size, y),
                                                (x, y + 20), (46, 200, 255),
                                                cv2.FILLED)
                                            cv2.addWeighted(
                                                overlay, opacity, freeze_img,
                                                1 - opacity, 0, freeze_img)

                                            cv2.putText(
                                                freeze_img, label,
                                                (x - pivot_img_size, y + 10),
                                                cv2.FONT_HERSHEY_SIMPLEX, 0.5,
                                                text_color, 1)

                                            #connect face and text
                                            cv2.line(
                                                freeze_img,
                                                (x + int(w / 2), y),
                                                (x + int(w / 2) - int(w / 4),
                                                 y - int(pivot_img_size / 2)),
                                                (67, 67, 67), 1)
                                            cv2.line(
                                                freeze_img,
                                                (x + int(w / 2) - int(w / 4),
                                                 y - int(pivot_img_size / 2)),
                                                (x,
                                                 y - int(pivot_img_size / 2)),
                                                (67, 67, 67), 1)

                                        elif x + w + pivot_img_size < resolution_x and y + h + pivot_img_size < resolution_y:
                                            #bottom right
                                            freeze_img[
                                                y + h:y + h + pivot_img_size,
                                                x + w:x + w +
                                                pivot_img_size] = display_img

                                            overlay = freeze_img.copy()
                                            opacity = 0.4
                                            cv2.rectangle(
                                                freeze_img,
                                                (x + w, y + h - 20),
                                                (x + w + pivot_img_size,
                                                 y + h), (46, 200, 255),
                                                cv2.FILLED)
                                            cv2.addWeighted(
                                                overlay, opacity, freeze_img,
                                                1 - opacity, 0, freeze_img)

                                            cv2.putText(
                                                freeze_img, label,
                                                (x + w, y + h - 10),
                                                cv2.FONT_HERSHEY_SIMPLEX, 0.5,
                                                text_color, 1)

                                            #connect face and text
                                            cv2.line(freeze_img,
                                                     (x + int(w / 2), y + h),
                                                     (x + int(w / 2) +
                                                      int(w / 4), y + h +
                                                      int(pivot_img_size / 2)),
                                                     (67, 67, 67), 1)
                                            cv2.line(freeze_img,
                                                     (x + int(w / 2) +
                                                      int(w / 4), y + h +
                                                      int(pivot_img_size / 2)),
                                                     (x + w, y + h +
                                                      int(pivot_img_size / 2)),
                                                     (67, 67, 67), 1)
                                    except Exception as err:
                                        print(str(err))

                        tic = time.time()  #reset the timer so the frozen image stays on screen for time_threshold seconds

                        #-------------------------------

                time_left = int(time_threshold - (toc - tic) + 1)

                cv2.rectangle(freeze_img, (10, 10), (90, 50), (67, 67, 67),
                              cv2.FILLED)
                cv2.putText(freeze_img, str(time_left), (40, 40),
                            cv2.FONT_HERSHEY_SIMPLEX, 1, (255, 255, 255), 1)

                cv2.imshow('img', freeze_img)

                freezed_frame = freezed_frame + 1
            else:
                face_detected = False
                face_included_frames = 0
                freeze = False
                freezed_frame = 0

        else:
            cv2.imshow('img', img)

        if cv2.waitKey(1) & 0xFF == ord('q'):  #press q to quit
            break

    #release the webcam and close all OpenCV windows
    cap.release()
    cv2.destroyAllWindows()
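
The semi-transparent label backgrounds drawn above all use the same alpha-blending trick: take a snapshot of the frame, draw a filled rectangle on the frame itself, then blend the snapshot back on top so the rectangle turns translucent. A minimal standalone sketch of that pattern (the canvas is synthetic; the opacity value is the one used above):

import cv2
import numpy as np

frame = np.full((200, 400, 3), 255, dtype=np.uint8)  #stand-in for a video frame
overlay = frame.copy()  #snapshot of the unmodified pixels

cv2.rectangle(frame, (10, 150), (390, 190), (46, 200, 255), cv2.FILLED)

#result = overlay*opacity + frame*(1 - opacity), written back into frame:
#the rectangle stays visible but the original pixels show through
opacity = 0.4
cv2.addWeighted(overlay, opacity, frame, 1 - opacity, 0, frame)

cv2.putText(frame, "label", (20, 180), cv2.FONT_HERSHEY_SIMPLEX, 0.5, (0, 0, 0), 1)
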
def corridor_enviornment(path=0, gender="man"):

    #Import necessary libraries

    import os
    import time
    import threading
    import configparser
    from datetime import datetime
    from pathlib import Path

    import cv2
    import cvlib as cv
    import gdown
    import numpy as np
    import requests
    from keras.preprocessing.image import img_to_array
    from keras.models import Model, Sequential, load_model
    from keras.utils import get_file
    from keras.layers import Convolution2D, Flatten, Activation
    from deepface.basemodels import VGGFace

    def telegram():

        config = configparser.ConfigParser()
        config.read('DATA/Keys/config.ini')

        config_viewer = config.items('TOKEN')
        token = config_viewer[0][1]
        up_url = config_viewer[1][1]

        print("start")

        try:

            time.sleep(2)

            token = str(token)
            chat_id = str(up_url)  # chat id read from config
            file = 'Fraud.jpg'

            url = f"https://api.telegram.org/bot{token}/sendPhoto"

            print(url)
            #sendPhoto requires an HTTP POST with the image as a multipart upload
            with open(file, "rb") as photo:
                print(
                    requests.post(url,
                                  params={"chat_id": chat_id},
                                  files={"photo": photo}))
        except Exception as err:
            print(err)

        print("end")

    model = VGGFace.baseModel()

    # --------------------------
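    #re-head the VGG-Face base: a 1x1 convolution maps the last conv block to
    #two classes (woman/man); flatten + softmax then yield gender probabilities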

    classes = 2
    base_model_output = Sequential()
    base_model_output = Convolution2D(classes, (1, 1), name='predictions')(
        model.layers[-4].output)
    base_model_output = Flatten()(base_model_output)
    base_model_output = Activation('softmax')(base_model_output)

    gender_model = Model(inputs=model.input, outputs=base_model_output)

    # load model
    gender_model.load_weights('Data/Models/gender_model_weights.h5')
    net = gender_model

    print(gender)

    classes = ['woman', 'man']

    unauthorised = 0

    if gender == "man":
        var_male = (0, 255, 0)
        var_female = (0, 0, 255)

    elif gender == "woman":
        var_male = (0, 0, 255)
        var_female = (0, 255, 0)

    video_capture = cv2.VideoCapture(path)

    font = cv2.FONT_HERSHEY_SIMPLEX

    man = 0
    woman = 0
    telegx = 0

    while True:
        #Read each frame and flip it horizontally
        ret, frame = video_capture.read()
        if not ret:  #stop when the stream ends or the camera fails
            break
        frame = cv2.flip(frame, 1)
        (H, W) = frame.shape[:2]

        cv2.putText(frame, "INTELEGIX (Washroom Corridor Monitoring System)",
                    (400, 40), font, 0.7, (0, 0, 0), 2)
        cv2.rectangle(frame, (20, 50), (W - 20, 15), (0, 0, 0), 2)

        now = datetime.now()

        current_time = now.strftime("%H:%M:%S")

        tot_str = "Washroom Corridor for : " + str(gender).capitalize()
        high_str = "Male Detected : " + str(man)
        low_str = "Female Detected : " + str(woman)
        safe_str = "Total Persons: " + str(man + woman)

        man = 0
        woman = 0
        unauthorised = 0

        sub_img = frame[H - 100:H, 0:260]
        black_rect = np.ones(sub_img.shape, dtype=np.uint8) * 0

        res = cv2.addWeighted(sub_img, 0.8, black_rect, 0.2, 1.0)

        frame[H - 100:H, 0:260] = res

        cv2.putText(frame, tot_str, (10, H - 80), font, 0.5, (255, 255, 255),
                    1)
        cv2.putText(frame, high_str, (10, H - 55), font, 0.5, (0, 255, 0), 1)
        cv2.putText(frame, low_str, (10, H - 30), font, 0.5, (0, 120, 255), 1)
        cv2.putText(frame, safe_str, (10, H - 5), font, 0.5, (0, 0, 150), 1)

        now = datetime.now()
        # cv2.imwrite(str("Data/Saved_Images/CLASS_ENVIRONMENT/") + str(now.strftime("%Y%m%d%H%M%S") + str(".jpg")), img)

        # cv2.putText(img, str(now.strftime("%d-%m-%Y% %H:%M:%S")), (W-10, H - 5),
        #             font, 0.5, (0, 0, 150), 1)
        timex = str(now.strftime("%d/%m/%Y %H:%M:%S"))
        cv2.putText(frame, timex, (W - 200, H - 10), font, 0.5,
                    (255, 255, 255), 1)

        # apply face detection
        face, confidence = cv.detect_face(frame)

        print(face)
        print(confidence)

        # loop through detected faces
        for idx, f in enumerate(face):

            # get corner points of face rectangle
            (startX, startY) = f[0], f[1]
            (endX, endY) = f[2], f[3]

            # # draw rectangle over face
            # cv2.rectangle(frame, (startX, startY), (endX, endY), (0, 255, 0), 2)

            # crop the detected face region
            face_crop = np.copy(frame[startY:endY, startX:endX])

            if (face_crop.shape[0]) < 10 or (face_crop.shape[1]) < 10:
                continue

            # preprocessing for gender detection model
            face_crop = cv2.resize(face_crop, (224, 224))
            face_crop = face_crop.astype("float") / 255.0
            face_crop = img_to_array(face_crop)
            face_crop = np.expand_dims(face_crop, axis=0)

            # apply gender detection on face
            conf = net.predict(face_crop)[0]
            print(conf)
            print(classes)

            # get label with max accuracy
            idx = np.argmax(conf)
            label = classes[idx]
            print(label)

            print(label, gender)
            if str(label) != str(gender):
                unauthorised += 1

            if label == "man":
                # draw rectangle over face
                cv2.rectangle(frame, (startX, startY), (endX, endY), var_male,
                              2)

                label = "{}: {:.2f}%".format(label, conf[idx] * 100)

                Y = startY - 10 if startY - 10 > 10 else startY + 10

                # write label and confidence above face rectangle
                cv2.putText(frame, label, (startX, Y), font, 0.7, var_male, 2)
                man += 1

            if label == "woman":

                # draw rectangle over face
                cv2.rectangle(frame, (startX, startY), (endX, endY),
                              var_female, 2)

                label = "{}: {:.2f}%".format(label, conf[idx] * 100)

                Y = startY - 10 if startY - 10 > 10 else startY + 10

                # write label and confidence above face rectangle
                cv2.putText(frame, label, (startX, Y), font, 0.7, var_female,
                            2)

                woman += 1

        if unauthorised == 0:

            #image = draw_outputs(img, (boxes, scores, classes, nums), class_names, color=(0, 255, 0))
            cv2.circle(frame, (25, 80), 10, (0, 255, 0), -1)
            cv2.putText(frame, "All Ok", (50, 85), font, 0.5, (0, 255, 0), 2)

        else:

            #image = draw_outputs(img, (boxes, scores, classes, nums), class_names, color=(0, 0, 255))
            cv2.circle(frame, (25, 80), 10, (0, 0, 255), -1)
            cv2.putText(frame,
                        "Unauthorised access at " + str(gender) + " washroom",
                        (50, 85), font, 0.5, (0, 0, 255), 2)

            telegx += 1
            print(telegx)
            if telegx > 3:
                cv2.imwrite("Fraud.jpg", frame)
                threading.Thread(target=telegram).start()
                telegx = 0

                now = datetime.now()
                cv2.imwrite(
                    str("Data/Saved_Images/CORRIDOR_ENVIRONMENT/") +
                    str(now.strftime("%Y%m%d%H%M%S") + str(".jpg")), frame)

        cv2.namedWindow("Output", cv2.WINDOW_NORMAL)
        cv2.setWindowProperty("Output", cv2.WND_PROP_FULLSCREEN,
                              cv2.WINDOW_FULLSCREEN)
        cv2.imshow("Output", frame)

        if (cv2.waitKey(1) & 0xFF == ord('q')):
            break

    #When the loop ends, release the capture and close all windows
    video_capture.release()
    cv2.destroyAllWindows()


# if __name__=="__main__":
#     corridor_enviornment(path=0,gender="man")
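
The telegram() helper above pushes the captured frame to a chat through the Bot API's sendPhoto method. A minimal standalone sketch of the same call (the token and chat id below are placeholders; the real values come from config.ini as above):

import requests

token = "123456:PLACEHOLDER"  #hypothetical bot token
chat_id = "987654321"  #hypothetical chat id

url = f"https://api.telegram.org/bot{token}/sendPhoto"

#sendPhoto expects an HTTP POST carrying the image as a multipart upload
with open("Fraud.jpg", "rb") as photo:
    resp = requests.post(url, params={"chat_id": chat_id}, files={"photo": photo})

print(resp.status_code, resp.text)
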
Esempio n. 21
0
def verify(img1_path,
           img2_path='',
           model_name='VGG-Face',
           distance_metric='cosine',
           model=None,
           enforce_detection=True):

    tic = time.time()

    if type(img1_path) == list:
        bulkProcess = True
        img_list = img1_path.copy()
    else:
        bulkProcess = False
        img_list = [[img1_path, img2_path]]

    #------------------------------

    if model == None:
        if model_name == 'VGG-Face':
            print("Using VGG-Face model backend and", distance_metric,
                  "distance.")
            model = VGGFace.loadModel()

        elif model_name == 'OpenFace':
            print("Using OpenFace model backend", distance_metric, "distance.")
            model = OpenFace.loadModel()

        elif model_name == 'Facenet':
            print("Using Facenet model backend", distance_metric, "distance.")
            model = Facenet.loadModel()

        elif model_name == 'DeepFace':
            print("Using FB DeepFace model backend", distance_metric,
                  "distance.")
            model = FbDeepFace.loadModel()

        else:
            raise ValueError("Invalid model_name passed - ", model_name)
    else:  #model != None
        print("Already built model is passed")

    #------------------------------
    #face recognition models have different size of inputs
    input_shape = model.layers[0].input_shape[1:3]

    #------------------------------

    #tuned thresholds for model and metric pair
    threshold = functions.findThreshold(model_name, distance_metric)

    #------------------------------
    pbar = tqdm(range(0, len(img_list)), desc='Verification')

    resp_objects = []

    #for instance in img_list:
    for index in pbar:

        instance = img_list[index]

        if type(instance) == list and len(instance) >= 2:
            img1_path = instance[0]
            img2_path = instance[1]

            #----------------------
            #crop and align faces

            img1 = functions.detectFace(img1_path,
                                        input_shape,
                                        enforce_detection=enforce_detection)
            img2 = functions.detectFace(img2_path,
                                        input_shape,
                                        enforce_detection=enforce_detection)

            #----------------------
            #find embeddings

            img1_representation = model.predict(img1)[0, :]
            img2_representation = model.predict(img2)[0, :]

            #----------------------
            #find distances between embeddings

            if distance_metric == 'cosine':
                distance = dst.findCosineDistance(img1_representation,
                                                  img2_representation)
            elif distance_metric == 'euclidean':
                distance = dst.findEuclideanDistance(img1_representation,
                                                     img2_representation)
            elif distance_metric == 'euclidean_l2':
                distance = dst.findEuclideanDistance(
                    dst.l2_normalize(img1_representation),
                    dst.l2_normalize(img2_representation))
            else:
                raise ValueError("Invalid distance_metric passed - ",
                                 distance_metric)

            #----------------------
            #decision

            if distance <= threshold:
                identified = "true"
            else:
                identified = "false"

            #----------------------
            #response object

            resp_obj = "{"
            resp_obj += "\"verified\": " + identified
            resp_obj += ", \"distance\": " + str(distance)
            resp_obj += ", \"max_threshold_to_verify\": " + str(threshold)
            resp_obj += ", \"model\": \"" + model_name + "\""
            resp_obj += ", \"similarity_metric\": \"" + distance_metric + "\""
            resp_obj += "}"

            resp_obj = json.loads(resp_obj)  #string to json

            if bulkProcess == True:
                resp_objects.append(resp_obj)
            else:
                #K.clear_session()
                return resp_obj
            #----------------------

        else:
            raise ValueError("Invalid arguments passed to verify function: ",
                             instance)

    #-------------------------

    toc = time.time()

    #print("identification lasts ",toc-tic," seconds")

    if bulkProcess == True:
        resp_obj = "{"

        for i in range(0, len(resp_objects)):
            resp_item = json.dumps(resp_objects[i])

            if i > 0:
                resp_obj += ", "

            resp_obj += "\"pair_" + str(i + 1) + "\": " + resp_item
        resp_obj += "}"
        resp_obj = json.loads(resp_obj)
        return resp_obj
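
A sketch of driving the verify function above, for a single pair and in bulk mode (image paths are hypothetical):

#single pair: returns one response dict
result = verify("img1.jpg", "img2.jpg",
                model_name="VGG-Face", distance_metric="cosine")
print(result["verified"], result["distance"])

#bulk mode: pass a list of pairs and get back {"pair_1": {...}, "pair_2": {...}}
results = verify([["img1.jpg", "img2.jpg"], ["img1.jpg", "img3.jpg"]])
print(results["pair_1"]["verified"])
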
Esempio n. 22
0
def find(img_path,
         db_path,
         model_name='VGG-Face',
         distance_metric='cosine',
         model=None,
         enforce_detection=True):

    tic = time.time()

    if type(img_path) == list:
        bulkProcess = True
        img_paths = img_path.copy()
    else:
        bulkProcess = False
        img_paths = [img_path]

    if os.path.isdir(db_path) == True:

        #---------------------------------------

        if model == None:
            if model_name == 'VGG-Face':
                print("Using VGG-Face model backend and", distance_metric,
                      "distance.")
                model = VGGFace.loadModel()
            elif model_name == 'OpenFace':
                print("Using OpenFace model backend", distance_metric,
                      "distance.")
                model = OpenFace.loadModel()
            elif model_name == 'Facenet':
                print("Using Facenet model backend", distance_metric,
                      "distance.")
                model = Facenet.loadModel()
            elif model_name == 'DeepFace':
                print("Using FB DeepFace model backend", distance_metric,
                      "distance.")
                model = FbDeepFace.loadModel()
            elif model_name == 'DeepID':
                print("Using DeepID model backend", distance_metric,
                      "distance.")
                model = DeepID.loadModel()
            elif model_name == 'Ensemble':

                print("Ensemble learning enabled")
                #TODO: include DeepID in ensemble method

                import lightgbm as lgb  #lightgbm==2.3.1

                model_names = ['VGG-Face', 'Facenet', 'OpenFace', 'DeepFace']
                metric_names = ['cosine', 'euclidean', 'euclidean_l2']
                models = {}

                pbar = tqdm(range(0, len(model_names)),
                            desc='Face recognition models')

                for index in pbar:
                    if index == 0:
                        pbar.set_description("Loading VGG-Face")
                        models['VGG-Face'] = VGGFace.loadModel()
                    elif index == 1:
                        pbar.set_description("Loading FaceNet")
                        models['Facenet'] = Facenet.loadModel()
                    elif index == 2:
                        pbar.set_description("Loading OpenFace")
                        models['OpenFace'] = OpenFace.loadModel()
                    elif index == 3:
                        pbar.set_description("Loading DeepFace")
                        models['DeepFace'] = FbDeepFace.loadModel()

            else:
                raise ValueError("Invalid model_name passed - ", model_name)
        else:  #model != None
            print("Already built model is passed")

            if model_name == 'Ensemble':

                #validate model dictionary because it might be passed from input as pre-trained

                found_models = list(model.keys())

                if ('VGG-Face'
                        in found_models) and ('Facenet' in found_models) and (
                            'OpenFace' in found_models) and ('DeepFace'
                                                             in found_models):
                    print("Ensemble learning will be applied for ",
                          found_models, " models")
                else:
                    raise ValueError(
                        "To apply ensemble learning with pre-built models, the models dictionary must contain [VGG-Face, Facenet, OpenFace, DeepFace] but you passed "
                        + str(found_models))

        #threshold = functions.findThreshold(model_name, distance_metric)

        #---------------------------------------

        file_name = "representations_%s.pkl" % (model_name)
        file_name = file_name.replace("-", "_").lower()

        if path.exists(db_path + "/" + file_name):

            print(
                "WARNING: Representations for images in ", db_path,
                " folder were previously stored in ", file_name,
                ". If you added new instances after this file creation, then please delete this file and call find function again. It will create it again."
            )

            f = open(db_path + '/' + file_name, 'rb')
            representations = pickle.load(f)

            print("There are ", len(representations),
                  " representations found in ", file_name)

        else:
            employees = []

            for r, d, f in os.walk(
                    db_path):  # r=root, d=directories, f = files
                for file in f:
                    if ('.jpg' in file):
                        exact_path = r + "/" + file
                        employees.append(exact_path)

            if len(employees) == 0:
                raise ValueError("There is no image in ", db_path, " folder!")

            #------------------------
            #find representations for db images

            representations = []

            pbar = tqdm(range(0, len(employees)),
                        desc='Finding representations')

            #for employee in employees:
            for index in pbar:
                employee = employees[index]

                if model_name != 'Ensemble':

                    #model.layers[0].input_shape returns (None, 224, 224, 3) in some Keras versions and [(None, 224, 224, 3)] in others, so handle both cases below

                    input_shape = model.layers[0].input_shape

                    if type(input_shape) == list:
                        input_shape = input_shape[0][1:3]
                    else:
                        input_shape = input_shape[1:3]

                    input_shape_x = input_shape[0]
                    input_shape_y = input_shape[1]

                    img = functions.detectFace(
                        employee, (input_shape_y, input_shape_x),
                        enforce_detection=enforce_detection)
                    representation = model.predict(img)[0, :]

                    instance = []
                    instance.append(employee)
                    instance.append(representation)

                else:  #ensemble learning

                    instance = []
                    instance.append(employee)

                    for j in model_names:
                        model = models[j]

                        #model.layers[0].input_shape returns (None, 224, 224, 3) in some Keras versions and [(None, 224, 224, 3)] in others, so handle both cases below

                        input_shape = model.layers[0].input_shape

                        if type(input_shape) == list:
                            input_shape = input_shape[0][1:3]
                        else:
                            input_shape = input_shape[1:3]

                        input_shape_x = input_shape[0]
                        input_shape_y = input_shape[1]

                        img = functions.detectFace(
                            employee, (input_shape_y, input_shape_x),
                            enforce_detection=enforce_detection)
                        representation = model.predict(img)[0, :]
                        instance.append(representation)

                #-------------------------------

                representations.append(instance)

            f = open(db_path + '/' + file_name, "wb")
            pickle.dump(representations, f)
            f.close()

            print(
                "Representations stored in ", db_path, "/", file_name,
                " file. Please delete this file when you add new identities in your database."
            )

        #----------------------------
        #we got representations for database

        if model_name != 'Ensemble':
            df = pd.DataFrame(representations,
                              columns=["identity", "representation"])
        else:  #ensemble learning
            df = pd.DataFrame(representations,
                              columns=[
                                  "identity", "VGG-Face_representation",
                                  "Facenet_representation",
                                  "OpenFace_representation",
                                  "DeepFace_representation"
                              ])

        df_base = df.copy()

        resp_obj = []

        global_pbar = tqdm(range(0, len(img_paths)), desc='Analyzing')
        for j in global_pbar:
            img_path = img_paths[j]

            #find representation for passed image

            if model_name == 'Ensemble':
                for j in model_names:
                    model = models[j]

                    #model.layers[0].input_shape returns (None, 224, 224, 3) in some Keras versions and [(None, 224, 224, 3)] in others, so handle both cases below

                    input_shape = model.layers[0].input_shape

                    if type(input_shape) == list:
                        input_shape = input_shape[0][1:3]
                    else:
                        input_shape = input_shape[1:3]

                    img = functions.detectFace(
                        img_path,
                        input_shape,
                        enforce_detection=enforce_detection)
                    target_representation = model.predict(img)[0, :]

                    for k in metric_names:
                        distances = []
                        for index, instance in df.iterrows():
                            source_representation = instance[
                                "%s_representation" % (j)]

                            if k == 'cosine':
                                distance = dst.findCosineDistance(
                                    source_representation,
                                    target_representation)
                            elif k == 'euclidean':
                                distance = dst.findEuclideanDistance(
                                    source_representation,
                                    target_representation)
                            elif k == 'euclidean_l2':
                                distance = dst.findEuclideanDistance(
                                    dst.l2_normalize(source_representation),
                                    dst.l2_normalize(target_representation))

                            distances.append(distance)

                        if j == 'OpenFace' and k == 'euclidean':
                            continue
                        else:
                            df["%s_%s" % (j, k)] = distances

                #----------------------------------

                feature_names = []
                for j in model_names:
                    for k in metric_names:
                        if j == 'OpenFace' and k == 'euclidean':
                            continue
                        else:
                            feature = '%s_%s' % (j, k)
                            feature_names.append(feature)

                #print(df[feature_names].head())

                x = df[feature_names].values

                #----------------------------------
                #lightgbm model
                home = str(Path.home())

                if os.path.isfile(
                        home +
                        '/.deepface/weights/face-recognition-ensemble-model.txt'
                ) != True:
                    print(
                        "face-recognition-ensemble-model.txt will be downloaded..."
                    )
                    url = 'https://raw.githubusercontent.com/serengil/deepface/master/deepface/models/face-recognition-ensemble-model.txt'
                    output = home + '/.deepface/weights/face-recognition-ensemble-model.txt'
                    gdown.download(url, output, quiet=False)

                ensemble_model_path = home + '/.deepface/weights/face-recognition-ensemble-model.txt'

                deepface_ensemble = lgb.Booster(model_file=ensemble_model_path)

                y = deepface_ensemble.predict(x)

                verified_labels = []
                scores = []
                for i in y:
                    verified = np.argmax(i) == 1
                    score = i[np.argmax(i)]

                    verified_labels.append(verified)
                    scores.append(score)

                df['verified'] = verified_labels
                df['score'] = scores

                df = df[df.verified == True]
                #df = df[df.score > 0.99] #confidence score
                df = df.sort_values(by=["score"],
                                    ascending=False).reset_index(drop=True)
                df = df[['identity', 'verified', 'score']]

                resp_obj.append(df)
                df = df_base.copy()  #restore df for the next iteration

                #----------------------------------

            if model_name != 'Ensemble':

                #model.layers[0].input_shape returns (None, 224, 224, 3) in some Keras versions and [(None, 224, 224, 3)] in others, so handle both cases below

                input_shape = model.layers[0].input_shape

                if type(input_shape) == list:
                    input_shape = input_shape[0][1:3]
                else:
                    input_shape = input_shape[1:3]

                input_shape_x = input_shape[0]
                input_shape_y = input_shape[1]

                img = functions.detectFace(img_path,
                                           (input_shape_y, input_shape_x),
                                           enforce_detection=enforce_detection)
                target_representation = model.predict(img)[0, :]

                distances = []
                for index, instance in df.iterrows():
                    source_representation = instance["representation"]

                    if distance_metric == 'cosine':
                        distance = dst.findCosineDistance(
                            source_representation, target_representation)
                    elif distance_metric == 'euclidean':
                        distance = dst.findEuclideanDistance(
                            source_representation, target_representation)
                    elif distance_metric == 'euclidean_l2':
                        distance = dst.findEuclideanDistance(
                            dst.l2_normalize(source_representation),
                            dst.l2_normalize(target_representation))
                    else:
                        raise ValueError("Invalid distance_metric passed - ",
                                         distance_metric)

                    distances.append(distance)

                threshold = functions.findThreshold(model_name,
                                                    distance_metric)

                df["distance"] = distances
                df = df.drop(columns=["representation"])
                df = df[df.distance <= threshold]

                df = df.sort_values(by=["distance"],
                                    ascending=True).reset_index(drop=True)
                resp_obj.append(df)
                df = df_base.copy()  #restore df for the next iteration

        toc = time.time()

        print("find function lasts ", toc - tic, " seconds")

        if len(resp_obj) == 1:
            return resp_obj[0]

        return resp_obj

    else:
        raise ValueError("Passed db_path does not exist!")
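
A sketch of calling the find function above (folder and file names are hypothetical):

#searches db_path for faces matching the probe image; returns a DataFrame
#sorted by distance, and caches embeddings in representations_vgg_face.pkl
df = find("probe.jpg", db_path="my_db",
          model_name="VGG-Face", distance_metric="cosine")
print(df.head())  #columns: identity, distance
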
Esempio n. 23
0
def featureExtraction(img1_path,
                      model_name='VGG-Face',
                      model=None,
                      enforce_detection=True):

    tic = time.time()

    if type(img1_path) == list:
        bulkProcess = True
        img_list = img1_path.copy()
    else:
        bulkProcess = False
        img_list = [[img1_path]]

    #------------------------------

    if model == None:
        if model_name == 'VGG-Face':
            print("Using VGG-Face model backend.")
            model = VGGFace.loadModel()

        elif model_name == 'OpenFace':
            print("Using OpenFace model backend.")
            model = OpenFace.loadModel()

        elif model_name == 'Facenet':
            print("Using Facenet model backend.")
            model = Facenet.loadModel()

        elif model_name == 'DeepFace':
            print("Using FB DeepFace model backend.")
            model = FbDeepFace.loadModel()

        else:
            raise ValueError("Invalid model_name passed - ", model_name)
    else:  #model != None
        print("Already built model is passed")

    #------------------------------
    #face recognition models have different size of inputs
    input_shape = model.layers[0].input_shape[1:3]

    #------------------------------
    pbar = tqdm(range(0, len(img_list)), desc='Feature Extraction')

    resp_objects = []

    #for instance in img_list:
    for index in pbar:

        instance = img_list[index]

        if type(instance) == list:
            img1_path = instance[0]

            #----------------------
            #crop and align faces

            img1 = functions.detectFace(img1_path,
                                        input_shape,
                                        enforce_detection=enforce_detection)

            #----------------------
            #find embeddings

            img1_representation = model.predict(img1)[0, :]

            #----------------------
            #response object

            resp_obj = "{"
            resp_obj += "  \"feature\": \"[" + ','.join(
                map(str, img1_representation)) + "]\""
            resp_obj += ", \"model\": \"" + model_name + "\""
            resp_obj += "}"

            resp_obj = json.loads(resp_obj)  #string to json

            if bulkProcess == True:
                resp_objects.append(resp_obj)
            else:
                #K.clear_session()
                return resp_obj
            #----------------------

        else:
            raise ValueError("Invalid arguments passed to verify function: ",
                             instance)

    #-------------------------

    toc = time.time()

    #print("identification lasts ",toc-tic," seconds")

    if bulkProcess == True:
        resp_obj = "{"

        for i in range(0, len(resp_objects)):
            resp_item = json.dumps(resp_objects[i])

            if i > 0:
                resp_obj += ", "

            resp_obj += "\"feature_" + str(i + 1) + "\": " + resp_item
        resp_obj += "}"
        resp_obj = json.loads(resp_obj)
        return resp_obj
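
A sketch of extracting a single embedding with the function above and turning the stringified vector back into numbers (image path hypothetical):

import json
import numpy as np

resp = featureExtraction("img1.jpg", model_name="VGG-Face")

#the "feature" field holds the embedding serialized as "[v1,v2,...]"
embedding = np.array(json.loads(resp["feature"]))
print(resp["model"], embedding.shape)
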
Esempio n. 24
0
def verify(img1_path,
           img2_path='',
           model_name='VGG-Face',
           distance_metric='cosine',
           model=None,
           enforce_detection=True):

    tic = time.time()

    if type(img1_path) == list:
        bulkProcess = True
        img_list = img1_path.copy()
    else:
        bulkProcess = False
        img_list = [[img1_path, img2_path]]

    #------------------------------

    resp_objects = []

    if model_name == 'Ensemble':
        print("Ensemble learning enabled")

        import lightgbm as lgb  #lightgbm==2.3.1

        if model == None:
            model = {}

            model_pbar = tqdm(range(0, 4), desc='Face recognition models')

            for index in model_pbar:

                if index == 0:
                    model_pbar.set_description("Loading VGG-Face")
                    model["VGG-Face"] = VGGFace.loadModel()
                elif index == 1:
                    model_pbar.set_description("Loading Google FaceNet")
                    model["Facenet"] = Facenet.loadModel()
                elif index == 2:
                    model_pbar.set_description("Loading OpenFace")
                    model["OpenFace"] = OpenFace.loadModel()
                elif index == 3:
                    model_pbar.set_description("Loading Facebook DeepFace")
                    model["DeepFace"] = FbDeepFace.loadModel()

        #--------------------------
        #validate model dictionary because it might be passed from input as pre-trained

        found_models = list(model.keys())

        if ('VGG-Face' in found_models) and ('Facenet' in found_models) and (
                'OpenFace' in found_models) and ('DeepFace' in found_models):
            print("Ensemble learning will be applied for ", found_models,
                  " models")
        else:
            raise ValueError(
                "To apply ensemble learning with pre-built models, the models dictionary must contain [VGG-Face, Facenet, OpenFace, DeepFace] but you passed "
                + str(found_models))

        #--------------------------

        model_names = ["VGG-Face", "Facenet", "OpenFace", "DeepFace"]
        metrics = ["cosine", "euclidean", "euclidean_l2"]

        pbar = tqdm(range(0, len(img_list)), desc='Verification')

        #for instance in img_list:
        for index in pbar:
            instance = img_list[index]

            if type(instance) == list and len(instance) >= 2:
                img1_path = instance[0]
                img2_path = instance[1]

                ensemble_features = []
                ensemble_features_string = "["

                for i in model_names:
                    custom_model = model[i]

                    #custom_model.layers[0].input_shape returns (None, 224, 224, 3) in some Keras versions and [(None, 224, 224, 3)] in others, so handle both cases below

                    input_shape = custom_model.layers[0].input_shape

                    if type(input_shape) == list:
                        input_shape = input_shape[0][1:3]
                    else:
                        input_shape = input_shape[1:3]

                    img1 = functions.detectFace(
                        img1_path,
                        input_shape,
                        enforce_detection=enforce_detection)
                    img2 = functions.detectFace(
                        img2_path,
                        input_shape,
                        enforce_detection=enforce_detection)

                    img1_representation = custom_model.predict(img1)[0, :]
                    img2_representation = custom_model.predict(img2)[0, :]

                    for j in metrics:
                        if j == 'cosine':
                            distance = dst.findCosineDistance(
                                img1_representation, img2_representation)
                        elif j == 'euclidean':
                            distance = dst.findEuclideanDistance(
                                img1_representation, img2_representation)
                        elif j == 'euclidean_l2':
                            distance = dst.findEuclideanDistance(
                                dst.l2_normalize(img1_representation),
                                dst.l2_normalize(img2_representation))

                        if i == 'OpenFace' and j == 'euclidean':  #OpenFace euclidean distance equals its euclidean_l2 distance, so skip the duplicate feature
                            continue
                        else:

                            ensemble_features.append(distance)

                            if len(ensemble_features) > 1:
                                ensemble_features_string += ", "
                            ensemble_features_string += str(distance)

                #print("ensemble_features: ", ensemble_features)
                ensemble_features_string += "]"

                #-------------------------------
                #find deepface path

                home = str(Path.home())

                if os.path.isfile(
                        home +
                        '/.deepface/weights/face-recognition-ensemble-model.txt'
                ) != True:
                    print(
                        "face-recognition-ensemble-model.txt will be downloaded..."
                    )
                    url = 'https://raw.githubusercontent.com/serengil/deepface/master/deepface/models/face-recognition-ensemble-model.txt'
                    output = home + '/.deepface/weights/face-recognition-ensemble-model.txt'
                    gdown.download(url, output, quiet=False)

                ensemble_model_path = home + '/.deepface/weights/face-recognition-ensemble-model.txt'

                #print(ensemble_model_path)

                #-------------------------------

                deepface_ensemble = lgb.Booster(model_file=ensemble_model_path)

                prediction = deepface_ensemble.predict(
                    np.expand_dims(np.array(ensemble_features), axis=0))[0]

                verified = np.argmax(prediction) == 1
                if verified: identified = "true"
                else: identified = "false"

                score = prediction[np.argmax(prediction)]

                #print("verified: ", verified,", score: ", score)

                resp_obj = "{"
                resp_obj += "\"verified\": " + identified
                resp_obj += ", \"score\": " + str(score)
                resp_obj += ", \"distance\": " + ensemble_features_string
                resp_obj += ", \"model\": [\"VGG-Face\", \"Facenet\", \"OpenFace\", \"DeepFace\"]"
                resp_obj += ", \"similarity_metric\": [\"cosine\", \"euclidean\", \"euclidean_l2\"]"
                resp_obj += "}"

                #print(resp_obj)

                resp_obj = json.loads(resp_obj)  #string to json

                if bulkProcess == True:
                    resp_objects.append(resp_obj)
                else:
                    return resp_obj

                #-------------------------------

        if bulkProcess == True:
            resp_obj = "{"

            for i in range(0, len(resp_objects)):
                resp_item = json.dumps(resp_objects[i])

                if i > 0:
                    resp_obj += ", "

                resp_obj += "\"pair_" + str(i + 1) + "\": " + resp_item
            resp_obj += "}"
            resp_obj = json.loads(resp_obj)
            return resp_obj

        return None

    #ensemble learning block end
    #--------------------------------
    #ensemble learning disabled

    if model == None:
        if model_name == 'VGG-Face':
            print("Using VGG-Face model backend and", distance_metric,
                  "distance.")
            model = VGGFace.loadModel()

        elif model_name == 'OpenFace':
            print("Using OpenFace model backend", distance_metric, "distance.")
            model = OpenFace.loadModel()

        elif model_name == 'Facenet':
            print("Using Facenet model backend", distance_metric, "distance.")
            model = Facenet.loadModel()

        elif model_name == 'DeepFace':
            print("Using FB DeepFace model backend", distance_metric,
                  "distance.")
            model = FbDeepFace.loadModel()

        elif model_name == 'DeepID':
            print("Using DeepID2 model backend", distance_metric, "distance.")
            model = DeepID.loadModel()

        else:
            raise ValueError("Invalid model_name passed - ", model_name)
    else:  #model != None
        print("Already built model is passed")

    #------------------------------
    #face recognition models have different size of inputs
    #model.layers[0].input_shape returns (None, 224, 224, 3) in some Keras versions and [(None, 224, 224, 3)] in others, so handle both cases below

    input_shape = model.layers[0].input_shape

    if type(input_shape) == list:
        input_shape = input_shape[0][1:3]
    else:
        input_shape = input_shape[1:3]

    input_shape_x = input_shape[0]
    input_shape_y = input_shape[1]

    #------------------------------

    #tuned thresholds for model and metric pair
    threshold = functions.findThreshold(model_name, distance_metric)

    #------------------------------
    pbar = tqdm(range(0, len(img_list)), desc='Verification')

    #for instance in img_list:
    for index in pbar:

        instance = img_list[index]

        if type(instance) == list and len(instance) >= 2:
            img1_path = instance[0]
            img2_path = instance[1]

            #----------------------
            #crop and align faces

            img1 = functions.detectFace(img1_path,
                                        (input_shape_y, input_shape_x),
                                        enforce_detection=enforce_detection)
            img2 = functions.detectFace(img2_path,
                                        (input_shape_y, input_shape_x),
                                        enforce_detection=enforce_detection)

            #----------------------
            #find embeddings

            img1_representation = model.predict(img1)[0, :]
            img2_representation = model.predict(img2)[0, :]

            #----------------------
            #find distances between embeddings

            if distance_metric == 'cosine':
                distance = dst.findCosineDistance(img1_representation,
                                                  img2_representation)
            elif distance_metric == 'euclidean':
                distance = dst.findEuclideanDistance(img1_representation,
                                                     img2_representation)
            elif distance_metric == 'euclidean_l2':
                distance = dst.findEuclideanDistance(
                    dst.l2_normalize(img1_representation),
                    dst.l2_normalize(img2_representation))
            else:
                raise ValueError("Invalid distance_metric passed - ",
                                 distance_metric)

            #----------------------
            #decision

            if distance <= threshold:
                identified = "true"
            else:
                identified = "false"

            #----------------------
            #response object

            resp_obj = "{"
            resp_obj += "\"verified\": " + identified
            resp_obj += ", \"distance\": " + str(distance)
            resp_obj += ", \"max_threshold_to_verify\": " + str(threshold)
            resp_obj += ", \"model\": \"" + model_name + "\""
            resp_obj += ", \"similarity_metric\": \"" + distance_metric + "\""
            resp_obj += "}"

            resp_obj = json.loads(resp_obj)  #string to json

            if bulkProcess == True:
                resp_objects.append(resp_obj)
            else:
                #K.clear_session()
                return resp_obj
            #----------------------

        else:
            raise ValueError("Invalid arguments passed to verify function: ",
                             instance)

    #-------------------------

    toc = time.time()

    #print("identification lasts ",toc-tic," seconds")

    if bulkProcess == True:
        resp_obj = "{"

        for i in range(0, len(resp_objects)):
            resp_item = json.dumps(resp_objects[i])

            if i > 0:
                resp_obj += ", "

            resp_obj += "\"pair_" + str(i + 1) + "\": " + resp_item
        resp_obj += "}"
        resp_obj = json.loads(resp_obj)
        return resp_obj
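
The Ensemble branch of this verify variant can be exercised as below (a sketch: lightgbm must be installed, and the four base models are built on first use):

result = verify("img1.jpg", "img2.jpg", model_name="Ensemble")

#the ensemble response carries a confidence score alongside the boolean decision
print(result["verified"], result["score"])
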
Esempio n. 25
0
img_B = mpimg.imread("28_0_0_man.jpg")
fig, ax = plt.subplots(1, 2)
ax[0].imshow(img_A)
ax[1].imshow(img_B)
values_view = facenet_result.values()
first_value = next(iter(values_view))  #the first value is the "verified" flag
#print(first_value)
if first_value:
    print("Both are the same person")
else:
    print("Both are different individuals")

# In[ ]:

model = VGGFace.loadModel(
)  #all face recognition models have loadModel() function in their interfaces
DeepFace.verify("img1.jpg", "img2.jpg", model_name="VGG-Face", model=model)

# In[ ]:

result = DeepFace.verify("img1.jpg",
                         "img2.jpg",
                         model_name="VGG-Face",
                         distance_metric="cosine")
result = DeepFace.verify("img1.jpg",
                         "img2.jpg",
                         model_name="VGG-Face",
                         distance_metric="euclidean")
result = DeepFace.verify("img1.jpg",
                         "img2.jpg",
                         model_name="VGG-Face",
                         distance_metric="euclidean_l2")
Esempio n. 26
0
	print("Unit tests are completed successfully. Score: ",accuracy,"%")
else:
	raise ValueError("Unit test score does not satisfy the minimum required accuracy. Minimum expected score is ",threshold,"% but this got ",accuracy,"%")

#-----------------------------------

# api tests - already built models will be passed to the functions

from deepface.basemodels import VGGFace, OpenFace, Facenet, FbDeepFace

#-----------------------------------
print("--------------------------")

print("Verify function with passing pre-trained model")

vggface_model = VGGFace.loadModel()
resp_obj = DeepFace.verify("dataset/img1.jpg", "dataset/img2.jpg", model_name = "VGG-Face", model = vggface_model)
print(resp_obj)

#-----------------------------------
print("--------------------------")

print("Analyze function with passing pre-trained model")

from deepface.extendedmodels import Age, Gender, Race, Emotion

emotion_model = Emotion.loadModel()
age_model = Age.loadModel()
gender_model = Gender.loadModel()
race_model = Race.loadModel()
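
The pre-built models above can then be handed to the analyze call so it does not rebuild them on every invocation; a sketch assuming the deepface API of this era, which accepted a models dictionary:

models = {}
models["emotion"] = emotion_model
models["age"] = age_model
models["gender"] = gender_model
models["race"] = race_model

resp_obj = DeepFace.analyze("dataset/img1.jpg", models=models)
print(resp_obj)
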
Esempio n. 27
0
def save_hash(img_dict,
              model_name='Ensemble',
              model=None,
              enforce_detection=True):

    tic = time.time()

    img_list = list(img_dict.keys())

    if model_name == 'Ensemble':
        print("Ensemble learning enabled")

        import lightgbm as lgb  #lightgbm==2.3.1

        if model == None:
            model = {}

            model_pbar = tqdm(range(0, 4), desc='Face recognition models')

            for index in model_pbar:

                if index == 0:
                    model_pbar.set_description("Loading VGG-Face")
                    model["VGG-Face"] = VGGFace.loadModel()
                elif index == 1:
                    model_pbar.set_description("Loading Google FaceNet")
                    model["Facenet"] = Facenet.loadModel()
                elif index == 2:
                    model_pbar.set_description("Loading OpenFace")
                    model["OpenFace"] = OpenFace.loadModel()
                elif index == 3:
                    model_pbar.set_description("Loading Facebook DeepFace")
                    model["DeepFace"] = FbDeepFace.loadModel()

        #--------------------------

        model_names = ["VGG-Face", "Facenet", "OpenFace", "DeepFace"]

        pbar = tqdm(range(0, len(img_list)), desc='Evaluating hash')

        df = pd.DataFrame(np.zeros([len(img_list),
                                    len(model_names)]),
                          columns=model_names,
                          index=list(img_dict.keys()))

        df = df.astype(object)

        representation = []

        erro_keys = []

        #for instance in img_list:
        for index in pbar:

            erro = False

            img1_key = img_list[index]

            representation.clear()

            for i in model_names:
                custom_model = model[i]

                #custom_model.layers[0].input_shape returns (None, 224, 224, 3) in some Keras versions and [(None, 224, 224, 3)] in others, so handle both cases below

                input_shape = custom_model.layers[0].input_shape

                if type(input_shape) == list:
                    input_shape = input_shape[0][1:3]
                else:
                    input_shape = input_shape[1:3]

                try:
                    img1 = functions.detectFace(
                        img_dict[img1_key],
                        input_shape,
                        enforce_detection=enforce_detection)
                except Exception:  #face could not be detected for this image
                    erro = True
                    erro_keys.append(img1_key)
                    break

                img1_representation = custom_model.predict(img1)[0, :]

                representation.append(img1_representation)

            if erro:
                continue

            df.loc[img1_key] = representation

        for i in erro_keys:
            df.drop(index=i, inplace=True)

        return df
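
A sketch of calling save_hash above: keys map to image paths, and the result is a DataFrame indexed by those keys with one embedding column per base model (names and paths hypothetical):

img_dict = {
    "person_1": "imgs/person_1.jpg",
    "person_2": "imgs/person_2.jpg",
}

hash_df = save_hash(img_dict)  #model_name defaults to 'Ensemble'

#rows whose face could not be detected are dropped; each cell holds one embedding
print(hash_df.loc["person_1", "VGG-Face"].shape)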