Example #1
def verify():

    global graph

    tic = time.time()
    req = request.get_json()
    trx_id = uuid.uuid4()
    # Load the JSON file (model)
    with open('archivo.json', 'r') as f:
        data = json.load(f)
    #print("Model Json", data)
    # assign the evaluation model
    model_name = "VGG-Face"
    distance_metric = "cosine"
    if "model_name" in list(req.keys()):
        model_name = req["model_name"]
    if "distance_metric" in list(req.keys()):
        distance_metric = req["distance_metric"]
    img = './eliezer1.jpg'
    # face recognition models expect different input sizes
    input_shape = model.layers[0].input_shape
    if isinstance(input_shape, list):  #some TF/Keras versions wrap the shape in a list
        input_shape = input_shape[0][1:3]
    else:
        input_shape = input_shape[1:3]
    # recognition threshold tuned per model and metric
    threshold = functions.findThreshold(model_name, distance_metric)
    # detect the face in the image
    img_face = functions.detectFace(img, input_shape)
    # compute the facial embedding
    #img_representation = model.predict(img_face)[0, :]

    resp_obj = {'success': False}
    with graph.as_default():
        img_representation = model.predict(img_face)[0, :]
        resp_obj['success'] = True
        resp_obj['model_name'] = model_name
        resp_obj['distance_metric'] = distance_metric
        resp_obj['threshold'] = threshold

    #--------------------------

    toc = time.time()
    resp_obj['seconds'] = toc - tic  #surface the measured elapsed time

    return resp_obj, 200
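For reference, the three distance metrics these examples dispatch on can be written in a few lines of NumPy. This is a self-contained sketch of what the dst helpers (findCosineDistance, findEuclideanDistance, l2_normalize) are assumed to compute; it is illustrative, not the library's canonical implementation.

import numpy as np

def find_cosine_distance(a, b):
    # cosine distance = 1 - cosine similarity
    a, b = np.asarray(a), np.asarray(b)
    return 1 - np.dot(a, b) / (np.linalg.norm(a) * np.linalg.norm(b))

def find_euclidean_distance(a, b):
    a, b = np.asarray(a), np.asarray(b)
    return np.linalg.norm(a - b)

def l2_normalize(x):
    x = np.asarray(x)
    return x / np.sqrt(np.sum(x ** 2))

# 'euclidean_l2' is the euclidean distance between L2-normalized embeddings:
# find_euclidean_distance(l2_normalize(e1), l2_normalize(e2))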
Example #2
def find(img_path,
         db_path,
         model_name='VGG-Face',
         distance_metric='cosine',
         model=None,
         enforce_detection=True):

    tic = time.time()

    if type(img_path) == list:
        bulkProcess = True
        img_paths = img_path.copy()
    else:
        bulkProcess = False
        img_paths = [img_path]

    if os.path.isdir(db_path) == True:

        #---------------------------------------

        if model == None:
            if model_name == 'VGG-Face':
                print("Using VGG-Face model backend and", distance_metric,
                      "distance.")
                model = VGGFace.loadModel()
            elif model_name == 'OpenFace':
                print("Using OpenFace model backend", distance_metric,
                      "distance.")
                model = OpenFace.loadModel()
            elif model_name == 'Facenet':
                print("Using Facenet model backend", distance_metric,
                      "distance.")
                model = Facenet.loadModel()
            elif model_name == 'DeepFace':
                print("Using FB DeepFace model backend", distance_metric,
                      "distance.")
                model = FbDeepFace.loadModel()
            elif model_name == 'DeepID':
                print("Using DeepID model backend", distance_metric,
                      "distance.")
                model = DeepID.loadModel()
            elif model_name == 'Ensemble':

                print("Ensemble learning enabled")
                #TODO: include DeepID in ensemble method

                import lightgbm as lgb  #lightgbm==2.3.1

                model_names = ['VGG-Face', 'Facenet', 'OpenFace', 'DeepFace']
                metric_names = ['cosine', 'euclidean', 'euclidean_l2']
                models = {}

                pbar = tqdm(range(0, len(model_names)),
                            desc='Face recognition models')

                for index in pbar:
                    if index == 0:
                        pbar.set_description("Loading VGG-Face")
                        models['VGG-Face'] = VGGFace.loadModel()
                    elif index == 1:
                        pbar.set_description("Loading FaceNet")
                        models['Facenet'] = Facenet.loadModel()
                    elif index == 2:
                        pbar.set_description("Loading OpenFace")
                        models['OpenFace'] = OpenFace.loadModel()
                    elif index == 3:
                        pbar.set_description("Loading DeepFace")
                        models['DeepFace'] = FbDeepFace.loadModel()

            else:
                raise ValueError("Invalid model_name passed - ", model_name)
        else:  #model != None
            print("Already built model is passed")

            if model_name == 'Ensemble':

                #validate model dictionary because it might be passed from input as pre-trained

                import lightgbm as lgb  #lightgbm==2.3.1; needed later to load the ensemble booster

                found_models = list(model.keys())

                if ('VGG-Face' in found_models) and ('Facenet' in found_models) and (
                        'OpenFace' in found_models) and ('DeepFace' in found_models):
                    print("Ensemble learning will be applied for ",
                          found_models, " models")
                    #reuse the passed pre-built models downstream
                    models = model
                    model_names = ['VGG-Face', 'Facenet', 'OpenFace', 'DeepFace']
                    metric_names = ['cosine', 'euclidean', 'euclidean_l2']
                else:
                    raise ValueError(
                        "Ensemble learning requires pre-built models for [VGG-Face, Facenet, OpenFace, DeepFace] but you passed "
                        + str(found_models))

        #threshold = functions.findThreshold(model_name, distance_metric)

        #---------------------------------------

        file_name = "representations_%s.pkl" % (model_name)
        file_name = file_name.replace("-", "_").lower()

        if path.exists(db_path + "/" + file_name):

            print(
                "WARNING: Representations for images in ", db_path,
                " folder were previously stored in ", file_name,
                ". If you added new instances after this file was created, delete it and call find again; it will be rebuilt."
            )

            with open(db_path + '/' + file_name, 'rb') as f:
                representations = pickle.load(f)

            print("There are ", len(representations),
                  " representations found in ", file_name)

        else:
            employees = []

            for r, d, f in os.walk(
                    db_path):  # r=root, d=directories, f = files
                for file in f:
                    if file.lower().endswith('.jpg'):
                        exact_path = r + "/" + file
                        employees.append(exact_path)

            if len(employees) == 0:
                raise ValueError("There is no image in ", db_path, " folder!")

            #------------------------
            #find representations for db images

            representations = []

            pbar = tqdm(range(0, len(employees)),
                        desc='Finding representations')

            #for employee in employees:
            for index in pbar:
                employee = employees[index]

                if model_name != 'Ensemble':

                    #some TF/Keras versions return [(None, 224, 224, 3)] instead of (None, 224, 224, 3); both cases are handled below

                    input_shape = model.layers[0].input_shape

                    if type(input_shape) == list:
                        input_shape = input_shape[0][1:3]
                    else:
                        input_shape = input_shape[1:3]

                    input_shape_x = input_shape[0]
                    input_shape_y = input_shape[1]

                    img = functions.detectFace(
                        employee, (input_shape_y, input_shape_x),
                        enforce_detection=enforce_detection)
                    representation = model.predict(img)[0, :]

                    instance = []
                    instance.append(employee)
                    instance.append(representation)

                else:  #ensemble learning

                    instance = []
                    instance.append(employee)

                    for j in model_names:
                        model = models[j]

                        #some TF/Keras versions return [(None, 224, 224, 3)] instead of (None, 224, 224, 3); both cases are handled below

                        input_shape = model.layers[0].input_shape

                        if type(input_shape) == list:
                            input_shape = input_shape[0][1:3]
                        else:
                            input_shape = input_shape[1:3]

                        input_shape_x = input_shape[0]
                        input_shape_y = input_shape[1]

                        img = functions.detectFace(
                            employee, (input_shape_y, input_shape_x),
                            enforce_detection=enforce_detection)
                        representation = model.predict(img)[0, :]
                        instance.append(representation)

                #-------------------------------

                representations.append(instance)

            with open(db_path + '/' + file_name, "wb") as f:
                pickle.dump(representations, f)

            print(
                "Representations stored in ", db_path, "/", file_name,
                " file. Please delete this file when you add new identities to your database."
            )

        #----------------------------
        #we got representations for database

        if model_name != 'Ensemble':
            df = pd.DataFrame(representations,
                              columns=["identity", "representation"])
        else:  #ensemble learning
            df = pd.DataFrame(representations,
                              columns=[
                                  "identity", "VGG-Face_representation",
                                  "Facenet_representation",
                                  "OpenFace_representation",
                                  "DeepFace_representation"
                              ])

        df_base = df.copy()

        resp_obj = []

        global_pbar = tqdm(range(0, len(img_paths)), desc='Analyzing')
        for img_index in global_pbar:  #renamed to avoid shadowing by the inner model loop
            img_path = img_paths[img_index]

            #find representation for passed image

            if model_name == 'Ensemble':
                for j in model_names:
                    model = models[j]

                    #some TF/Keras versions return [(None, 224, 224, 3)] instead of (None, 224, 224, 3); both cases are handled below

                    input_shape = model.layers[0].input_shape

                    if type(input_shape) == list:
                        input_shape = input_shape[0][1:3]
                    else:
                        input_shape = input_shape[1:3]

                    img = functions.detectFace(
                        img_path,
                        input_shape,
                        enforce_detection=enforce_detection)
                    target_representation = model.predict(img)[0, :]

                    for k in metric_names:
                        distances = []
                        for index, instance in df.iterrows():
                            source_representation = instance[
                                "%s_representation" % (j)]

                            if k == 'cosine':
                                distance = dst.findCosineDistance(
                                    source_representation,
                                    target_representation)
                            elif k == 'euclidean':
                                distance = dst.findEuclideanDistance(
                                    source_representation,
                                    target_representation)
                            elif k == 'euclidean_l2':
                                distance = dst.findEuclideanDistance(
                                    dst.l2_normalize(source_representation),
                                    dst.l2_normalize(target_representation))

                            distances.append(distance)

                        if j == 'OpenFace' and k == 'euclidean':
                            continue
                        else:
                            df["%s_%s" % (j, k)] = distances

                #----------------------------------

                feature_names = []
                for j in model_names:
                    for k in metric_names:
                        if j == 'OpenFace' and k == 'euclidean':
                            continue
                        else:
                            feature = '%s_%s' % (j, k)
                            feature_names.append(feature)

                #print(df[feature_names].head())

                x = df[feature_names].values

                #----------------------------------
                #lightgbm model
                home = str(Path.home())

                if os.path.isfile(
                        home +
                        '/.deepface/weights/face-recognition-ensemble-model.txt'
                ) != True:
                    print(
                        "face-recognition-ensemble-model.txt will be downloaded..."
                    )
                    url = 'https://raw.githubusercontent.com/serengil/deepface/master/deepface/models/face-recognition-ensemble-model.txt'
                    output = home + '/.deepface/weights/face-recognition-ensemble-model.txt'
                    gdown.download(url, output, quiet=False)

                ensemble_model_path = home + '/.deepface/weights/face-recognition-ensemble-model.txt'

                deepface_ensemble = lgb.Booster(model_file=ensemble_model_path)

                y = deepface_ensemble.predict(x)

                verified_labels = []
                scores = []
                for i in y:
                    verified = np.argmax(i) == 1
                    score = i[np.argmax(i)]

                    verified_labels.append(verified)
                    scores.append(score)

                df['verified'] = verified_labels
                df['score'] = scores

                df = df[df.verified == True]
                #df = df[df.score > 0.99] #confidence score
                df = df.sort_values(by=["score"],
                                    ascending=False).reset_index(drop=True)
                df = df[['identity', 'verified', 'score']]

                resp_obj.append(df)
                df = df_base.copy()  #restore df for the next iteration

                #----------------------------------

            if model_name != 'Ensemble':

                #some TF/Keras versions return [(None, 224, 224, 3)] instead of (None, 224, 224, 3); both cases are handled below

                input_shape = model.layers[0].input_shape

                if type(input_shape) == list:
                    input_shape = input_shape[0][1:3]
                else:
                    input_shape = input_shape[1:3]

                input_shape_x = input_shape[0]
                input_shape_y = input_shape[1]

                img = functions.detectFace(img_path,
                                           (input_shape_y, input_shape_x),
                                           enforce_detection=enforce_detection)
                target_representation = model.predict(img)[0, :]

                distances = []
                for index, instance in df.iterrows():
                    source_representation = instance["representation"]

                    if distance_metric == 'cosine':
                        distance = dst.findCosineDistance(
                            source_representation, target_representation)
                    elif distance_metric == 'euclidean':
                        distance = dst.findEuclideanDistance(
                            source_representation, target_representation)
                    elif distance_metric == 'euclidean_l2':
                        distance = dst.findEuclideanDistance(
                            dst.l2_normalize(source_representation),
                            dst.l2_normalize(target_representation))
                    else:
                        raise ValueError("Invalid distance_metric passed - ",
                                         distance_metric)

                    distances.append(distance)

                threshold = functions.findThreshold(model_name,
                                                    distance_metric)

                df["distance"] = distances
                df = df.drop(columns=["representation"])
                df = df[df.distance <= threshold]

                df = df.sort_values(by=["distance"],
                                    ascending=True).reset_index(drop=True)
                resp_obj.append(df)
                df = df_base.copy()  #restore df for the next iteration

        toc = time.time()

        print("find function lasts ", toc - tic, " seconds")

        if len(resp_obj) == 1:
            return resp_obj[0]

        return resp_obj

    else:
        raise ValueError("Passed db_path does not exist!")
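A hedged usage sketch for the find function above; the query image path and database layout (a folder of .jpg images, one subfolder per identity) are placeholder assumptions for illustration.

# Hypothetical usage of find(). The first call builds
# representations_vgg_face.pkl inside db_path; later calls reuse that file
# until it is deleted.
matches = find(img_path='query.jpg',
               db_path='./db',
               model_name='VGG-Face',
               distance_metric='cosine')
print(matches.head())  #identity/distance rows, sorted by ascending distance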
Example #3
def verify(img1_path,
           img2_path='',
           model_name='VGG-Face',
           distance_metric='cosine',
           model=None,
           enforce_detection=True):

    tic = time.time()

    if type(img1_path) == list:
        bulkProcess = True
        img_list = img1_path.copy()
    else:
        bulkProcess = False
        img_list = [[img1_path, img2_path]]

    #------------------------------

    resp_objects = []

    if model_name == 'Ensemble':
        print("Ensemble learning enabled")

        import lightgbm as lgb  #lightgbm==2.3.1

        if model == None:
            model = {}

            model_pbar = tqdm(range(0, 4), desc='Face recognition models')

            for index in model_pbar:

                if index == 0:
                    model_pbar.set_description("Loading VGG-Face")
                    model["VGG-Face"] = VGGFace.loadModel()
                elif index == 1:
                    model_pbar.set_description("Loading Google FaceNet")
                    model["Facenet"] = Facenet.loadModel()
                elif index == 2:
                    model_pbar.set_description("Loading OpenFace")
                    model["OpenFace"] = OpenFace.loadModel()
                elif index == 3:
                    model_pbar.set_description("Loading Facebook DeepFace")
                    model["DeepFace"] = FbDeepFace.loadModel()

        #--------------------------
        #validate model dictionary because it might be passed from input as pre-trained

        found_models = list(model.keys())

        if ('VGG-Face' in found_models) and ('Facenet' in found_models) and (
                'OpenFace' in found_models) and ('DeepFace' in found_models):
            print("Ensemble learning will be applied for ", found_models,
                  " models")
        else:
            raise ValueError(
                "Ensemble learning requires pre-built models for [VGG-Face, Facenet, OpenFace, DeepFace] but you passed "
                + str(found_models))

        #--------------------------

        model_names = ["VGG-Face", "Facenet", "OpenFace", "DeepFace"]
        metrics = ["cosine", "euclidean", "euclidean_l2"]

        pbar = tqdm(range(0, len(img_list)), desc='Verification')

        #for instance in img_list:
        for index in pbar:
            instance = img_list[index]

            if type(instance) == list and len(instance) >= 2:
                img1_path = instance[0]
                img2_path = instance[1]

                ensemble_features = []
                ensemble_features_string = "["

                for i in model_names:
                    custom_model = model[i]

                    #some TF/Keras versions return [(None, 224, 224, 3)] instead of (None, 224, 224, 3); both cases are handled below

                    input_shape = custom_model.layers[0].input_shape

                    if type(input_shape) == list:
                        input_shape = input_shape[0][1:3]
                    else:
                        input_shape = input_shape[1:3]

                    img1 = functions.detectFace(
                        img1_path,
                        input_shape,
                        enforce_detection=enforce_detection)
                    img2 = functions.detectFace(
                        img2_path,
                        input_shape,
                        enforce_detection=enforce_detection)

                    img1_representation = custom_model.predict(img1)[0, :]
                    img2_representation = custom_model.predict(img2)[0, :]

                    for j in metrics:
                        if j == 'cosine':
                            distance = dst.findCosineDistance(
                                img1_representation, img2_representation)
                        elif j == 'euclidean':
                            distance = dst.findEuclideanDistance(
                                img1_representation, img2_representation)
                        elif j == 'euclidean_l2':
                            distance = dst.findEuclideanDistance(
                                dst.l2_normalize(img1_representation),
                                dst.l2_normalize(img2_representation))

                        if i == 'OpenFace' and j == 'euclidean':  #euclidean duplicates euclidean_l2 for OpenFace
                            continue
                        else:

                            ensemble_features.append(distance)

                            if len(ensemble_features) > 1:
                                ensemble_features_string += ", "
                            ensemble_features_string += str(distance)

                #print("ensemble_features: ", ensemble_features)
                ensemble_features_string += "]"

                #-------------------------------
                #find deepface path

                home = str(Path.home())

                if os.path.isfile(
                        home +
                        '/.deepface/weights/face-recognition-ensemble-model.txt'
                ) != True:
                    print(
                        "face-recognition-ensemble-model.txt will be downloaded..."
                    )
                    url = 'https://raw.githubusercontent.com/serengil/deepface/master/deepface/models/face-recognition-ensemble-model.txt'
                    output = home + '/.deepface/weights/face-recognition-ensemble-model.txt'
                    gdown.download(url, output, quiet=False)

                ensemble_model_path = home + '/.deepface/weights/face-recognition-ensemble-model.txt'

                #print(ensemble_model_path)

                #-------------------------------

                deepface_ensemble = lgb.Booster(model_file=ensemble_model_path)

                prediction = deepface_ensemble.predict(
                    np.expand_dims(np.array(ensemble_features), axis=0))[0]

                verified = np.argmax(prediction) == 1
                if verified: identified = "true"
                else: identified = "false"

                score = prediction[np.argmax(prediction)]

                #print("verified: ", verified,", score: ", score)

                resp_obj = "{"
                resp_obj += "\"verified\": " + identified
                resp_obj += ", \"score\": " + str(score)
                resp_obj += ", \"distance\": " + ensemble_features_string
                resp_obj += ", \"model\": [\"VGG-Face\", \"Facenet\", \"OpenFace\", \"DeepFace\"]"
                resp_obj += ", \"similarity_metric\": [\"cosine\", \"euclidean\", \"euclidean_l2\"]"
                resp_obj += "}"

                #print(resp_obj)

                resp_obj = json.loads(resp_obj)  #string to json

                if bulkProcess == True:
                    resp_objects.append(resp_obj)
                else:
                    return resp_obj

                #-------------------------------

        if bulkProcess == True:
            resp_obj = "{"

            for i in range(0, len(resp_objects)):
                resp_item = json.dumps(resp_objects[i])

                if i > 0:
                    resp_obj += ", "

                resp_obj += "\"pair_" + str(i + 1) + "\": " + resp_item
            resp_obj += "}"
            resp_obj = json.loads(resp_obj)
            return resp_obj

        return None

    #ensemble learning block end
    #--------------------------------
    #ensemble learning disabled

    if model == None:
        if model_name == 'VGG-Face':
            print("Using VGG-Face model backend and", distance_metric,
                  "distance.")
            model = VGGFace.loadModel()

        elif model_name == 'OpenFace':
            print("Using OpenFace model backend", distance_metric, "distance.")
            model = OpenFace.loadModel()

        elif model_name == 'Facenet':
            print("Using Facenet model backend", distance_metric, "distance.")
            model = Facenet.loadModel()

        elif model_name == 'DeepFace':
            print("Using FB DeepFace model backend", distance_metric,
                  "distance.")
            model = FbDeepFace.loadModel()

        elif model_name == 'DeepID':
            print("Using DeepID2 model backend", distance_metric, "distance.")
            model = DeepID.loadModel()

        else:
            raise ValueError("Invalid model_name passed - ", model_name)
    else:  #model != None
        print("Already built model is passed")

    #------------------------------
    #face recognition models have different input sizes
    #some TF/Keras versions return [(None, 224, 224, 3)] instead of (None, 224, 224, 3); both cases are handled below

    input_shape = model.layers[0].input_shape

    if type(input_shape) == list:
        input_shape = input_shape[0][1:3]
    else:
        input_shape = input_shape[1:3]

    input_shape_x = input_shape[0]
    input_shape_y = input_shape[1]

    #------------------------------

    #tuned thresholds for model and metric pair
    threshold = functions.findThreshold(model_name, distance_metric)

    #------------------------------
    pbar = tqdm(range(0, len(img_list)), desc='Verification')

    #for instance in img_list:
    for index in pbar:

        instance = img_list[index]

        if type(instance) == list and len(instance) >= 2:
            img1_path = instance[0]
            img2_path = instance[1]

            #----------------------
            #crop and align faces

            img1 = functions.detectFace(img1_path,
                                        (input_shape_y, input_shape_x),
                                        enforce_detection=enforce_detection)
            img2 = functions.detectFace(img2_path,
                                        (input_shape_y, input_shape_x),
                                        enforce_detection=enforce_detection)

            #----------------------
            #find embeddings

            img1_representation = model.predict(img1)[0, :]
            img2_representation = model.predict(img2)[0, :]

            #----------------------
            #find distances between embeddings

            if distance_metric == 'cosine':
                distance = dst.findCosineDistance(img1_representation,
                                                  img2_representation)
            elif distance_metric == 'euclidean':
                distance = dst.findEuclideanDistance(img1_representation,
                                                     img2_representation)
            elif distance_metric == 'euclidean_l2':
                distance = dst.findEuclideanDistance(
                    dst.l2_normalize(img1_representation),
                    dst.l2_normalize(img2_representation))
            else:
                raise ValueError("Invalid distance_metric passed - ",
                                 distance_metric)

            #----------------------
            #decision

            if distance <= threshold:
                identified = "true"
            else:
                identified = "false"

            #----------------------
            #response object

            resp_obj = "{"
            resp_obj += "\"verified\": " + identified
            resp_obj += ", \"distance\": " + str(distance)
            resp_obj += ", \"max_threshold_to_verify\": " + str(threshold)
            resp_obj += ", \"model\": \"" + model_name + "\""
            resp_obj += ", \"similarity_metric\": \"" + distance_metric + "\""
            resp_obj += "}"

            resp_obj = json.loads(resp_obj)  #string to json

            if bulkProcess == True:
                resp_objects.append(resp_obj)
            else:
                #K.clear_session()
                return resp_obj
            #----------------------

        else:
            raise ValueError("Invalid arguments passed to verify function: ",
                             instance)

    #-------------------------

    toc = time.time()

    #print("identification lasts ",toc-tic," seconds")

    if bulkProcess == True:
        resp_obj = "{"

        for i in range(0, len(resp_objects)):
            resp_item = json.dumps(resp_objects[i])

            if i > 0:
                resp_obj += ", "

            resp_obj += "\"pair_" + str(i + 1) + "\": " + resp_item
        resp_obj += "}"
        resp_obj = json.loads(resp_obj)
        return resp_obj
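A minimal sketch of calling the verify function above in bulk mode; the image paths are placeholders. Passing a list of [img1, img2] pairs returns one response object per pair, keyed "pair_1", "pair_2", and so on.

# Hypothetical bulk usage of verify()
pairs = [['img1.jpg', 'img2.jpg'], ['img1.jpg', 'img3.jpg']]  #placeholder paths
resp = verify(pairs, model_name='VGG-Face', distance_metric='cosine')
print(resp['pair_1']['verified'], resp['pair_1']['distance'])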
Example #4
def verify(img1_path,
           img2_path,
           model_name='VGG-Face',
           distance_metric='cosine',
           plot=False):

    tic = time.time()

    if os.path.isfile(img1_path) != True:
        raise ValueError("Confirm that ", img1_path, " exists")

    if os.path.isfile(img2_path) != True:
        raise ValueError("Confirm that ", img2_path, " exists")

    #-------------------------

    #tuned thresholds for model and metric pair
    threshold = functions.findThreshold(model_name, distance_metric)

    #-------------------------

    if model_name == 'VGG-Face':
        print("Using VGG-Face model backend and", distance_metric, "distance.")
        model = VGGFace.loadModel()
        input_shape = (224, 224)

    elif model_name == 'OpenFace':
        print("Using OpenFace model backend", distance_metric, "distance.")
        model = OpenFace.loadModel()
        input_shape = (96, 96)

    elif model_name == 'Facenet':
        print("Using Facenet model backend", distance_metric, "distance.")
        model = Facenet.loadModel()
        input_shape = (160, 160)

    elif model_name == 'DeepFace':
        print("Using FB DeepFace model backend", distance_metric, "distance.")
        model = FbDeepFace.loadModel()
        input_shape = (152, 152)

    else:
        raise ValueError("Invalid model_name passed - ", model_name)

    #-------------------------
    #crop face

    img1 = functions.detectFace(img1_path, input_shape)
    img2 = functions.detectFace(img2_path, input_shape)

    #-------------------------
    #find embeddings

    img1_representation = model.predict(img1)[0, :]
    img2_representation = model.predict(img2)[0, :]

    #-------------------------
    #find distances between embeddings

    if distance_metric == 'cosine':
        distance = dst.findCosineDistance(img1_representation,
                                          img2_representation)
    elif distance_metric == 'euclidean':
        distance = dst.findEuclideanDistance(img1_representation,
                                             img2_representation)
    elif distance_metric == 'euclidean_l2':
        distance = dst.findEuclideanDistance(
            dst.l2_normalize(img1_representation),
            dst.l2_normalize(img2_representation))
    else:
        raise ValueError("Invalid distance_metric passed - ", distance_metric)

    #-------------------------
    #decision

    if distance <= threshold:
        identified = "true"
    else:
        identified = "false"

    #-------------------------

    if plot:
        label = "Verified: " + identified
        label += "\nThreshold: " + str(round(distance, 2))
        label += ", Max Threshold to Verify: " + str(threshold)
        label += "\nModel: " + model_name
        label += ", Similarity metric: " + distance_metric

        fig = plt.figure()
        fig.add_subplot(1, 2, 1)
        plt.imshow(img1[0][:, :, ::-1])
        plt.xticks([])
        plt.yticks([])
        fig.add_subplot(1, 2, 2)
        plt.imshow(img2[0][:, :, ::-1])
        plt.xticks([])
        plt.yticks([])
        fig.suptitle(label, fontsize=17)
        plt.show(block=True)

    #-------------------------

    toc = time.time()

    resp_obj = "{"
    resp_obj += "\"verified\": " + identified
    resp_obj += ", \"distance\": " + str(distance)
    resp_obj += ", \"max_threshold_to_verify\": " + str(threshold)
    resp_obj += ", \"model\": \"" + model_name + "\""
    resp_obj += ", \"similarity_metric\": \"" + distance_metric + "\""
    resp_obj += "}"

    resp_obj = json.loads(resp_obj)  #string to json

    #print("identification lasts ",toc-tic," seconds")

    return resp_obj
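Building resp_obj by string concatenation and re-parsing it with json.loads works here only because identified already holds the literal "true"/"false"; constructing a dict directly is less fragile. A sketch of the equivalent:

# Equivalent response built as a plain dict (sketch); no string-concat +
# json.loads round trip, and no quoting/escaping pitfalls.
resp_obj = {
    "verified": identified == "true",
    "distance": float(distance),
    "max_threshold_to_verify": threshold,
    "model": model_name,
    "similarity_metric": distance_metric,
}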
Example #5
def verify(img1_path, img2_path,
	model_name='VGG-Face', distance_metric='cosine'):
	
	tic = time.time()
	
	if os.path.isfile(img1_path) != True:
		raise ValueError("Confirm that ",img1_path," exists")
	
	if os.path.isfile(img2_path) != True:
		raise ValueError("Confirm that ",img2_path," exists")
		
	#-------------------------
	
	#print("Face verification will be applied on ",model_name," model and ",distance_metric," metric")
	
	functions.validateInputs(model_name, distance_metric)
	
	#-------------------------
	
	#tuned thresholds for model and metric pair
	threshold = functions.findThreshold(model_name, distance_metric)
	
	#-------------------------
	
	if model_name == 'VGG-Face':
		model = VGGFace.loadModel()
		input_shape = (224, 224)	
	
	elif model_name == 'OpenFace':
		model = OpenFace.loadModel()
		input_shape = (96, 96)
	
	elif model_name == 'Facenet':
		model = Facenet.loadModel()
		input_shape = (160, 160)
	
	#-------------------------
	#crop face
	
	img1 = functions.detectFace(img1_path, input_shape)
	img2 = functions.detectFace(img2_path, input_shape)
	
	#-------------------------
	#TO-DO: Apply face alignment here. Experiments show that alignment increases accuracy by ~1%.
	
	#-------------------------
	#find embeddings
	
	img1_representation = model.predict(img1)[0,:]
	img2_representation = model.predict(img2)[0,:]
	
	#-------------------------
	#find distances between embeddings
	
	if distance_metric == 'cosine':
		distance = dst.findCosineDistance(img1_representation, img2_representation)
	elif distance_metric == 'euclidean':
		distance = dst.findEuclideanDistance(img1_representation, img2_representation)
	elif distance_metric == 'euclidean_l2':
		distance = dst.findEuclideanDistance(dst.l2_normalize(img1_representation), dst.l2_normalize(img2_representation))
	
	#-------------------------
	#decision
	
	if distance <= threshold:
		identified = True
		message = "Both face photos are of the same person."
	else:
		identified = False
		message = "Both face photos are not of the same person!"
	
	#-------------------------
	
	plot = False
	
	if plot:
		label = "Distance is "+str(round(distance, 2))+"\nwhereas max threshold is "+ str(threshold)+ ".\n"+ message
		
		fig = plt.figure()
		fig.add_subplot(1,2, 1)
		plt.imshow(img1[0][:, :, ::-1])
		plt.xticks([]); plt.yticks([])
		fig.add_subplot(1,2, 2)
		plt.imshow(img2[0][:, :, ::-1])
		plt.xticks([]); plt.yticks([])
		fig.suptitle(label, fontsize=17)
		plt.show(block=True)
	
	#-------------------------
	
	toc = time.time()
	
	#print("identification lasts ",toc-tic," seconds")
	
	#Return a tuple. The first item is the verification result based on the tuned threshold,
	#the second item is the measured distance, and the third is the threshold itself.
	#You might want to customize this threshold to identify faces.
	return (identified, distance, threshold)
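A short usage sketch for the tuple-returning verify variant above; the image paths and model choice are placeholders. Since the raw distance is returned, a stricter custom threshold can be applied on top of the tuned one.

# Hypothetical usage of the (identified, distance, threshold) variant
identified, distance, threshold = verify('img1.jpg', 'img2.jpg',
                                         model_name='Facenet',
                                         distance_metric='euclidean_l2')
print(identified, round(distance, 4), threshold)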
Example #6
def analysis(img,
             db_path,
             input_shape,
             model_name,
             distance_metric,
             model=None,
             enable_face_analysis=True):
    if model is None:
        model, input_shape = create_model(db_path, model_name)
    file_name = "representations_%s.pkl" % (model_name)
    with open(db_path + '/' + file_name, 'rb') as f:
        embeddings = pickle.load(f)
    input_shape_x = input_shape[0]
    input_shape_y = input_shape[1]
    time_threshold = 5
    frame_threshold = 5
    pivot_img_size = 112  #face recognition result image

    #-----------------------

    opencv_path = functions.get_opencv_path()
    face_detector_path = opencv_path + "haarcascade_frontalface_default.xml"
    face_cascade = cv2.CascadeClassifier(face_detector_path)

    #-----------------------

    freeze = False
    face_detected = False
    face_included_frames = 0  #freeze screen if a face is detected in 5 sequential frames
    freezed_frame = 0
    tic = time.time()
    text_color = (255, 255, 255)

    employees = []
    #check passed db folder exists
    if os.path.isdir(db_path) == True:
        for r, d, f in os.walk(db_path):  # r=root, d=directories, f = files
            for file in f:
                if file.lower().endswith('.jpg'):
                    #exact_path = os.path.join(r, file)
                    exact_path = r + "/" + file
                    #print(exact_path)
                    employees.append(exact_path)

    df = pd.DataFrame(embeddings, columns=['employee', 'embedding'])
    df['distance_metric'] = distance_metric
    #     cap = VideoStream(src=0,usePiCamera=True,framerate=32).start()
    threshold = functions.findThreshold(model_name, distance_metric)
    #     time.sleep(2)
    i = 1
    while i:  #run a single pass over the provided frame
        label = 'None'
        i = 0
        start = time.time()
        #         img = cap.read()
        faces = face_cascade.detectMultiScale(img, 1.3, 5)
        for (x, y, w, h) in faces:
            custom_face = functions.preprocess_face(
                img=img,
                target_size=(input_shape_y, input_shape_x),
                enforce_detection=False)
            #check preprocess_face function handled
            if custom_face.shape[1:3] == input_shape:
                if df.shape[0] > 0:  #if there are images to verify, apply face recognition
                    img1_representation = model.predict(custom_face)[0, :]

                    #print(freezed_frame," - ",img1_representation[0:5])

                    def findDistance(row):
                        distance_metric = row['distance_metric']
                        img2_representation = row['embedding']

                        distance = 1000  #initialize very large value
                        if distance_metric == 'cosine':
                            distance = dst.findCosineDistance(
                                img1_representation, img2_representation)
                        elif distance_metric == 'euclidean':
                            distance = dst.findEuclideanDistance(
                                img1_representation, img2_representation)
                        elif distance_metric == 'euclidean_l2':
                            distance = dst.findEuclideanDistance(
                                dst.l2_normalize(img1_representation),
                                dst.l2_normalize(img2_representation))

                        return distance

                    df['distance'] = df.apply(findDistance, axis=1)
                    df = df.sort_values(by=["distance"])

                    candidate = df.iloc[0]
                    employee_name = candidate['employee']
                    best_distance = candidate['distance']

                    if best_distance <= threshold:
                        #print(employee_name)
                        end = time.time()
                        display_img = cv2.imread(employee_name)

                        display_img = cv2.resize(
                            display_img, (pivot_img_size, pivot_img_size))

                        label = employee_name.split("/")[-2].replace(
                            ".jpg", "")
                        print(best_distance, "--->", label, "----->",
                              str(end - start))
                    else:
                        label = "Unkown"
        return label
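A hedged driver sketch for the single-frame analysis function above; the frame source, database folder, and model settings are placeholder assumptions, and the representations .pkl file is assumed to already exist in db_path (the function opens it directly).

# Hypothetical single-frame usage of analysis(). create_model() is invoked
# internally when model is None, so only paths/settings are needed here.
frame = cv2.imread('frame.jpg')  #placeholder frame source
label = analysis(frame, './db', (224, 224), 'VGG-Face', 'cosine')
print("recognized:", label)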
Example #7
def analysis(db_path, model_name, distance_metric, enable_face_analysis=True):

    input_shape = (224, 224)
    text_color = (255, 255, 255)

    employees = []
    #check passed db folder exists
    if os.path.isdir(db_path) == True:
        for r, d, f in os.walk(db_path):  # r=root, d=directories, f = files
            for file in f:
                if file.lower().endswith('.jpg'):
                    #exact_path = os.path.join(r, file)
                    exact_path = r + "/" + file
                    #print(exact_path)
                    employees.append(exact_path)

    #------------------------

    if len(employees) > 0:
        if model_name == 'VGG-Face':
            print("Using VGG-Face model backend and", distance_metric,
                  "distance.")
            model = VGGFace.loadModel()
            input_shape = (224, 224)

        elif model_name == 'OpenFace':
            print("Using OpenFace model backend", distance_metric, "distance.")
            model = OpenFace.loadModel()
            input_shape = (96, 96)

        elif model_name == 'Facenet':
            print("Using Facenet model backend", distance_metric, "distance.")
            model = Facenet.loadModel()
            input_shape = (160, 160)

        elif model_name == 'DeepFace':
            print("Using FB DeepFace model backend", distance_metric,
                  "distance.")
            model = FbDeepFace.loadModel()
            input_shape = (152, 152)

        else:
            raise ValueError("Invalid model_name passed - ", model_name)
        #------------------------

        #tuned thresholds for model and metric pair
        threshold = functions.findThreshold(model_name, distance_metric)

    #------------------------
    #facial attribute analysis models

    if enable_face_analysis == True:

        tic = time.time()

        emotion_model = Emotion.loadModel()
        print("Emotion model loaded")

        age_model = Age.loadModel()
        print("Age model loaded")

        gender_model = Gender.loadModel()
        print("Gender model loaded")

        toc = time.time()

        print("Facial attibute analysis models loaded in ", toc - tic,
              " seconds")

    #------------------------

    #find embeddings for employee list

    tic = time.time()

    pbar = tqdm(range(0, len(employees)), desc='Finding embeddings')

    embeddings = []
    #for employee in employees:
    for index in pbar:
        employee = employees[index]
        pbar.set_description("Finding embedding for %s" %
                             (employee.split("/")[-1]))
        embedding = []
        img = functions.detectFace(employee, input_shape)
        img_representation = model.predict(img)[0, :]

        embedding.append(employee)
        embedding.append(img_representation)
        embeddings.append(embedding)

    df = pd.DataFrame(embeddings, columns=['employee', 'embedding'])
    df['distance_metric'] = distance_metric

    toc = time.time()

    print("Embeddings found for given data set in ", toc - tic, " seconds")

    #-----------------------

    time_threshold = 5
    frame_threshold = 5
    pivot_img_size = 112  #face recognition result image

    #-----------------------

    opencv_path = functions.get_opencv_path()
    face_detector_path = opencv_path + "haarcascade_frontalface_default.xml"
    face_cascade = cv2.CascadeClassifier(face_detector_path)

    #-----------------------

    freeze = False
    face_detected = False
    face_included_frames = 0  #freeze screen if a face is detected in 5 sequential frames
    freezed_frame = 0
    tic = time.time()

    cap = cv2.VideoCapture(0)  #webcam
    #cap = cv2.VideoCapture("C:/Users/IS96273/Desktop/skype-video-1.mp4") #video

    while (True):
        ret, img = cap.read()

        #cv2.namedWindow('img', cv2.WINDOW_FREERATIO)
        #cv2.setWindowProperty('img', cv2.WND_PROP_FULLSCREEN, cv2.WINDOW_FULLSCREEN)

        raw_img = img.copy()
        resolution = img.shape

        resolution_x = img.shape[1]
        resolution_y = img.shape[0]

        if freeze == False:
            faces = face_cascade.detectMultiScale(img, 1.3, 5)

            if len(faces) == 0:
                face_included_frames = 0
        else:
            faces = []

        detected_faces = []
        face_index = 0
        for (x, y, w, h) in faces:
            if w > 130:  #discard small detected faces

                face_detected = True
                if face_index == 0:
                    face_included_frames = face_included_frames + 1  #increase frame for a single face

                cv2.rectangle(img, (x, y), (x + w, y + h), (67, 67, 67),
                              1)  #draw rectangle to main image

                cv2.putText(img, str(frame_threshold - face_included_frames),
                            (int(x + w / 4), int(y + h / 1.5)),
                            cv2.FONT_HERSHEY_SIMPLEX, 4, (255, 255, 255), 2)

                detected_face = img[int(y):int(y + h),
                                    int(x):int(x + w)]  #crop detected face

                #-------------------------------------

                detected_faces.append((x, y, w, h))
                face_index = face_index + 1

                #-------------------------------------

        if face_detected == True and face_included_frames == frame_threshold and freeze == False:
            freeze = True
            #base_img = img.copy()
            base_img = raw_img.copy()
            detected_faces_final = detected_faces.copy()
            tic = time.time()

        if freeze == True:

            toc = time.time()
            if (toc - tic) < time_threshold:

                if freezed_frame == 0:
                    freeze_img = base_img.copy()
                    #freeze_img = np.zeros(resolution, np.uint8) #here, np.uint8 handles showing white area issue

                    for detected_face in detected_faces_final:
                        x = detected_face[0]
                        y = detected_face[1]
                        w = detected_face[2]
                        h = detected_face[3]

                        cv2.rectangle(freeze_img, (x, y), (x + w, y + h),
                                      (67, 67, 67),
                                      1)  #draw rectangle to main image

                        #-------------------------------

                        #apply deep learning for custom_face

                        custom_face = base_img[y:y + h, x:x + w]

                        #-------------------------------
                        #facial attribute analysis

                        if enable_face_analysis == True:

                            gray_img = functions.detectFace(
                                custom_face, (48, 48), True)
                            emotion_labels = [
                                'Angry', 'Disgust', 'Fear', 'Happy', 'Sad',
                                'Surprise', 'Neutral'
                            ]
                            emotion_predictions = emotion_model.predict(
                                gray_img)[0, :]
                            sum_of_predictions = emotion_predictions.sum()

                            mood_items = []
                            for i in range(0, len(emotion_labels)):
                                mood_item = []
                                emotion_label = emotion_labels[i]
                                emotion_prediction = 100 * emotion_predictions[
                                    i] / sum_of_predictions
                                mood_item.append(emotion_label)
                                mood_item.append(emotion_prediction)
                                mood_items.append(mood_item)

                            emotion_df = pd.DataFrame(
                                mood_items, columns=["emotion", "score"])
                            emotion_df = emotion_df.sort_values(
                                by=["score"],
                                ascending=False).reset_index(drop=True)

                            #background of mood box

                            #transparency
                            overlay = freeze_img.copy()
                            opacity = 0.4

                            if x + w + pivot_img_size < resolution_x:
                                #right
                                cv2.rectangle(
                                    freeze_img
                                    #, (x+w,y+20)
                                    ,
                                    (x + w, y),
                                    (x + w + pivot_img_size, y + h),
                                    (64, 64, 64),
                                    cv2.FILLED)

                                cv2.addWeighted(overlay, opacity, freeze_img,
                                                1 - opacity, 0, freeze_img)

                            elif x - pivot_img_size > 0:
                                #left
                                cv2.rectangle(
                                    freeze_img
                                    #, (x-pivot_img_size,y+20)
                                    ,
                                    (x - pivot_img_size, y),
                                    (x, y + h),
                                    (64, 64, 64),
                                    cv2.FILLED)

                                cv2.addWeighted(overlay, opacity, freeze_img,
                                                1 - opacity, 0, freeze_img)

                            for index, instance in emotion_df.iterrows():
                                emotion_label = "%s " % (instance['emotion'])
                                emotion_score = instance['score'] / 100

                                bar_x = 35  #bar length when an emotion scores 100%
                                bar_x = int(bar_x * emotion_score)

                                if x + w + pivot_img_size < resolution_x:

                                    text_location_y = y + 20 + (index + 1) * 20
                                    text_location_x = x + w

                                    if text_location_y < y + h:
                                        cv2.putText(
                                            freeze_img, emotion_label,
                                            (text_location_x, text_location_y),
                                            cv2.FONT_HERSHEY_SIMPLEX, 0.5,
                                            (255, 255, 255), 1)

                                        cv2.rectangle(
                                            freeze_img, (x + w + 70, y + 13 +
                                                         (index + 1) * 20),
                                            (x + w + 70 + bar_x, y + 13 +
                                             (index + 1) * 20 + 5),
                                            (255, 255, 255), cv2.FILLED)

                                elif x - pivot_img_size > 0:

                                    text_location_y = y + 20 + (index + 1) * 20
                                    text_location_x = x - pivot_img_size

                                    if text_location_y <= y + h:
                                        cv2.putText(
                                            freeze_img, emotion_label,
                                            (text_location_x, text_location_y),
                                            cv2.FONT_HERSHEY_SIMPLEX, 0.5,
                                            (255, 255, 255), 1)

                                        cv2.rectangle(
                                            freeze_img,
                                            (x - pivot_img_size + 70, y + 13 +
                                             (index + 1) * 20),
                                            (x - pivot_img_size + 70 + bar_x,
                                             y + 13 + (index + 1) * 20 + 5),
                                            (255, 255, 255), cv2.FILLED)

                            #-------------------------------

                            face_224 = functions.detectFace(
                                custom_face, (224, 224), False)

                            age_predictions = age_model.predict(face_224)[0, :]
                            apparent_age = Age.findApparentAge(age_predictions)

                            #-------------------------------

                            gender_prediction = gender_model.predict(face_224)[
                                0, :]

                            if np.argmax(gender_prediction) == 0:
                                gender = "W"
                            elif np.argmax(gender_prediction) == 1:
                                gender = "M"

                            #print(str(int(apparent_age))," years old ", dominant_emotion, " ", gender)

                            analysis_report = str(
                                int(apparent_age)) + " " + gender

                            #-------------------------------

                            info_box_color = (46, 200, 255)

                            #top
                            if y - pivot_img_size + int(
                                    pivot_img_size / 5) > 0:

                                triangle_coordinates = np.array([
                                    (x + int(w / 2), y),
                                    (x + int(w / 2) - int(w / 10),
                                     y - int(pivot_img_size / 3)),
                                    (x + int(w / 2) + int(w / 10),
                                     y - int(pivot_img_size / 3))
                                ])

                                cv2.drawContours(freeze_img,
                                                 [triangle_coordinates], 0,
                                                 info_box_color, -1)

                                cv2.rectangle(
                                    freeze_img,
                                    (x + int(w / 5), y - pivot_img_size +
                                     int(pivot_img_size / 5)),
                                    (x + w - int(w / 5),
                                     y - int(pivot_img_size / 3)),
                                    info_box_color, cv2.FILLED)

                                cv2.putText(freeze_img, analysis_report,
                                            (x + int(w / 3.5),
                                             y - int(pivot_img_size / 2.1)),
                                            cv2.FONT_HERSHEY_SIMPLEX, 1,
                                            (0, 111, 255), 2)

                            #bottom
                            elif y + h + pivot_img_size - int(
                                    pivot_img_size / 5) < resolution_y:

                                triangle_coordinates = np.array([
                                    (x + int(w / 2), y + h),
                                    (x + int(w / 2) - int(w / 10),
                                     y + h + int(pivot_img_size / 3)),
                                    (x + int(w / 2) + int(w / 10),
                                     y + h + int(pivot_img_size / 3))
                                ])

                                cv2.drawContours(freeze_img,
                                                 [triangle_coordinates], 0,
                                                 info_box_color, -1)

                                cv2.rectangle(
                                    freeze_img,
                                    (x + int(w / 5),
                                     y + h + int(pivot_img_size / 3)),
                                    (x + w - int(w / 5), y + h +
                                     pivot_img_size - int(pivot_img_size / 5)),
                                    info_box_color, cv2.FILLED)

                                cv2.putText(freeze_img, analysis_report,
                                            (x + int(w / 3.5), y + h +
                                             int(pivot_img_size / 1.5)),
                                            cv2.FONT_HERSHEY_SIMPLEX, 1,
                                            (0, 111, 255), 2)

                        #-------------------------------
                        #face recognition

                        custom_face = functions.detectFace(
                            custom_face, input_shape)

                        #check detectFace function handled
                        if custom_face.shape[1:3] == input_shape:
                            if df.shape[0] > 0:  #if there are images to verify, apply face recognition
                                img1_representation = model.predict(
                                    custom_face)[0, :]

                                #print(freezed_frame," - ",img1_representation[0:5])

                                def findDistance(row):
                                    distance_metric = row['distance_metric']
                                    img2_representation = row['embedding']

                                    distance = 1000  #initialize very large value
                                    if distance_metric == 'cosine':
                                        distance = dst.findCosineDistance(
                                            img1_representation,
                                            img2_representation)
                                    elif distance_metric == 'euclidean':
                                        distance = dst.findEuclideanDistance(
                                            img1_representation,
                                            img2_representation)
                                    elif distance_metric == 'euclidean_l2':
                                        distance = dst.findEuclideanDistance(
                                            dst.l2_normalize(
                                                img1_representation),
                                            dst.l2_normalize(
                                                img2_representation))

                                    return distance

                                df['distance'] = df.apply(findDistance, axis=1)
                                df = df.sort_values(by=["distance"])

                                candidate = df.iloc[0]
                                employee_name = candidate['employee']
                                best_distance = candidate['distance']

                                if best_distance <= threshold:
                                    #print(employee_name)
                                    display_img = cv2.imread(employee_name)

                                    display_img = cv2.resize(
                                        display_img,
                                        (pivot_img_size, pivot_img_size))

                                    label = employee_name.split(
                                        "/")[-1].replace(".jpg", "")
                                    label = re.sub('[0-9]', '', label)

                                    try:
                                        if y - pivot_img_size > 0 and x + w + pivot_img_size < resolution_x:
                                            #top right
                                            freeze_img[
                                                y - pivot_img_size:y,
                                                x + w:x + w +
                                                pivot_img_size] = display_img

                                            overlay = freeze_img.copy()
                                            opacity = 0.4
                                            cv2.rectangle(
                                                freeze_img, (x + w, y),
                                                (x + w + pivot_img_size,
                                                 y + 20), (46, 200, 255),
                                                cv2.FILLED)
                                            cv2.addWeighted(
                                                overlay, opacity, freeze_img,
                                                1 - opacity, 0, freeze_img)

                                            cv2.putText(
                                                freeze_img, label,
                                                (x + w, y + 10),
                                                cv2.FONT_HERSHEY_SIMPLEX, 0.5,
                                                text_color, 1)

                                            #connect face and text
                                            cv2.line(freeze_img,
                                                     (x + int(w / 2), y),
                                                     (x + 3 * int(w / 4), y -
                                                      int(pivot_img_size / 2)),
                                                     (67, 67, 67), 1)
                                            cv2.line(freeze_img,
                                                     (x + 3 * int(w / 4), y -
                                                      int(pivot_img_size / 2)),
                                                     (x + w, y -
                                                      int(pivot_img_size / 2)),
                                                     (67, 67, 67), 1)

                                        elif y + h + pivot_img_size < resolution_y and x - pivot_img_size > 0:
                                            #bottom left
                                            freeze_img[
                                                y + h:y + h + pivot_img_size,
                                                x -
                                                pivot_img_size:x] = display_img

                                            overlay = freeze_img.copy()
                                            opacity = 0.4
                                            cv2.rectangle(
                                                freeze_img,
                                                (x - pivot_img_size,
                                                 y + h - 20), (x, y + h),
                                                (46, 200, 255), cv2.FILLED)
                                            cv2.addWeighted(
                                                overlay, opacity, freeze_img,
                                                1 - opacity, 0, freeze_img)

                                            cv2.putText(
                                                freeze_img, label,
                                                (x - pivot_img_size,
                                                 y + h - 10),
                                                cv2.FONT_HERSHEY_SIMPLEX, 0.5,
                                                text_color, 1)

                                            #connect face and text
                                            cv2.line(freeze_img,
                                                     (x + int(w / 2), y + h),
                                                     (x + int(w / 2) -
                                                      int(w / 4), y + h +
                                                      int(pivot_img_size / 2)),
                                                     (67, 67, 67), 1)
                                            cv2.line(freeze_img,
                                                     (x + int(w / 2) -
                                                      int(w / 4), y + h +
                                                      int(pivot_img_size / 2)),
                                                     (x, y + h +
                                                      int(pivot_img_size / 2)),
                                                     (67, 67, 67), 1)

                                        elif y - pivot_img_size > 0 and x - pivot_img_size > 0:
                                            #top left
                                            freeze_img[
                                                y - pivot_img_size:y, x -
                                                pivot_img_size:x] = display_img

                                            overlay = freeze_img.copy()
                                            opacity = 0.4
                                            cv2.rectangle(
                                                freeze_img,
                                                (x - pivot_img_size, y),
                                                (x, y + 20), (46, 200, 255),
                                                cv2.FILLED)
                                            cv2.addWeighted(
                                                overlay, opacity, freeze_img,
                                                1 - opacity, 0, freeze_img)

                                            cv2.putText(
                                                freeze_img, label,
                                                (x - pivot_img_size, y + 10),
                                                cv2.FONT_HERSHEY_SIMPLEX, 0.5,
                                                text_color, 1)

                                            #connect face and text
                                            cv2.line(
                                                freeze_img,
                                                (x + int(w / 2), y),
                                                (x + int(w / 2) - int(w / 4),
                                                 y - int(pivot_img_size / 2)),
                                                (67, 67, 67), 1)
                                            cv2.line(
                                                freeze_img,
                                                (x + int(w / 2) - int(w / 4),
                                                 y - int(pivot_img_size / 2)),
                                                (x,
                                                 y - int(pivot_img_size / 2)),
                                                (67, 67, 67), 1)

                                        elif x + w + pivot_img_size < resolution_x and y + h + pivot_img_size < resolution_y:
                                            #bottom right
                                            freeze_img[
                                                y + h:y + h + pivot_img_size,
                                                x + w:x + w +
                                                pivot_img_size] = display_img

                                            overlay = freeze_img.copy()
                                            opacity = 0.4
                                            cv2.rectangle(
                                                freeze_img,
                                                (x + w, y + h - 20),
                                                (x + w + pivot_img_size,
                                                 y + h), (46, 200, 255),
                                                cv2.FILLED)
                                            cv2.addWeighted(
                                                overlay, opacity, freeze_img,
                                                1 - opacity, 0, freeze_img)

                                            cv2.putText(
                                                freeze_img, label,
                                                (x + w, y + h - 10),
                                                cv2.FONT_HERSHEY_SIMPLEX, 0.5,
                                                text_color, 1)

                                            #connect face and text
                                            cv2.line(freeze_img,
                                                     (x + int(w / 2), y + h),
                                                     (x + int(w / 2) +
                                                      int(w / 4), y + h +
                                                      int(pivot_img_size / 2)),
                                                     (67, 67, 67), 1)
                                            cv2.line(freeze_img,
                                                     (x + int(w / 2) +
                                                      int(w / 4), y + h +
                                                      int(pivot_img_size / 2)),
                                                     (x + w, y + h +
                                                      int(pivot_img_size / 2)),
                                                     (67, 67, 67), 1)
                                    except Exception as err:
                                        print(str(err))

                        tic = time.time()  #reset the timer so the frozen image stays visible for time_threshold seconds

                        #-------------------------------

                time_left = int(time_threshold - (toc - tic) + 1)

                cv2.rectangle(freeze_img, (10, 10), (90, 50), (67, 67, 67),
                              cv2.FILLED)
                cv2.putText(freeze_img, str(time_left), (40, 40),
                            cv2.FONT_HERSHEY_SIMPLEX, 1, (255, 255, 255), 1)

                cv2.imshow('img', freeze_img)

                freezed_frame = freezed_frame + 1
            else:
                face_detected = False
                face_included_frames = 0
                freeze = False
                freezed_frame = 0

        else:
            cv2.imshow('img', img)

        if cv2.waitKey(1) & 0xFF == ord('q'):  #press q to quit
            break

    #release the capture and close OpenCV windows
    cap.release()
    cv2.destroyAllWindows()
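The findDistance helper above calls dst.findCosineDistance, dst.findEuclideanDistance and dst.l2_normalize, which these snippets never define. For reference, minimal NumPy sketches following the standard definitions (the actual deepface implementations may differ in detail):

import numpy as np

def find_cosine_distance(a, b):
    #cosine distance = 1 - cosine similarity
    a = np.asarray(a)
    b = np.asarray(b)
    return 1 - np.dot(a, b) / (np.linalg.norm(a) * np.linalg.norm(b))

def find_euclidean_distance(a, b):
    return np.linalg.norm(np.asarray(a) - np.asarray(b))

def l2_normalize(x):
    x = np.asarray(x)
    return x / np.sqrt(np.sum(np.multiply(x, x)))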
Example #8
def find(img_path,
         db_path,
         model_name='VGG-Face',
         distance_metric='cosine',
         model=None,
         enforce_detection=True,
         detector_backend='mtcnn'):

    tic = time.time()

    img_paths, bulkProcess = initialize_input(img_path)
    functions.initialize_detector(detector_backend=detector_backend)

    #-------------------------------

    if os.path.isdir(db_path) == True:

        if model == None:

            if model_name == 'Ensemble':
                print("Ensemble learning enabled")
                models = Boosting.loadModel()

            else:  #model is not ensemble
                model = build_model(model_name)
                models = {}
                models[model_name] = model

        else:  #model != None
            print("Already built model is passed")

            if model_name == 'Ensemble':
                Boosting.validate_model(model)
                models = model.copy()
            else:
                models = {}
                models[model_name] = model

        #---------------------------------------

        if model_name == 'Ensemble':
            model_names = ['VGG-Face', 'Facenet', 'OpenFace', 'DeepFace']
            metric_names = ['cosine', 'euclidean', 'euclidean_l2']
        else:
            model_names = [model_name]
            metric_names = [distance_metric]

        #---------------------------------------

        file_name = "representations_%s.pkl" % (model_name)
        file_name = file_name.replace("-", "_").lower()

        if path.exists(db_path + "/" + file_name):

            print(
                "WARNING: Representations for images in ", db_path,
                " folder were previously stored in ", file_name,
                ". If you added new instances after this file was created, delete the file and call the find function again; it will be rebuilt."
            )

            with open(db_path + '/' + file_name, 'rb') as f:
                representations = pickle.load(f)

            print(len(representations), " representations found in ",
                  file_name)

        else:  #create representation.pkl from scratch
            employees = []

            for r, d, f in os.walk(
                    db_path):  # r=root, d=directories, f = files
                for file in f:
                    if file.endswith('.jpg'):
                        exact_path = r + "/" + file
                        employees.append(exact_path)

            if len(employees) == 0:
                raise ValueError("There is no image in ", db_path, " folder!")

            #------------------------
            #find representations for db images

            representations = []

            pbar = tqdm(range(0, len(employees)),
                        desc='Finding representations')

            #for employee in employees:
            for index in pbar:
                employee = employees[index]

                instance = []
                instance.append(employee)

                for j in model_names:
                    custom_model = models[j]

                    #----------------------------------
                    #decide input shape

                    input_shape = functions.find_input_shape(custom_model)
                    input_shape_x = input_shape[0]
                    input_shape_y = input_shape[1]

                    #----------------------------------

                    img = functions.preprocess_face(
                        img=employee,
                        target_size=(input_shape_y, input_shape_x),
                        enforce_detection=enforce_detection,
                        detector_backend=detector_backend)

                    representation = custom_model.predict(img)[0, :]
                    instance.append(representation)

                #-------------------------------

                representations.append(instance)

            with open(db_path + '/' + file_name, "wb") as f:
                pickle.dump(representations, f)

            print(
                "Representations stored in ", db_path, "/", file_name,
                " file. Please delete this file when you add new identities to your database."
            )

        #----------------------------
        #now we have representations for the facial database

        if model_name != 'Ensemble':
            df = pd.DataFrame(
                representations,
                columns=["identity",
                         "%s_representation" % (model_name)])
        else:  #ensemble learning

            columns = ['identity']
            for i in model_names:
                columns.append('%s_representation' % i)

            df = pd.DataFrame(representations, columns=columns)

        df_base = df.copy()  #df will be filtered for each image; we restore it for the next item

        resp_obj = []

        global_pbar = tqdm(range(0, len(img_paths)), desc='Analyzing')
        for idx in global_pbar:
            img_path = img_paths[idx]

            #find representation for passed image

            for j in model_names:
                custom_model = models[j]

                #--------------------------------
                #decide input shape
                input_shape = functions.find_input_shape(custom_model)

                #--------------------------------

                img = functions.preprocess_face(
                    img=img_path,
                    target_size=input_shape,
                    enforce_detection=enforce_detection,
                    detector_backend=detector_backend)

                target_representation = custom_model.predict(img)[0, :]

                for k in metric_names:
                    distances = []
                    for index, instance in df.iterrows():
                        source_representation = instance["%s_representation" %
                                                         (j)]

                        if k == 'cosine':
                            distance = dst.findCosineDistance(
                                source_representation, target_representation)
                        elif k == 'euclidean':
                            distance = dst.findEuclideanDistance(
                                source_representation, target_representation)
                        elif k == 'euclidean_l2':
                            distance = dst.findEuclideanDistance(
                                dst.l2_normalize(source_representation),
                                dst.l2_normalize(target_representation))

                        distances.append(distance)

                    #---------------------------

                    if model_name == 'Ensemble' and j == 'OpenFace' and k == 'euclidean':
                        continue
                    else:
                        df["%s_%s" % (j, k)] = distances

                        if model_name != 'Ensemble':
                            threshold = functions.findThreshold(j, k)
                            df = df.drop(columns=["%s_representation" % (j)])
                            df = df[df["%s_%s" % (j, k)] <= threshold]

                            df = df.sort_values(
                                by=["%s_%s" % (j, k)],
                                ascending=True).reset_index(drop=True)

                            resp_obj.append(df)
                            df = df_base.copy()  #restore df for the next iteration

            #----------------------------------

            if model_name == 'Ensemble':

                feature_names = []
                for j in model_names:
                    for k in metric_names:
                        if model_name == 'Ensemble' and j == 'OpenFace' and k == 'euclidean':
                            continue
                        else:
                            feature = '%s_%s' % (j, k)
                            feature_names.append(feature)

                #print(df.head())

                x = df[feature_names].values

                #--------------------------------------

                boosted_tree = Boosting.build_gbm()

                y = boosted_tree.predict(x)

                verified_labels = []
                scores = []
                for i in y:
                    verified = np.argmax(i) == 1
                    score = i[np.argmax(i)]

                    verified_labels.append(verified)
                    scores.append(score)

                df['verified'] = verified_labels
                df['score'] = scores

                df = df[df.verified == True]
                #df = df[df.score > 0.99] #confidence score
                df = df.sort_values(by=["score"],
                                    ascending=False).reset_index(drop=True)
                df = df[['identity', 'verified', 'score']]

                resp_obj.append(df)
                df = df_base.copy()  #restore df for the next iteration

            #----------------------------------

        toc = time.time()

        print("find function took ", toc - tic, " seconds")

        if len(resp_obj) == 1:
            return resp_obj[0]

        return resp_obj

    else:
        raise ValueError("Passed db_path does not exist!")

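A hypothetical call against the find function above; 'my_db' is a placeholder folder of .jpg images and 'target.jpg' a placeholder probe image. A single path returns one pandas DataFrame sorted by distance:

matches = find(img_path='target.jpg',
               db_path='my_db',
               model_name='VGG-Face',
               distance_metric='cosine')
print(matches.head())  #closest identities first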
Example #9
def verify(img1_path,
           img2_path='',
           model_name='VGG-Face',
           distance_metric='cosine',
           model=None,
           enforce_detection=True,
           detector_backend='mtcnn'):

    tic = time.time()

    img_list, bulkProcess = initialize_input(img1_path, img2_path)
    functions.initialize_detector(detector_backend=detector_backend)

    resp_objects = []

    #--------------------------------

    if model_name == 'Ensemble':
        model_names = ["VGG-Face", "Facenet", "OpenFace", "DeepFace"]
        metrics = ["cosine", "euclidean", "euclidean_l2"]
    else:
        model_names = [model_name]
        metrics = [distance_metric]

    #--------------------------------
    #build the face recognition model(s)

    if model == None:
        if model_name == 'Ensemble':
            models = Boosting.loadModel()
        else:
            model = build_model(model_name)
            models = {}
            models[model_name] = model
    else:
        if model_name == 'Ensemble':
            Boosting.validate_model(model)
            models = model.copy()
        else:
            models = {}
            models[model_name] = model

    #------------------------------

    #calling deepface in a for loop causes lots of progress bars. this prevents it.
    disable_option = len(img_list) <= 1

    pbar = tqdm(range(0, len(img_list)),
                desc='Verification',
                disable=disable_option)

    for index in pbar:

        instance = img_list[index]

        if type(instance) == list and len(instance) >= 2:
            img1_path = instance[0]
            img2_path = instance[1]

            ensemble_features = []

            for i in model_names:
                custom_model = models[i]

                #decide input shape
                input_shape = functions.find_input_shape(custom_model)
                input_shape_x = input_shape[0]
                input_shape_y = input_shape[1]

                #----------------------
                #detect and align faces

                img1 = functions.preprocess_face(
                    img=img1_path,
                    target_size=(input_shape_y, input_shape_x),
                    enforce_detection=enforce_detection,
                    detector_backend=detector_backend)

                img2 = functions.preprocess_face(
                    img=img2_path,
                    target_size=(input_shape_y, input_shape_x),
                    enforce_detection=enforce_detection,
                    detector_backend=detector_backend)

                #----------------------
                #find embeddings

                img1_representation = custom_model.predict(img1)[0, :]
                img2_representation = custom_model.predict(img2)[0, :]

                #----------------------
                #find distances between embeddings

                for j in metrics:

                    if j == 'cosine':
                        distance = dst.findCosineDistance(
                            img1_representation, img2_representation)
                    elif j == 'euclidean':
                        distance = dst.findEuclideanDistance(
                            img1_representation, img2_representation)
                    elif j == 'euclidean_l2':
                        distance = dst.findEuclideanDistance(
                            dst.l2_normalize(img1_representation),
                            dst.l2_normalize(img2_representation))
                    else:
                        raise ValueError("Invalid distance_metric passed - ",
                                         distance_metric)

                    #----------------------
                    #decision

                    if model_name != 'Ensemble':

                        threshold = functions.findThreshold(i, j)

                        if distance <= threshold:
                            identified = True
                        else:
                            identified = False

                        resp_obj = {
                            "verified": identified,
                            "distance": distance,
                            "max_threshold_to_verify": threshold,
                            "model": model_name,
                            "similarity_metric": distance_metric
                        }

                        if bulkProcess == True:
                            resp_objects.append(resp_obj)
                        else:
                            return resp_obj

                    else:  #Ensemble

                        #OpenFace with euclidean gives the same result as euclidean_l2
                        if i == 'OpenFace' and j == 'euclidean':
                            continue
                        else:
                            ensemble_features.append(distance)

            #----------------------

            if model_name == 'Ensemble':

                boosted_tree = Boosting.build_gbm()

                prediction = boosted_tree.predict(
                    np.expand_dims(np.array(ensemble_features), axis=0))[0]

                verified = np.argmax(prediction) == 1
                score = prediction[np.argmax(prediction)]

                resp_obj = {
                    "verified": verified,
                    "score": score,
                    "distance": ensemble_features,
                    "model": ["VGG-Face", "Facenet", "OpenFace", "DeepFace"],
                    "similarity_metric":
                    ["cosine", "euclidean", "euclidean_l2"]
                }

                if bulkProcess == True:
                    resp_objects.append(resp_obj)
                else:
                    return resp_obj

            #----------------------

        else:
            raise ValueError("Invalid arguments passed to verify function: ",
                             instance)

    #-------------------------

    toc = time.time()

    #print("identification lasts ",toc-tic," seconds")

    if bulkProcess == True:

        resp_obj = {}

        for i in range(0, len(resp_objects)):
            resp_item = resp_objects[i]
            resp_obj["pair_%d" % (i + 1)] = resp_item

        return resp_obj
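A hypothetical single-pair call against this verify function; the file names are placeholders. One pair returns a single dict rather than the pair_N mapping:

result = verify('img1.jpg', 'img2.jpg',
                model_name='VGG-Face',
                distance_metric='cosine',
                detector_backend='mtcnn')
print(result['verified'], result['distance'])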
Example #10
def verify(img1_path,
           img2_path='',
           model_name='VGG-Face',
           distance_metric='cosine',
           model=None,
           enforce_detection=True):

    tic = time.time()

    if type(img1_path) == list:
        bulkProcess = True
        img_list = img1_path.copy()
    else:
        bulkProcess = False
        img_list = [[img1_path, img2_path]]

    #------------------------------

    if model == None:
        if model_name == 'VGG-Face':
            print("Using VGG-Face model backend and", distance_metric,
                  "distance.")
            model = VGGFace.loadModel()

        elif model_name == 'OpenFace':
            print("Using OpenFace model backend", distance_metric, "distance.")
            model = OpenFace.loadModel()

        elif model_name == 'Facenet':
            print("Using Facenet model backend", distance_metric, "distance.")
            model = Facenet.loadModel()

        elif model_name == 'DeepFace':
            print("Using FB DeepFace model backend", distance_metric,
                  "distance.")
            model = FbDeepFace.loadModel()

        else:
            raise ValueError("Invalid model_name passed - ", model_name)
    else:  #model != None
        print("Already built model is passed")

    #------------------------------
    #face recognition models have different input sizes
    input_shape = model.layers[0].input_shape[1:3]

    #------------------------------

    #tuned thresholds for model and metric pair
    threshold = functions.findThreshold(model_name, distance_metric)

    #------------------------------
    pbar = tqdm(range(0, len(img_list)), desc='Verification')

    resp_objects = []

    #for instance in img_list:
    for index in pbar:

        instance = img_list[index]

        if type(instance) == list and len(instance) >= 2:
            img1_path = instance[0]
            img2_path = instance[1]

            #----------------------
            #crop and align faces

            img1 = functions.detectFace(img1_path,
                                        input_shape,
                                        enforce_detection=enforce_detection)
            img2 = functions.detectFace(img2_path,
                                        input_shape,
                                        enforce_detection=enforce_detection)

            #----------------------
            #find embeddings

            img1_representation = model.predict(img1)[0, :]
            img2_representation = model.predict(img2)[0, :]

            #----------------------
            #find distances between embeddings

            if distance_metric == 'cosine':
                distance = dst.findCosineDistance(img1_representation,
                                                  img2_representation)
            elif distance_metric == 'euclidean':
                distance = dst.findEuclideanDistance(img1_representation,
                                                     img2_representation)
            elif distance_metric == 'euclidean_l2':
                distance = dst.findEuclideanDistance(
                    dst.l2_normalize(img1_representation),
                    dst.l2_normalize(img2_representation))
            else:
                raise ValueError("Invalid distance_metric passed - ",
                                 distance_metric)

            #----------------------
            #decision

            if distance <= threshold:
                identified = True
            else:
                identified = False

            #----------------------
            #response object

            resp_obj = {
                "verified": identified,
                "distance": distance,
                "max_threshold_to_verify": threshold,
                "model": model_name,
                "similarity_metric": distance_metric
            }

            if bulkProcess == True:
                resp_objects.append(resp_obj)
            else:
                #K.clear_session()
                return resp_obj
            #----------------------

        else:
            raise ValueError("Invalid arguments passed to verify function: ",
                             instance)

    #-------------------------

    toc = time.time()

    #print("identification lasts ",toc-tic," seconds")

    if bulkProcess == True:

        resp_obj = {}

        for i in range(0, len(resp_objects)):
            resp_obj["pair_%d" % (i + 1)] = resp_objects[i]

        return resp_obj
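For the bulk form of this verify function, a hypothetical call with placeholder file names; the comment shows the shape of the result, with illustrative values only:

pairs = [['a1.jpg', 'a2.jpg'], ['b1.jpg', 'b2.jpg']]
results = verify(pairs)
#results -> {'pair_1': {'verified': True, ...}, 'pair_2': {'verified': False, ...}}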
Example #11
    input_shape = model.layers[0].input_shape
    if type(input_shape) == list:
        input_shape = input_shape[0][1:3]
    else:
        input_shape = input_shape[1:3]

    input_shape_x = input_shape[0]
    input_shape_y = input_shape[1]

    #accuracy calculation for positive samples
    log_file.write(
        "Accuracy Calculation for Positive Samples (Euclidean L2 distance used) \n"
    )

    threshold = functions.findThreshold(models[i], distance_metric)
    for detector in detector_backend_list:
        true_count = 0
        total_sample_count = 0
        no_face_count = 0
        start = time.time()
        log_file.write("Using " + detector + " backend \n")
        for imageName in os.listdir(base_path + anchor_path):
            input_folder = base_path + imageName.split('_anchor')[0]
            for sampleImage in os.listdir(input_folder + '\\'):
                print(sampleImage)
                input_foto_path = input_folder + '\\' + sampleImage
                anchor_foto_path = base_path + anchor_path + imageName
                #----------------------

                img1, face_flag = functions.preprocess_face(
Example #12
def find(img_path,
         db_path,
         model_name='VGG-Face',
         distance_metric='cosine',
         model=None,
         enforce_detection=True):

    tic = time.time()

    if type(img_path) == list:
        bulkProcess = True
        img_paths = img_path.copy()
    else:
        bulkProcess = False
        img_paths = [img_path]

    if os.path.isdir(db_path) == True:

        #---------------------------------------

        if model == None:
            if model_name == 'VGG-Face':
                print("Using VGG-Face model backend and", distance_metric,
                      "distance.")
                model = VGGFace.loadModel()
            elif model_name == 'OpenFace':
                print("Using OpenFace model backend", distance_metric,
                      "distance.")
                model = OpenFace.loadModel()
            elif model_name == 'Facenet':
                print("Using Facenet model backend", distance_metric,
                      "distance.")
                model = Facenet.loadModel()
            elif model_name == 'DeepFace':
                print("Using FB DeepFace model backend", distance_metric,
                      "distance.")
                model = FbDeepFace.loadModel()
            else:
                raise ValueError("Invalid model_name passed - ", model_name)
        else:  #model != None
            print("Already built model is passed")

        input_shape = model.layers[0].input_shape[1:3]
        threshold = functions.findThreshold(model_name, distance_metric)

        #---------------------------------------

        file_name = "representations_%s.pkl" % (model_name)
        file_name = file_name.replace("-", "_").lower()

        if path.exists(db_path + "/" + file_name):

            print(
                "WARNING: Representations for images in ", db_path,
                " folder were previously stored in ", file_name,
                ". If you added new instances after this file was created, delete the file and call the find function again; it will be rebuilt."
            )

            with open(db_path + '/' + file_name, 'rb') as f:
                representations = pickle.load(f)

            print(len(representations), " representations found in ",
                  file_name)

        else:
            employees = []

            for r, d, f in os.walk(
                    db_path):  # r=root, d=directories, f = files
                for file in f:
                    if file.endswith('.jpg'):
                        exact_path = r + "/" + file
                        employees.append(exact_path)

            if len(employees) == 0:
                raise ValueError("There is no image in ", db_path, " folder!")

            #------------------------
            #find representations for db images

            representations = []

            pbar = tqdm(range(0, len(employees)),
                        desc='Finding representations')

            #for employee in employees:
            for index in pbar:
                employee = employees[index]
                img = functions.detectFace(employee,
                                           input_shape,
                                           enforce_detection=enforce_detection)
                representation = model.predict(img)[0, :]

                instance = []
                instance.append(employee)
                instance.append(representation)

                representations.append(instance)

            with open(db_path + '/' + file_name, "wb") as f:
                pickle.dump(representations, f)

            print(
                "Representations stored in ", db_path, "/", file_name,
                " file. Please delete this file when you add new identities to your database."
            )

        #----------------------------
        #we now have representations for the database
        df = pd.DataFrame(representations,
                          columns=["identity", "representation"])
        df_base = df.copy()

        resp_obj = []

        global_pbar = tqdm(range(0, len(img_paths)), desc='Analyzing')
        for j in global_pbar:
            img_path = img_paths[j]

            #find representation for passed image
            img = functions.detectFace(img_path,
                                       input_shape,
                                       enforce_detection=enforce_detection)
            target_representation = model.predict(img)[0, :]

            distances = []
            for index, instance in df.iterrows():
                source_representation = instance["representation"]

                if distance_metric == 'cosine':
                    distance = dst.findCosineDistance(source_representation,
                                                      target_representation)
                elif distance_metric == 'euclidean':
                    distance = dst.findEuclideanDistance(
                        source_representation, target_representation)
                elif distance_metric == 'euclidean_l2':
                    distance = dst.findEuclideanDistance(
                        dst.l2_normalize(source_representation),
                        dst.l2_normalize(target_representation))
                else:
                    raise ValueError("Invalid distance_metric passed - ",
                                     distance_metric)

                distances.append(distance)

            df["distance"] = distances
            df = df.drop(columns=["representation"])
            df = df[df.distance <= threshold]

            df = df.sort_values(by=["distance"],
                                ascending=True).reset_index(drop=True)
            resp_obj.append(df)
            df = df_base.copy()  #restore df for the next iteration

        toc = time.time()

        print("find function took ", toc - tic, " seconds")

        if len(resp_obj) == 1:
            return resp_obj[0]

        return resp_obj

    else:
        raise ValueError("Passed db_path does not exist!")

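The representations pickle written by find goes stale once the database changes. A minimal invalidation sketch, assuming the file name pattern from the snippet above and a placeholder 'my_db' folder:

import os

cache_file = os.path.join('my_db', 'representations_vgg_face.pkl')
if os.path.exists(cache_file):
    os.remove(cache_file)  #the next find() call rebuilds it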