def prepare_database1(model):
    database = {}
    for file in glob.glob("diseases/*"):
        identity = os.path.splitext(os.path.basename(file))[0]
        database[identity] = img_to_encoding(file, model)

    return database
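
All of these examples call an img_to_encoding helper that maps an image to a 128-dimensional FaceNet embedding; the helper itself is not shown on this page. A minimal sketch of what such a helper typically does, assuming the channels-first 96x96 FaceNet model loaded in the later examples (the name img_to_encoding_sketch and the exact preprocessing are assumptions, not the original fr_utils code):

import cv2
import numpy as np

def img_to_encoding_sketch(image_path, model):
    # Read the image (BGR), flip to RGB, move channels first, scale to [0, 1]
    img = cv2.imread(image_path, 1)[..., ::-1]
    img = np.around(np.transpose(img, (2, 0, 1)) / 255.0, decimals=12)
    # Predict on a batch of one image; returns an array of shape (1, 128)
    return model.predict_on_batch(np.array([img]))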
Example #2
def verify(image_path, identity, database, model):
    """
    Function that verifies if the person on the "image_path" image is "identity".

    Arguments:
    image_path -- path to an image
    identity -- string, name of the person whose identity you'd like to verify. Has to be a resident of the Happy house.
    database -- python dictionary mapping allowed people's names (strings) to their encodings (vectors).
    model -- your Inception model instance in Keras

    Returns:
    dist -- distance between the image_path and the image of "identity" in the database.
    door_open -- True, if the door should open. False otherwise.
    """

    ### START CODE HERE ###

    # Step 1: Compute the encoding for the image. Use img_to_encoding(); see the example above. (≈ 1 line)
    encoding = img_to_encoding(image_path, model)

    # Step 2: Compute distance with identity's image (≈ 1 line)
    dist = np.linalg.norm(database[identity] - encoding)

    # Step 3: Open the door if dist < 0.7, else don't open (≈ 3 lines)
    if dist < 0.7:
        print("It's " + str(identity) + ", welcome home!")
        door_open = True
    else:
        print("It's not " + str(identity) + ", please go away")
        door_open = False

    ### END CODE HERE ###

    return dist, door_open
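
A hypothetical call, using a database entry and camera image that appear in the later examples (FRmodel is the FaceNet instance loaded there):

database = {"younes": img_to_encoding("images/younes.jpg", FRmodel)}
dist, door_open = verify("images/camera_0.jpg", "younes", database, FRmodel)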
Example #3
    def verify(self, image_path, identity, database, threshold=None):
        """
        Function that verifies if the person on the "image_path" image is "identity".

        Arguments:
        image_path -- path to an image
        identity -- string, name of the person whose identity you'd like to verify. Has to be a resident of the Happy house.
        database -- python dictionary mapping allowed people's names (strings) to their encodings (vectors).
        threshold -- distance cut-off for a positive match (defaults to 0.7).

        Returns:
        dist -- distance between the image_path image and the image of "identity" in the database.
        is_valid -- True if the match is accepted, False otherwise.
        """

        if threshold is None:
            threshold = 0.7

        # Step 1: Compute the encoding for the image. Use img_to_encoding(); see the example above. (≈ 1 line)
        encoding = img_to_encoding(image_path, self.model)

        # Step 2: Compute distance with identity's image (≈ 1 line)
        dist = float(np.linalg.norm(encoding - database[identity]))

        # Step 3: Open the door if dist < threshold, else don't open (≈ 3 lines)
        if dist < threshold:
            print("It's " + str(identity))
            is_valid = True
        else:
            print("It's not " + str(identity))
            is_valid = False

        return dist, is_valid
Example #4
def calculate_score_for_images(model,
                               feature_encoding,
                               test_images_folder,
                               detailed_print=False):
    database = {}
    database["feature"] = feature_encoding

    test_images_files = get_files_from_folder(test_images_folder)
    print(
        f"found {len(test_images_files)} files in folder {test_images_folder}")

    for test_image_file in test_images_files:
        test_image_key = os.path.basename(test_image_file)
        database[test_image_key] = img_to_encoding(test_image_file, model)

    if detailed_print:
        print("database:")
        print(database)

    all_scores = []
    for index, key in enumerate(database):
        score, threshold_status = verify(feature_encoding, key, database,
                                         model)
        all_scores.append((key, score))
    return all_scores
Example #5
def verify(image_path: str, identity: str, database: dict, model,
           threshold: float) -> tuple:
    """Verify if the person on the "image_path" image is "identity".

    Arguments:
    image_path -- path to an image
    identity -- string, name of the person whose identity you'd like to verify
    database -- mapping of allowed people's names to their encodings
    model -- your Inception model instance in Keras
    threshold -- cut-off for positive/negative match

    Returns:
    dist -- distance between the image_path and the image of "identity" in the database
    positive_match -- True, if positive match. False otherwise
    """
    # Compute the encoding for the image
    encoding = img_to_encoding(image_path, model)

    # Compute distance with identity's image
    dist = np.linalg.norm(encoding - database[identity])
    print(dist)

    # Positive match if dist < threshold, else negative match
    if dist < threshold:
        print("It's " + str(identity) + ", welcome home!")
        positive_match = True
    else:
        print("It's not " + str(identity) + ", please go away")
        positive_match = False

    return dist, positive_match
    def predict(self, image):
        image = resize_image(image)
        image_embedding = img_to_encoding(np.array([image]), facenet)
        # predict returns an array of shape [n_samples], so label[0] below pulls the
        # single value out of the array; see
        # https://scikit-learn.org/stable/modules/generated/sklearn.neighbors.KNeighborsClassifier.html#sklearn.neighbors.KNeighborsClassifier.predict
        label = self.model.predict(image_embedding)
        return label[0]
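
The comment above refers to scikit-learn's KNeighborsClassifier; a minimal sketch of how such a classifier might be fitted on FaceNet embeddings like the ones produced by the load() methods further down (X_train and y_train are assumed to hold the (n_samples, 128) embeddings and their labels):

from sklearn.neighbors import KNeighborsClassifier

knn = KNeighborsClassifier(n_neighbors=1)
knn.fit(X_train, y_train)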
Example #7
def who_is_it(image_path, database, model):
    """
    Implements face recognition for the happy house by finding who is the person on the image_path image.

    Arguments:
    image_path -- path to an image
    database -- database containing image encodings along with the name of the person on the image
    model -- your Inception model instance in Keras

    Returns:
    min_dist -- the minimum distance between image_path encoding and the encodings from the database
    identity -- string, the name prediction for the person on image_path
    """

    encoding = img_to_encoding(image_path, model)
    min_dist = 100

    for (name, db_enc) in database.items():
        dist = np.linalg.norm(encoding - db_enc)
        if dist < min_dist:
            min_dist = dist
            identity = name

    if min_dist > 0.7:
        print("Not in the database.")
    else:
        print("it's " + str(identity) + ", the distance is " + str(min_dist))

    return min_dist, identity
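
A hypothetical call, mirroring the usage in the later main() example:

min_dist, identity = who_is_it("images/camera_0.jpg", database, FRmodel)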
Example #8
def verify(image_path, identity, database, model):
    """
    Verifies that the encoding of the "image_path" image matches "identity".

    Arguments:
        image_path -- image from the camera.
        identity -- string, name of the person to verify.
        database -- dictionary mapping member names to their encodings.
        model -- an instance of the model in Keras.

    Returns:
        dist -- distance between the camera image's encoding and the encoding of "identity" in the database.
        is_door_open -- boolean, whether the door should open.
    """
    # Step 1: Compute the encoding of the image with fr_utils.img_to_encoding().
    encoding = fr_utils.img_to_encoding(image_path, model)

    # Step 2: Compute the distance to the encoding stored in the database
    dist = np.linalg.norm(encoding - database[identity])

    # Step 3: Decide whether to open the door
    if dist < 0.7:
        print("Welcome home, " + str(identity) + "!")
        is_door_open = True
    else:
        print("Verification failed: you are not " + str(identity) + "!")
        is_door_open = False

    return dist, is_door_open
Example #9
def verify(image_path, identity, database, model):
    """
    Function that verifies if the person on the "image_path" image is "identity".
    
    Arguments:
    image_path -- path to an image
    identity -- string, name of the person whose identity you'd like to verify. Has to be an employee who works in the office.
    database -- python dictionary mapping allowed people's names (strings) to their encodings (vectors).
    model -- your Inception model instance in Keras
    
    Returns:
    dist -- distance between the image_path and the image of "identity" in the database.
    door_open -- True, if the door should open. False otherwise.
    """

    # Compute the encoding for the image
    encoding = img_to_encoding(image_path, model)

    # Compute distance with identity's image
    dist = np.linalg.norm(encoding - database[identity])

    # Open the door if dist < 0.7, else don't open
    if dist < 0.7:
        print("It's " + str(identity) + ", welcome in!")
        door_open = True
    else:
        print("It's not " + str(identity) + ", please go away")
        door_open = False

    return dist, door_open
Example #10
def prepare_database(model):

    database = {}
    for file in glob.glob("images/*"):
        identity = os.path.splitext(os.path.basename(file))[0]
        database[identity] = img_to_encoding(cv2.imread(file, 1), model)

    return database
Example #11
def compare(model):
	global ls
	print(model)
	embedding1 = img_to_encoding("/Users/prituldave/pritul_E_drive/SIHv5/1.png",model)
	global flag2
	os.chdir("faces")
	x = (os.listdir())
	min_dist = 100.0
	output_name = " "
	for names in x:
		if names == ".DS_Store":
			continue
		os.chdir(names)

		file_images = os.listdir()
		
		for _images in file_images:
			if _images == ".DS_Store":
				continue
			embedding2 = img_to_encoding(_images, model)
			dist = np.linalg.norm(embedding2 - embedding1)

			# Track the closest match seen so far across all folders
			if dist < min_dist:
				output_name = names
				min_dist = dist

		os.chdir("..")
	os.chdir("..")
	print("recognized person is ",output_name)
	print("min distance is ",min_dist)
	if(min_dist>=0.60):
		output_name = "unknown"
		ls.append(output_name)
		print("unknown")
	else:
		output_name = "known"
		ls.append(output_name)
		print("known")
	#if output_name == "unknown":
	#	print("unknown")
	return output_name
Example #12
    def load(self, img_rows=IMAGE_SIZE, img_cols=IMAGE_SIZE, img_channels=3, model=facenet):

        images, labels = load_dataset(self.path_name)

        X_embedding = img_to_encoding(images, model)

        print('X_train shape', X_embedding.shape)
        print('y_train shape', labels.shape)
        print(X_embedding.shape[0], 'train samples')

        self.X_train = X_embedding
        self.y_train = labels
Example #13
def compute_sim(image_path_1, image_path_2, model):
    """
    Computes the distance between the encodings of two images.

    Arguments:
    image_path_1 -- path to an image1
    image_path_2 -- path to an image2
    model -- your Inception model instance in Keras

    Returns:
    dist -- distance between the image_path_1 and image_path_2.
    """

    # Step 1: Compute the encoding for each image. Use img_to_encoding(); see the example above. (≈ 1 line)
    encoding_1 = img_to_encoding(image_path_1, model)
    encoding_2 = img_to_encoding(image_path_2, model)

    # Step 2: Compute distance with identity's image (≈ 1 line)
    dist = np.linalg.norm(encoding_1 - encoding_2)

    return dist
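
A hypothetical call, reusing image paths that appear elsewhere on this page; the 0.7 cut-off follows the verify examples above:

dist = compute_sim("images/younes.jpg", "images/camera_0.jpg", FRmodel)
same_person = dist < 0.7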
def recognise_face2(imagepath: object, database: object, model: object) -> object:
    encoding = img_to_encoding(imagepath, model)

    i = 0

    for (name, db_enc) in database.items():
        dist1 = np.linalg.norm(db_enc - encoding)
        a1[i] = dist1
        i = i + 1
        print(dist1)

    return a1
Example #15
def main():
    start_time = time.time()
    np.set_printoptions(threshold=np.inf)  # np.nan is rejected by newer NumPy versions
    FRmodel = faceRecoModel(input_shape=(3, 96, 96))
    print("Total Params:", FRmodel.count_params())
    FRmodel.compile(optimizer='adam', loss=triplet_loss, metrics=['accuracy'])
    load_weights_from_FaceNet(FRmodel)

    # create database
    database = dict()
    database["younes"] = img_to_encoding("images/younes.jpg", FRmodel)
    database["kevin"] = img_to_encoding("images/kevin.jpg", FRmodel)
    database["andrew"] = img_to_encoding("images/andrew.jpg", FRmodel)
    database["sebastiano"] = img_to_encoding("images/sebastiano.jpg", FRmodel)
    database["moi"] = img_to_encoding("images/moi3.jpg", FRmodel)

    verify("images/camera_0.jpg", "younes", database, FRmodel)

    print('')
    print("---- camera_0 ----")
    who_is_it("images/camera_0.jpg", database, FRmodel)

    print('')
    print("---- moi1 ----")
    who_is_it("images/moi1.jpg", database, FRmodel, display=True)

    print('')
    print("---- moi2 ----")
    who_is_it("images/moi2.jpg", database, FRmodel, display=True)

    print('')
    print("---- moi4 ----")
    who_is_it("images/moi4.jpg", database, FRmodel, display=True)

    print('')
    print("---- moi5 ----")
    who_is_it("images/moi5.jpg", database, FRmodel, display=True)

    print('')
    print("--- %s seconds ---" % (time.time() - start_time))
Example #16
def verify(image_path, identity, database, model):

    encoding = fr_utils.img_to_encoding(image_path, model)

    dist = np.linalg.norm(encoding - database[identity])

    if dist < 0.7:
        print("欢迎" + str(identity) + "回家!")
        is_door_open = True
    else:
        print("经验证, 您与" + str(identity) + "不符!")
        is_door_open = False

    return dist, is_door_open
def recognise_face(imagepath, database, model):
    encoding = img_to_encoding(imagepath, model)
    identity = None
    min_dist = 100
    for (name, db_enc) in database.items():
        dist = np.linalg.norm(db_enc - encoding)
        print('distance for %s is %s' % (name, dist))
        if dist < min_dist:
            min_dist = dist
            identity = name

    if min_dist > 0.6:
        #speak('cant recognisethe face', 2)
        print("cant recognise the face")
        return str(0)
    else:
        return str(identity)
    def load(self,
             img_rows=IMAGE_SIZE,
             img_cols=IMAGE_SIZE,
             img_channels=3,
             model=facenet):
        # Load the dataset into memory
        images, labels = load_dataset(self.path_name)
        # Generate the 128-dimensional feature vectors; consider running this in
        # batches if memory is tight -- img_to_encoding does so via predict's
        # batch_size parameter
        X_embedding = img_to_encoding(images, model)
        # Report the size of the training set
        print('X_train shape', X_embedding.shape)
        print('y_train shape', labels.shape)
        print(X_embedding.shape[0], 'train samples')
        # No further normalization of X_train here, since facenet already applies l2_norm
        self.X_train = X_embedding
        self.y_train = labels
Example #19
    def who_is_it(self, image_path, database, threshold=None):
        """
        Implements face recognition for the happy house by finding who is the person on the image_path image.

        Arguments:
        image_path -- path to an image
        database -- database containing image encodings along with the name of the person on the image
        threshold -- distance cut-off for accepting a match (defaults to 0.7)

        Returns:
        min_dist -- the minimum distance between image_path encoding and the encodings from the database
        identity -- string, the name prediction for the person on image_path
        """

        if threshold is None:
            threshold = 0.7

        ## Step 1: Compute the target "encoding" for the image. Use img_to_encoding(); see the example above. ## (≈ 1 line)
        encoding = img_to_encoding(image_path, self.model)

        ## Step 2: Find the closest encoding ##

        # Initialize "min_dist" to a large value, say 100 (≈1 line)
        min_dist = 100
        identity = None

        # Loop over the database dictionary's names and encodings.
        for (name, db_enc) in database.items():

            # Compute L2 distance between the target "encoding" and the current "emb" from the database. (≈ 1 line)
            dist = np.linalg.norm(db_enc - encoding)

            # If this distance is less than the min_dist, then set min_dist to dist, and identity to name. (≈ 3 lines)
            if dist < min_dist:
                min_dist = dist
                identity = name

        if min_dist > threshold:
            print("Not in the database.")
        else:
            print("it's " + str(identity) + ", the distance is " +
                  str(min_dist))

        return min_dist, identity
def recognise_face1(imagepath: object, database: object, model: object) -> object:
    encoding = img_to_encoding(imagepath, model)
    identity = None
    min_dist = 100

    for (name, db_enc) in database.items():

        dist = np.linalg.norm(db_enc - encoding)

        if dist < min_dist:
            min_dist = dist
            identity = name

    if min_dist > 0.3:

        return str(0)

    else:
        return str(identity)
Example #21
    def recognize_face(self, face_descriptor):
        encoding = fr_utils.img_to_encoding(face_descriptor, self.FRmodel)
        min_dist = 100
        identity = None

        # Loop over the database dictionary's names and encodings.
        for (name, db_enc) in self.database.items():

            # Compute L2 distance between the target "encoding" and the current "emb" from the database.
            dist = np.linalg.norm(db_enc - encoding)

            print('distance for %s is %s' % (name, dist))

            # If this distance is less than the min_dist, then set min_dist to dist, and identity to name
            if dist < min_dist:
                min_dist = dist
                identity = name

        return identity, min_dist
Example #22
def who_is_it(image_path, database, model):

    encoding = fr_utils.img_to_encoding(image_path, model)

    min_dist = 100

    for (name, db_enc) in database.items():  # names and corresponding encodings in the dictionary

        dist = np.linalg.norm(encoding - db_enc)

        if dist < min_dist:
            min_dist = dist
            identity = name

    if min_dist > 0.7:
        print("抱歉, 您的信息不在数据库中。")
    else:
        print("姓名" + str(identity) + "  差距:" + str(min_dist))

    return min_dist, identity
Example #23
def recognise(image_path: str, database: dict, model,
              threshold: float) -> tuple:
    """Implement face recognition against a database.

    Arguments:
    image_path -- path to an image
    database -- database containing image encodings along with the name of the person on the image
    model -- your Inception model instance in Keras
    threshold -- prediction cut-off between a positive and a negative match

    Returns:
    min_dist -- the minimum distance between image_path encoding and encodings from the db
    identity -- string, the name prediction for the person on image_path
    """
    # Compute the target "encoding" for the image
    encoding = img_to_encoding(image_path, model)

    # Initialize "min_dist"
    min_dist = 100

    identity = ""
    # Loop over the database dictionary's names and encodings
    for (name, db_enc) in database.items():
        print(name)

        # Compute L2 distance between the target "encoding" and the current "enc"
        dist = np.linalg.norm(encoding - db_enc)
        print(dist)

        # If distance less than the min_dist, set min_dist to dist, and identity to name
        if dist < min_dist:
            min_dist = dist
            identity = name

    if min_dist > threshold:
        print("Not in the database.")
    else:
        print("it's " + str(identity) + ", the distance is " + str(min_dist))

    return min_dist, identity
Example #24
def who_is_it(image_path, database, model):
    """
    Performs face recognition on the specified image.

    Arguments:
        image_path -- path to the image
        database -- dictionary mapping names to encodings
        model -- an instance of the model in Keras.

    Returns:
        min_dist -- the smallest distance between the image's encoding and the encodings in the database.
        identity -- string, the name corresponding to min_dist.
    """
    # Step 1: Compute the encoding of the image with fr_utils.img_to_encoding().
    encoding = fr_utils.img_to_encoding(image_path, model)

    # Step 2: Find the closest encoding
    ## Initialize min_dist to a sufficiently large number, here 100
    min_dist = 100

    ## Loop over the database to find the closest encoding
    for (name, db_enc) in database.items():
        ### Compute the L2 distance between the target encoding and the current database encoding.
        dist = np.linalg.norm(encoding - db_enc)

        ### If the distance is smaller than min_dist, update identity and min_dist.
        if dist < min_dist:
            min_dist = dist
            identity = name

    # Decide whether the person is in the database
    if min_dist > 0.7:
        print("Sorry, you are not in the database.")

    else:
        print("Name: " + str(identity) + "  distance: " + str(min_dist))

    return min_dist, identity
Example #25
def who_is_it(image_path, database, model):
    """
    Implements face recognition for the office by finding who is the person on the image_path image.
    
    Arguments:
    image_path -- path to an image
    database -- database containing image encodings along with the name of the person on the image
    model -- your Inception model instance in Keras
    
    Returns:
    min_dist -- the minimum distance between image_path encoding and the encodings from the database
    identity -- string, the name prediction for the person on image_path
    """

    # Compute the target "encoding" for the image
    encoding = img_to_encoding(image_path, model)

    # Initialize "min_dist" to a large value, say 100
    min_dist = 100

    # Loop over the database dictionary's names and encodings
    for (name, db_enc) in database.items():

        # Compute L2 distance between the target "encoding" and the current db_enc from the database
        dist = np.linalg.norm(encoding - db_enc)

        # If this distance is less than the min_dist, then set min_dist to dist, and identity to name
        if dist < min_dist:
            min_dist = dist
            identity = name

    if min_dist > 0.7:
        print("Not in the database.")
    else:
        print("it's " + str(identity) + ", the distance is " + str(min_dist))

    return min_dist, identity
def detect_face(video_capture, threshold, face_cascade, FR_model,
                face_database):
    while True:
        ret, frame = video_capture.read()
        frame = cv2.flip(frame, 1)

        faces = face_cascade.detectMultiScale(frame, 1.3, 5)
        for (x, y, w, h) in faces:
            cv2.rectangle(frame, (x, y), (x + w, y + h), (255, 255, 0), 2)
            roi = frame[y:y + h, x:x + w]
            encoding = img_to_encoding(roi, FR_model)
            min_dist = 100
            identity = None

            for (name, encoded_image_name) in face_database.items():
                dist = np.linalg.norm(encoding - encoded_image_name)
                if (dist < min_dist):
                    min_dist = dist
                    identity = name
                print('Min dist: ', min_dist)

            if min_dist < threshold:  # use the threshold argument rather than a hard-coded cut-off
                cv2.putText(frame, "Face : " + identity[:-1], (x, y - 50),
                            cv2.FONT_HERSHEY_PLAIN, 1.5, (0, 255, 0), 2)
                cv2.putText(frame, "Dist : " + str(min_dist), (x, y - 20),
                            cv2.FONT_HERSHEY_PLAIN, 1.5, (0, 255, 0), 2)
            else:
                cv2.putText(frame, 'No matching faces', (x, y - 20),
                            cv2.FONT_HERSHEY_PLAIN, 1.5, (0, 0, 255), 2)

        cv2.imshow('Face Recognition System', frame)
        if (cv2.waitKey(1) & 0xFF == ord('q')):
            break

    video_capture.release()
    cv2.destroyAllWindows()
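
A hypothetical way to wire this loop up, assuming the frontal-face Haar cascade bundled with OpenCV and a model/database built as in the earlier examples:

video_capture = cv2.VideoCapture(0)
face_cascade = cv2.CascadeClassifier(
    cv2.data.haarcascades + "haarcascade_frontalface_default.xml")
detect_face(video_capture, 0.1, face_cascade, FRmodel, database)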
Example #27
def who_is_it(image, database, model):
	"""
	Implements face recognition for the happy house by finding who is the person on the image_path image.

	Arguments:
		image_path -- path to an image
		database -- database containing image encodings along with the name of the person on the image
		model -- your Inception model instance in Keras

	Returns:
		min_dist -- the minimum distance between image_path encoding and the encodings from the database
		identity -- string, the name prediction for the person on image_path
	"""
	encoding = fr_utils.img_to_encoding(image, model)
    
	min_dist = 100
	identity = None
    
	# Loop over the database dictionary's names and encodings.
	for (name, db_enc) in database.items():
        
		# Compute L2 distance between the target "encoding" and the current "emb" from the database.
		dist = np.linalg.norm(db_enc - encoding)

#		print('distance for %s is %s' %(name, dist))

		# If this distance is less than the min_dist, then set min_dist to dist, and identity to name
		if dist < min_dist:
			min_dist = dist
			identity = name
    
	if min_dist > 0.52:
		return None

	print(f'found...name={identity}, distance={min_dist}')
	return str(identity)
Example #28

if __name__ == '__main__':
    K.set_image_data_format('channels_first')
    FRmodel = faceRecoModel(input_shape=(3, 96, 96))

    load_weights(FRmodel)

    database = dict()

    resize_image("/Users/markmc/Repos/RecogNet/faces/liis_01.jpg", 96, 96)
    resize_image("/Users/markmc/Repos/RecogNet/faces/mark_01.jpg", 96, 96)
    resize_image("/Users/markmc/Repos/RecogNet/faces/liis_02.jpg", 96, 96)
    resize_image("/Users/markmc/Repos/RecogNet/faces/mark_02.jpg", 96, 96)

    database["lisa"] = img_to_encoding(
        "/Users/markmc/Repos/RecogNet/faces/liis_03.jpg", FRmodel)
    database["mark"] = img_to_encoding(
        "/Users/markmc/Repos/RecogNet/faces/mark_05.jpg", FRmodel)

    verify("/Users/markmc/Repos/RecogNet/faces/mark_02.jpg", "lisa", database,
           FRmodel, 0.1)
    verify("/Users/markmc/Repos/RecogNet/faces/mark_03.jpg", "lisa", database,
           FRmodel, 0.1)

    print('=============liis_01 image for processing =================')
    recognise("/Users/markmc/Repos/RecogNet/faces/liis_01.jpg", database,
              FRmodel, 0.2)
    print('=============liis_02 image for processing =================')
    recognise("/Users/markmc/Repos/RecogNet/faces/liis_02.jpg", database,
              FRmodel, 0.2)
    print('=============liis_03 image for processing =================')
Example #29
    def img_to_encoding(self, image_path):
        return img_to_encoding(image_path, self.model)
Example #30
    #     neg_dist = tf.reduce_sum(tf.square(tf.subtract(anchor, negative)), axis = 1)
    # Step 3: subtract the two previous distances and add alpha.
    basic_loss = tf.add(tf.subtract(pos_dist, neg_dist), alpha)
    # Step 4: Take the maximum of basic_loss and 0.0. Sum over the training examples.
    loss = tf.reduce_sum(tf.maximum(basic_loss, 0.0))
    ### END CODE HERE ###

    return loss
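
# The snippet above starts mid-function. A complete sketch of such a triplet loss,
# following the same steps (pos_dist, neg_dist, basic_loss, loss) and assuming the
# usual FaceNet formulation where y_pred holds the anchor, positive and negative
# encodings, might look like this (a reconstruction, not the original cell):
def triplet_loss_sketch(y_true, y_pred, alpha=0.2):
    anchor, positive, negative = y_pred[0], y_pred[1], y_pred[2]
    # Step 1: squared L2 distance between the anchor and the positive
    pos_dist = tf.reduce_sum(tf.square(tf.subtract(anchor, positive)), axis=-1)
    # Step 2: squared L2 distance between the anchor and the negative
    neg_dist = tf.reduce_sum(tf.square(tf.subtract(anchor, negative)), axis=-1)
    # Step 3: subtract the two distances and add the margin alpha
    basic_loss = tf.add(tf.subtract(pos_dist, neg_dist), alpha)
    # Step 4: take the maximum of basic_loss and 0.0, then sum over the examples
    return tf.reduce_sum(tf.maximum(basic_loss, 0.0))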


FRmodel.compile(optimizer='adam', loss=triplet_loss, metrics=['accuracy'])

load_weights_from_FaceNet(FRmodel)

database = {}
database["danielle"] = img_to_encoding("images/danielle.png", FRmodel)
database["younes"] = img_to_encoding("images/younes.jpg", FRmodel)
database["tian"] = img_to_encoding("images/tian.jpg", FRmodel)
database["andrew"] = img_to_encoding("images/andrew.jpg", FRmodel)
database["kian"] = img_to_encoding("images/kian.jpg", FRmodel)
database["dan"] = img_to_encoding("images/dan.jpg", FRmodel)
database["sebastiano"] = img_to_encoding("images/sebastiano.jpg", FRmodel)
database["bertrand"] = img_to_encoding("images/bertrand.jpg", FRmodel)
database["kevin"] = img_to_encoding("images/kevin.jpg", FRmodel)
database["felix"] = img_to_encoding("images/felix.jpg", FRmodel)
database["benoit"] = img_to_encoding("images/benoit.jpg", FRmodel)
database["arnaud"] = img_to_encoding("images/arnaud.jpg", FRmodel)


def verify(image_path, identity, database, model):
    """