def add_user(img_path, group_name):
    """
    Add a single user, save face in user image as feature vector.
    :param img_path: Path to user image
    :param group_name: Name of user group
    :return: None
    """
    db_path = os.path.join(DB_ROOT_DIR, group_name)
    if not os.path.exists(db_path):
        os.makedirs(db_path)

    if not os.path.exists(img_path):
        print("Invalid Image Path: " + img_path + "!")
        return

    extension = img_path.split(".")[-1]
    if extension not in ("jpg", "png", "jfif"):
        print("Invalid Image Extension: " + img_path + "!")
        return
    img = imageio.imread(img_path)
    prewhitened = detect_face(img)

    if prewhitened is not None:
        img_name = str(img_path.split("/")[-1].split(".")[0])
        imageio.imsave(os.path.join(db_path, img_name + '.jpg'), prewhitened)
        print(img_name + " Has Been Added!")


def add_user_batch(img_path, group_name):
    """
    Add users in batches, read all images in the path and save faces as feature vectors
    :param img_path: Path to the folder holding user images
    :param group_name: Name of user group
    :return: None
    """
    db_path = os.path.join(DB_ROOT_DIR, group_name)
    if not os.path.exists(db_path):
        os.makedirs(db_path)

    if not os.path.exists(img_path):
        print("Invalid Image Path: " + img_path + "!")
        return

    for image in os.listdir(img_path):
        extension = image.split(".")[-1]
        if extension in ("jpg", "png", "jfif"):
            img = imageio.imread(os.path.join(img_path, image))
            prewhitened = detect_face(img)

            if prewhitened is not None:
                img_name = str(image.split('.')[0])
                imageio.imsave(os.path.join(db_path, img_name + '.jpg'),
                               prewhitened)
                print(img_name + " Has Been Added!")
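
A minimal usage sketch for the two helpers above; the image paths and the "staff" group name are invented examples, and DB_ROOT_DIR, detect_face, and the imageio/os imports are assumed to be defined at module level as the snippet implies.

# Hypothetical usage; the paths and group name below are invented examples.
add_user("photos/alice.jpg", "staff")        # one image -> one prewhitened face crop
add_user_batch("photos/new_hires", "staff")  # every jpg/png/jfif in the folder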
Example #3
    def find_faces(self, image):
        faces = []

        bounding_boxes, _ = detect_face(image, self.minsize, self.pnet,
                                        self.rnet, self.onet, self.threshold,
                                        self.factor)
        for bb in bounding_boxes:
            face = Face()
            face.container_image = image
            face.bounding_box = np.zeros(4, dtype=np.int32)

            img_size = np.asarray(image.shape)[0:2]
            # Expand the detection box by half the crop margin on each side,
            # clamped to the image bounds.
            face.bounding_box[0] = np.maximum(
                bb[0] - self.face_crop_margin / 2, 0)
            face.bounding_box[1] = np.maximum(
                bb[1] - self.face_crop_margin / 2, 0)
            face.bounding_box[2] = np.minimum(
                bb[2] + self.face_crop_margin / 2, img_size[1])
            face.bounding_box[3] = np.minimum(
                bb[3] + self.face_crop_margin / 2, img_size[0])
            cropped = image[face.bounding_box[1]:face.bounding_box[3],
                            face.bounding_box[0]:face.bounding_box[2], :]
            face.image = misc.imresize(
                cropped, (self.face_crop_size, self.face_crop_size),
                interp='bilinear')

            faces.append(face)

        return faces
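
find_faces references self.pnet/self.rnet/self.onet and crop settings, so it is evidently a method of an MTCNN-backed detector class; a minimal driver sketch follows, where the Detection class name, its no-argument constructor, and the filename are assumptions.

# Hypothetical driver; Detection() and the image filename are assumptions.
import imageio

detector = Detection()
image = imageio.imread("group_photo.jpg")
for face in detector.find_faces(image):
    print(face.bounding_box, face.image.shape)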
Example #4
    def read(self):
        file_name, ok = QFileDialog.getOpenFileNames(self, 'Select multiple files', './')
        if ok:
            if len(file_name) > 9:  # original threshold: at least 10 images
                local_path = os.path.dirname(os.path.dirname(file_name[0]))
                if os.path.exists(local_path + "/detect_face"):
                    if not os.path.exists(local_path + "/embedding.txt"):
                        embedding_image(local_path, self.images_placeholder, self.embeddings, self.phase_train_placeholder, self.sess)
                        train_model(local_path)
                else:
                    detect_face(local_path, file_name)
                    embedding_image(local_path, self.images_placeholder, self.embeddings, self.phase_train_placeholder, self.sess)
                    train_model(os.path.dirname(file_name[0]))
                QMessageBox.information(self, "Message", "success", QMessageBox.Ok, QMessageBox.Ok)
Example #5
def run(input_image):

    if graph is None or model is None:
        print("Run setup function once first")
        return []

    # This detect_face variant appears to return 0 when nothing is found,
    # and otherwise a list of file paths to the cropped face images.
    faces = detect_face(input_image)
    if faces == 0:
        print("No face detected")
        return []

    face_input_image = faces[0]
    infer_image = cv2.imread(face_input_image)
    input_vector = utilities.run_inference(infer_image, graph)
    match = utilities.run_image(model, input_vector)
    os.remove(face_input_image)
    return [match]
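
The guard at the top of run() implies a companion setup routine that populates the module-level graph and model; a hedged call sequence, where setup() and the query image are assumptions implied but not shown above.

# Hypothetical call sequence; setup() is implied but not shown, and the
# expected input type depends on this detect_face variant.
setup()
matches = run("query.jpg")
print(matches)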
Example #6
def add_user_batch(img_path, group_name):
    """
    Add users in batches, read all images in the path and save faces as feature vectors
    :param img_path: Path to the folder holding user images
    :param group_name: Name of user group
    :return: None
    """
    db_path = os.path.join(DB_ROOT_DIR, group_name)
    if not os.path.exists(db_path):
        os.makedirs(db_path)

    if not os.path.exists(img_path):
        print("Invalid Image Path: " + img_path + "!")
        return

    for image in os.listdir(img_path):
        extension = image.split(".")[-1]
        if extension in ("jpg", "png", "jfif"):
            img = cv2.cvtColor(cv2.imread(os.path.join(img_path, image)), cv2.COLOR_BGR2RGB)

            face_locations = detect_face(img)

            if len(face_locations) > 1:
                print("More than One Face in Image: " + image + "!")
                continue
            if len(face_locations) < 1:
                print("No Faces in Image: " + image + "!")
                continue

            # Encode faces into 128-dimensional features
            face_enc = face_recognition.face_encodings(face_image=img,
                                                       known_face_locations=face_locations,
                                                       num_jitters=1
                                                       )[0]

            name = str(image.split(".")[0])
            np.save(os.path.join(db_path, name), face_enc)
            print(name + " Has Been Added!")

    print("User Add Finished!")
Example #7
def add_user(img_path, group_name):
    """
    Add a single user, save face in user image as feature vector.
    :param img_path: Path to user image
    :param group_name: Name of user group
    :return: None
    """
    db_path = os.path.join(DB_ROOT_DIR, group_name)
    if not os.path.exists(db_path):
        os.makedirs(db_path)

    if not os.path.exists(img_path):
        print("Invalid Image Path: " + img_path + "!")
        return

    extension = img_path.split(".")[-1]
    if extension not in ("jpg", "png", "jfif"):
        print("Invalid Image Extension: " + img_path + "!")
        return

    img = cv2.cvtColor(cv2.imread(img_path), cv2.COLOR_BGR2RGB)

    face_locations = detect_face(img)

    if len(face_locations) > 1:
        print("More than One Face in Image!")
        return
    if len(face_locations) < 1:
        print("No Faces in Image!")
        return

    # Encode faces into 128-dimensional features
    face_enc = face_recognition.face_encodings(face_image=img,
                                               known_face_locations=face_locations,
                                               num_jitters=1
                                               )[0]

    name = str(img_path.split(".")[-2].split("/")[-1])
    np.save(os.path.join(db_path, name), face_enc)
    print(name + " Has Been Added!")
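
A usage sketch covering this add_user and the batch variant in the previous example; the paths and the "family" group name are invented. Each saved user is a 128-dimensional encoding on disk, retrievable with np.load, which is exactly how realtime_recognition below builds its database.

# Hypothetical usage; the paths and group name are invented examples.
add_user("photos/bob.png", "family")
add_user_batch("photos/relatives", "family")

# Each user is now a 128-dimensional feature vector on disk:
enc = np.load(os.path.join(DB_ROOT_DIR, "family", "bob.npy"))
print(enc.shape)  # (128,)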
Example #8
def main():
    detect_face(1)
Example #9
def realtime_recognition(group_name,
                         face_size=1,
                         track_interval=200,
                         recognition_interval=2000,
                         scale_factor=1,
                         tolerance=0.6):
    """
    Run realtime face recognition.
    :param group_name: Name of user group
    :param face_size: Minimum detected face size
    :param track_interval: Face detect interval/ms
    :param recognition_interval: Face recognize interval/ms
    :param scale_factor: Image processing zoom factor
    :param tolerance: Face recognition threshold
    :return: None
    """
    # Load face database
    db_path = os.path.join(DB_ROOT_DIR, group_name)

    known_face_encodings = []  # Features in database
    known_face_names = []  # Names in database

    for person in os.listdir(db_path):
        known_face_encodings.append(np.load(os.path.join(db_path, person)))
        known_face_names.append(person.replace(".", "_").split("_")[0])

    face_locations = []  # Container for detected face boxes
    face_names = []  # Container for recognized face names

    cap = cv2.VideoCapture(0)
    cap.set(cv2.CAP_PROP_FRAME_WIDTH, 1920)
    cap.set(cv2.CAP_PROP_FRAME_HEIGHT, 1080)
    ret, frame = cap.read()

    timer = 0  # Frame skip timer
    while ret:
        timer += 1
        ret, frame = cap.read()

        # Face detection
        if timer % (track_interval * FPS // 1000) == 0:
            small_frame = cv2.resize(frame, (0, 0),
                                     fx=1 / scale_factor,
                                     fy=1 / scale_factor)
            rgb_small_frame = cv2.cvtColor(small_frame, cv2.COLOR_BGR2RGB)
            # gray_small_frame = cv2.cvtColor(small_frame, cv2.COLOR_BGR2GRAY)
            face_locations = detect_face(rgb_small_frame, face_size)

        # Face recognition
        if timer % (recognition_interval * FPS //
                    1000) == 0 and face_locations:
            rgb_frame = cv2.cvtColor(frame, cv2.COLOR_BGR2RGB)

            # Detection ran on the downscaled frame, so scale each box back
            # up to full-frame coordinates before encoding (multiplying the
            # list by scale_factor would just repeat its elements).
            scaled_locations = [(top * scale_factor, right * scale_factor,
                                 bottom * scale_factor, left * scale_factor)
                                for (top, right, bottom, left) in face_locations]

            # Encode faces into 128-dimensional features
            face_encodings = face_recognition.face_encodings(
                face_image=rgb_frame,
                known_face_locations=scaled_locations,
                num_jitters=1)
            face_names.clear()
            for face_encoding in face_encodings:
                matches = face_recognition.compare_faces(
                    known_face_encodings=known_face_encodings,
                    face_encoding_to_check=face_encoding,
                    tolerance=tolerance)
                name = "Unknown"
                face_distances = face_recognition.face_distance(
                    face_encodings=known_face_encodings,
                    face_to_compare=face_encoding)

                best_match_index = np.argmin(face_distances)
                if matches[best_match_index]:
                    name = known_face_names[int(best_match_index)]
                face_names.append(name)

        # Draw face boxes and names
        for (top, right, bottom, left), name in zip(face_locations,
                                                    face_names):

            top *= scale_factor
            right *= scale_factor
            bottom *= scale_factor
            left *= scale_factor

            cv2.rectangle(frame, (left, top), (right, bottom), (0, 0, 255), 1)
            cv2.rectangle(frame, (left, bottom),
                          (right, int(bottom + (bottom - top) * 0.25)),
                          (0, 0, 255), cv2.FILLED)
            cv2.putText(frame, name,
                        (left, int(bottom + (bottom - top) * 0.24)),
                        cv2.FONT_HERSHEY_DUPLEX, (right - left) / 120,
                        (255, 255, 255), 1)

        cv2.imshow('camera', frame)
        if cv2.waitKey(1) & 0xFF == ord('q'):
            break
    cap.release()
    cv2.destroyWindow("camera")
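
A hedged launch sketch; the group name is invented, and the snippet assumes a module-level FPS constant, since the frame-skip arithmetic above divides by it.

# Hypothetical invocation; "family" is an invented group name.
# scale_factor=2 halves the frame before detection for speed; boxes are
# scaled back to full-frame coordinates for encoding and drawing.
realtime_recognition("family",
                     face_size=1,
                     track_interval=200,         # detect every ~200 ms
                     recognition_interval=2000,  # re-identify every ~2 s
                     scale_factor=2,
                     tolerance=0.6)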
def align_mtcnn(input_dir,
                output_dir,
                image_size=182,
                margin=44,
                random_order=None,
                gpu_memory_fraction=1.0,
                detect_multiple_faces=False):
    """
    Align a dataset with MTCNN.
    :param input_dir: Directory with raw images, one sub-directory per class
    :param output_dir: Directory to write the aligned images to
    :param image_size: Height and width of the aligned output images
    :param margin: Margin in pixels to add around the bounding box (margin/2 per side)
    :param random_order: Shuffle the processing order (useful when aligning with multiple processes)
    :param gpu_memory_fraction: Fraction of GPU memory to allocate to TensorFlow
    :param detect_multiple_faces: Save every detected face instead of only the most central one
    :return: None
    """
    sleep(random.random())
    output_dir = os.path.expanduser(output_dir)
    if not os.path.exists(output_dir):
        os.makedirs(output_dir)
    # Store some git revision info in a text file in the log directory
    src_path, _ = os.path.split(os.path.realpath(__file__))
    store_revision_info(src_path, output_dir, ' '.join(sys.argv))
    dataset = get_dataset(input_dir)

    print('Creating networks and loading parameters')

    with tf.Graph().as_default():
        gpu_options = tf.GPUOptions(per_process_gpu_memory_fraction=gpu_memory_fraction)
        sess = tf.Session(config=tf.ConfigProto(gpu_options=gpu_options, log_device_placement=False))
        with sess.as_default():
            pnet, rnet, onet = create_mtcnn(sess, None)

    minsize = 20  # minimum size of face
    threshold = [0.6, 0.7, 0.7]  # thresholds for the three MTCNN stages
    factor = 0.709  # scale factor

    # Add a random key to the filename to allow alignment using multiple processes
    random_key = np.random.randint(0, high=99999)
    bounding_boxes_filename = os.path.join(output_dir, 'bounding_boxes_%05d.txt' % random_key)

    with open(bounding_boxes_filename, "w") as text_file:
        nrof_images_total = 0
        nrof_successfully_aligned = 0
        if random_order:
            random.shuffle(dataset)
        for cls in dataset:
            output_class_dir = os.path.join(output_dir, cls.name)
            if not os.path.exists(output_class_dir):
                os.makedirs(output_class_dir)
                if random_order:
                    random.shuffle(cls.image_paths)
            for image_path in cls.image_paths:
                nrof_images_total += 1
                filename = os.path.splitext(os.path.split(image_path)[1])[0]
                output_filename = os.path.join(output_class_dir, filename + '.png')
                print(image_path)
                if not os.path.exists(output_filename):
                    try:
                        img = misc.imread(image_path)
                    except (IOError, ValueError, IndexError) as e:
                        errorMessage = '{}: {}'.format(image_path, e)
                        print(errorMessage)
                    else:
                        if img.ndim < 2:
                            print('Unable to align "%s"' % image_path)
                            text_file.write('%s\n' % (output_filename))
                            continue
                        if img.ndim == 2:
                            img = to_rgb(img)
                        img = img[:, :, 0:3]

                        bounding_boxes, _ = detect_face(img, minsize, pnet, rnet, onet, threshold, factor)
                        nrof_faces = bounding_boxes.shape[0]
                        if nrof_faces > 0:
                            det = bounding_boxes[:, 0:4]
                            det_arr = []
                            img_size = np.asarray(img.shape)[0:2]
                            if nrof_faces > 1:
                                if detect_multiple_faces:
                                    for i in range(nrof_faces):
                                        det_arr.append(np.squeeze(det[i]))
                                else:
                                    bounding_box_size = (det[:, 2] - det[:, 0]) * (det[:, 3] - det[:, 1])
                                    img_center = img_size / 2
                                    offsets = np.vstack([(det[:, 0] + det[:, 2]) / 2 - img_center[1],
                                                         (det[:, 1] + det[:, 3]) / 2 - img_center[0]])
                                    offset_dist_squared = np.sum(np.power(offsets, 2.0), 0)
                                    index = np.argmax(
                                        bounding_box_size - offset_dist_squared * 2.0)  # some extra weight on the centering
                                    det_arr.append(det[index, :])
                            else:
                                det_arr.append(np.squeeze(det))

                            for i, det in enumerate(det_arr):
                                det = np.squeeze(det)
                                bb = np.zeros(4, dtype=np.int32)
                                bb[0] = np.maximum(det[0] - margin / 2, 0)
                                bb[1] = np.maximum(det[1] - margin / 2, 0)
                                bb[2] = np.minimum(det[2] + margin / 2, img_size[1])
                                bb[3] = np.minimum(det[3] + margin / 2, img_size[0])
                                cropped = img[bb[1]:bb[3], bb[0]:bb[2], :]
                                scaled = misc.imresize(cropped, (image_size, image_size), interp='bilinear')
                                nrof_successfully_aligned += 1
                                filename_base, file_extension = os.path.splitext(output_filename)
                                if detect_multiple_faces:
                                    output_filename_n = "{}_{}{}".format(filename_base, i, file_extension)
                                else:
                                    output_filename_n = "{}{}".format(filename_base, file_extension)
                                misc.imsave(output_filename_n, scaled)
                                text_file.write('%s %d %d %d %d\n' % (output_filename_n, bb[0], bb[1], bb[2], bb[3]))
                        else:
                            print('Unable to align "%s"' % image_path)
                            text_file.write('%s\n' % (output_filename))

    print('Total number of images: %d' % nrof_images_total)
    print('Number of successfully aligned images: %d' % nrof_successfully_aligned)
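
A minimal invocation sketch; the directory names are invented, and the helpers the function relies on (store_revision_info, get_dataset, create_mtcnn, detect_face, to_rgb) are assumed to be imported from the FaceNet alignment modules as the snippet implies.

# Hypothetical paths; input_dir is expected to hold one sub-directory per
# identity, as get_dataset implies.
align_mtcnn("datasets/raw_faces",
            "datasets/aligned_faces",
            image_size=182,
            margin=44,
            detect_multiple_faces=False)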
Example #11
def main():
    parser = argparse.ArgumentParser()
    parser.add_argument('--path',
                        help='Path of the video you want to test on.',
                        default=0)
    parser.add_argument('--image-size', default='112,112', help='')
    parser.add_argument('--image', default='Tom_Hanks_54745.png', help='')
    parser.add_argument('--model',
                        default='../gender-age/model/model,0',
                        help='path to load model.')
    parser.add_argument('--gpu', default=0, type=int, help='gpu id')
    parser.add_argument(
        '--det',
        default=0,
        type=int,
        help='mtcnn option, 1 means using R+O, 0 means detect from beginning')

    args = parser.parse_args()

    MINSIZE = 20
    THRESHOLD = [0.6, 0.7, 0.7]
    FACTOR = 0.709
    IMAGE_SIZE = 182
    INPUT_IMAGE_SIZE = 160
    CLASSIFIER_PATH = 'Models/Entity/Entity_margin_svm.pkl'
    VIDEO_PATH = args.path
    FACENET_MODEL_PATH = 'Models/facenet/20180402-114759.pb'

    # Load The Custom Classifier
    with open(CLASSIFIER_PATH, 'rb') as file:
        model, class_names = pickle.load(file)
    print("Custom Classifier, Successfully loaded")

    # # Load age and gender model
    # depth = 16
    # k = 8
    # margin = 0.4
    # weight_file = "Models/weights.28-3.73.hdf5"
    # model_age_gender = WideResNet(64, depth=depth, k=k)()
    # model_age_gender.load_weights(weight_file)
    model_age_gender = face_model.FaceModel(args)

    with tf.Graph().as_default():

        gpu_options = tf.GPUOptions(per_process_gpu_memory_fraction=0.6)
        sess = tf.Session(config=tf.ConfigProto(gpu_options=gpu_options,
                                                log_device_placement=False))

        with sess.as_default():

            # Load the model
            print('Loading feature extraction model')
            facenet.load_model(FACENET_MODEL_PATH)

            # Get input and output tensors
            images_placeholder = tf.get_default_graph().get_tensor_by_name(
                "input:0")
            embeddings = tf.get_default_graph().get_tensor_by_name(
                "embeddings:0")
            phase_train_placeholder = tf.get_default_graph(
            ).get_tensor_by_name("phase_train:0")
            embedding_size = embeddings.get_shape()[1]

            pnet, rnet, onet = align.detect_face.create_mtcnn(
                sess, "./src/align")

            people_detected = set()
            person_detected = collections.Counter()

            frame = cv2.imread(VIDEO_PATH)
            frame = cv2.cvtColor(frame, cv2.COLOR_BGR2RGB)

            # bounding_boxes, _ = align.detect_face.detect_face(frame, MINSIZE, pnet, rnet, onet, THRESHOLD, FACTOR)
            # bounding_boxes = detect_tiny_face(frame)
            bounding_boxes = detect_face(frame)

            print(bounding_boxes)
            print(len(bounding_boxes))
            print(bounding_boxes.shape)
            faces_found = bounding_boxes.shape[0]

            entity = {}

            try:
                if faces_found > 0:
                    det = bounding_boxes[:, 0:4]
                    bb = np.zeros((faces_found, 4), dtype=np.int32)
                    unknown_faces = []
                    unknown_coor = []
                    for i in range(faces_found):
                        bb[i][0] = det[i][0]
                        bb[i][1] = det[i][1]
                        bb[i][2] = det[i][2]
                        bb[i][3] = det[i][3]

                        x, y = bb[i][0], bb[i][1]
                        w = bb[i][2] - x
                        h = bb[i][3] - y

                        x_pad_size = round(w * 0.01)
                        y_pad_size = round(h * 0.01)
                        x -= x_pad_size
                        y -= y_pad_size
                        w += 2 * x_pad_size
                        h += 2 * y_pad_size

                        bb[i][0] = x
                        bb[i][1] = y
                        bb[i][2] = x + w
                        bb[i][3] = y + h

                        cropped = frame[bb[i][1]:bb[i][3],
                                        bb[i][0]:bb[i][2], :]
                        cropped_ = frame[bb[i][1]:bb[i][3], bb[i][0]:bb[i][2]]
                        scaled = cv2.resize(
                            cropped, (INPUT_IMAGE_SIZE, INPUT_IMAGE_SIZE),
                            interpolation=cv2.INTER_CUBIC)
                        scaled = facenet.prewhiten(scaled)
                        scaled_reshape = scaled.reshape(
                            -1, INPUT_IMAGE_SIZE, INPUT_IMAGE_SIZE, 3)
                        feed_dict = {
                            images_placeholder: scaled_reshape,
                            phase_train_placeholder: False
                        }
                        emb_array = sess.run(embeddings, feed_dict=feed_dict)
                        predictions = model.predict_proba(emb_array)
                        best_class_indices = np.argmax(predictions, axis=1)
                        best_class_probabilities = predictions[
                            np.arange(len(best_class_indices)),
                            best_class_indices]
                        best_name = class_names[best_class_indices[0]]
                        print("Name: {}, Probability: {}".format(
                            best_name, best_class_probabilities))

                        color = (0, 0, 0)

                        if best_class_probabilities[0] > 0.75:

                            name = class_names[best_class_indices[0]]

                            if name == "unknown":

                                name_entity = "{}_{}".format(name, i)
                                unknown_faces.append(
                                    cv2.resize(np.copy(cropped_), (64, 64),
                                               interpolation=cv2.INTER_CUBIC))
                                unknown_coor.append(
                                    (bb[i][0], bb[i][1], bb[i][2], bb[i][3]))
                                # color = (255, 255, 255)

                                aligned = np.transpose(cropped_, (2, 0, 1))
                                input_blob = np.expand_dims(aligned, axis=0)
                                data = mx.nd.array(input_blob)
                                db = mx.io.DataBatch(data=(data, ))

                                gender, age = model_age_gender.get_ga(db)
                                print(gender, age)
                                coor = (bb[i][0], bb[i][1], bb[i][2], bb[i][3])
                                entity[coor] = "{}_{}".format(
                                    "F" if gender == 0 else "M", age)
                            else:

                                name_entity = name
                                coor = (bb[i][0], bb[i][1], bb[i][2], bb[i][3])
                                entity[coor] = name_entity
                                # color = (0, 0, 255)

                            person_detected[best_name] += 1

                        else:
                            unknown_faces.append(
                                cv2.resize(np.copy(cropped_), (64, 64),
                                           interpolation=cv2.INTER_CUBIC))
                            unknown_coor.append(
                                (bb[i][0], bb[i][1], bb[i][2], bb[i][3]))
                            name = "unknown"
                            aligned = np.transpose(cropped_, (2, 0, 1))
                            input_blob = np.expand_dims(aligned, axis=0)
                            data = mx.nd.array(input_blob)
                            db = mx.io.DataBatch(data=(data, ))

                            gender, age = model_age_gender.get_ga(db)
                            print(gender, age)
                            coor = (bb[i][0], bb[i][1], bb[i][2], bb[i][3])
                            entity[coor] = "{}_{}".format(
                                "F" if gender == 0 else "M", age)
                            person_detected[best_name] += 1

            except Exception as e:
                print(e)
                pass

    print(entity)

    for key, value in entity.items():
        # Unknown faces are labeled "<gender>_<age>" (e.g. "M_32"), so the
        # gender token is the first element of the split, not the last.
        if value.split("_")[0] in ("M", "F"):
            color = (255, 255, 255)
        else:
            color = (0, 0, 255)

        text_x = key[0]
        text_y = key[3] + 20
        cv2.rectangle(frame, (key[0], key[1]), (key[2], key[3]), color, 2)
        cv2.putText(frame,
                    value, (text_x, text_y),
                    cv2.FONT_HERSHEY_COMPLEX_SMALL,
                    1, (255, 255, 255),
                    thickness=1,
                    lineType=2)

    cv2.imshow('Face Recognition', frame)
    cv2.waitKey(0)
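
A standard entry-point guard, added as a sketch so the example runs as a script.

if __name__ == '__main__':
    main()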