Example #1

import os
import select
import sys

import cv2
import face_recognition

import config


def is_letter_input(letter):
    # Non-blocking check: returns True if the given letter is waiting on stdin.
    if select.select([sys.stdin], [], [], 0.0)[0]:
        input_char = sys.stdin.read(1)
        return input_char.lower() == letter.lower()
    return False


if __name__ == '__main__':

    counter = 0

    cap = config.capturing()
    # cap = cv2.VideoCapture(config.VIDEO_SOURCE)

    # user_folder_prefix is assumed to be defined elsewhere (e.g. in config or from user input).
    if not os.path.exists(config.TRAINING_DIR + user_folder_prefix):
        os.makedirs(config.TRAINING_DIR + user_folder_prefix)

    # Build the list of known face encodings from the images in CHECK_FACE_FOLDER.
    person_names = os.listdir(config.CHECK_FACE_FOLDER)
    known_encodings = []
    names = []

    for p_name in person_names:
        person = face_recognition.load_image_file(
            os.path.join(config.CHECK_FACE_FOLDER, p_name))
        face_encoding = face_recognition.face_encodings(person)[0]
        known_encodings.append(face_encoding)
        names.append(os.path.splitext(p_name)[0])  # file name without extension
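
The example stops after the known encodings are built. A minimal sketch of a main loop that could follow inside the same __main__ block is shown below; the 's'-key trigger, the file-naming scheme, and the on-screen preview are assumptions, not part of the original example.

    # Hedged sketch of an assumed main loop; not part of the original example.
    while True:
        ret, frame = cap.read()
        if not ret:
            break

        # Compare any detected faces against the known encodings loaded above.
        rgb = cv2.cvtColor(frame, cv2.COLOR_BGR2RGB)
        for encoding in face_recognition.face_encodings(rgb):
            matches = face_recognition.compare_faces(known_encodings, encoding)
            if True in matches:
                print('Recognized:', names[matches.index(True)])

        cv2.imshow('capture', frame)
        cv2.waitKey(1)

        # Save a training image when the user types 's' on stdin.
        if is_letter_input('s'):
            out_path = os.path.join(config.TRAINING_DIR + user_folder_prefix,
                                    '{}.jpg'.format(counter))
            cv2.imwrite(out_path, frame)
            counter += 1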
Example #2
        # This fragment runs inside a TensorFlow graph/session block (e.g.
        # `with tf.Session() as sess:`); `modeldir`, `classifier_filename` and
        # `frame_interval` are defined earlier in the full script.
        print('Loading Model...')
        facenet.load_model(modeldir)
        images_placeholder = tf.get_default_graph().get_tensor_by_name(
            "input:0")
        embeddings = tf.get_default_graph().get_tensor_by_name("embeddings:0")
        phase_train_placeholder = tf.get_default_graph().get_tensor_by_name(
            "phase_train:0")
        embedding_size = embeddings.get_shape()[1]

        classifier_filename_exp = os.path.expanduser(classifier_filename)
        with open(classifier_filename_exp, 'rb') as infile:
            (model, class_names) = pickle.load(infile)

        # video_capture = cv2.VideoCapture(config.VIDEO_SOURCE)
        video_capture = config.capturing()

        c = 0

        print('Start Recognition')

        prevTime = 0
        while True:
            ret, frame = video_capture.read()
            if not ret:  # stop if the capture fails or the stream ends
                break

            frame = cv2.resize(frame, (0, 0), fx=0.50,
                               fy=0.50)  # resize frame (optional)

            curTime = time.time()  # current time, used to compute FPS
            timeF = frame_interval
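            # --- Hedged sketch (not from the original fragment): a typical way the
            # embeddings tensor and pickled classifier are used inside this loop.
            # It assumes the surrounding `with tf.Session() as sess:` block, and that
            # a real script would crop a detected, aligned face (e.g. via MTCNN);
            # resizing the whole frame below is only a stand-in for that step.
            if c % timeF == 0:
                rgb = cv2.cvtColor(frame, cv2.COLOR_BGR2RGB)
                cropped = cv2.resize(rgb, (160, 160))  # stand-in for a real face crop
                prewhitened = facenet.prewhiten(cropped)  # normalization expected by FaceNet
                feed_dict = {images_placeholder: prewhitened.reshape(-1, 160, 160, 3),
                             phase_train_placeholder: False}
                emb_array = sess.run(embeddings, feed_dict=feed_dict)  # (1, embedding_size)
                probabilities = model.predict_proba(emb_array)  # pickled sklearn classifier
                best_index = probabilities[0].argmax()
                print(class_names[best_index], probabilities[0][best_index])
            c += 1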