# Example #1 (score: 0)
def face2database(picture_path,
                  model_path,
                  database_path,
                  batch_size=90,
                  image_size=160):
    """Compute facenet embeddings for every face image and save them.

    picture_path  -- root folder with one sub-folder of images per person
    model_path    -- path of the pretrained facenet model
    database_path -- output file for the embedding database (np.savez:
                     'emb' = embedding matrix, 'lab' = labels)
    batch_size    -- number of images per forward pass
    image_size    -- side length the images are loaded at
    """
    with tf.Graph().as_default():
        with tf.Session() as sess:
            dataset = facenet.get_dataset(picture_path)
            paths, labels = facenet.get_image_paths_and_labels(dataset)
            print('Number of classes: %d' % len(dataset))
            print('Number of images: %d' % len(paths))

            # Load the model
            print('Loading feature extraction model')
            facenet.load_model(model_path)

            # Look up the input/output tensors of the loaded graph.
            graph = tf.get_default_graph()
            images_placeholder = graph.get_tensor_by_name("input:0")
            embeddings = graph.get_tensor_by_name("embeddings:0")
            phase_train_placeholder = graph.get_tensor_by_name(
                "phase_train:0")
            embedding_size = embeddings.get_shape()[1]

            # Run forward pass to calculate embeddings
            print('Calculating features for images')
            n_images = len(paths)
            emb_array = np.zeros((n_images, embedding_size))
            # Walk the image list in fixed-size batches.
            for start in range(0, n_images, batch_size):
                stop = min(start + batch_size, n_images)
                images = facenet.load_data(paths[start:stop], False, False,
                                           image_size)
                feed = {images_placeholder: images,
                        phase_train_placeholder: False}
                emb_array[start:stop, :] = sess.run(embeddings,
                                                    feed_dict=feed)

            np.savez(database_path, emb=emb_array, lab=labels)
            print("数据库特征提取完毕!")  # "Database feature extraction finished!"
# Example #2 (score: 0)
    def __init__(self, sess,
                 data_path='output', model='20180402-114759',
                 classifier_name='result1'):
        """Prepare the classifier: dataset, facenet embedding network,
        and an (as yet untrained) linear SVM."""
        self.sess = sess

        # Folders of pre-cropped face images, one folder per identity.
        self.dataset = facenet.get_dataset(data_path)
        # Folder (class) names inside the dataset directory.
        self.class_names = [cls.name.replace('_', ' ') for cls in self.dataset]
        # Flat list of image paths and the matching class labels.
        self.paths, self.labels = facenet.get_image_paths_and_labels(self.dataset)
        print('Number of classes: %d' % len(self.dataset))
        print('Number of images: %d' % len(self.paths))

        print('Loading feature extraction model')
        # Location of the pretrained face-embedding network.
        model_path = 'model/{}/{}.pb'.format(model, model)
        facenet.load_model(model_path)
        self.model = SVC(kernel='linear', probability=True)  # classifier to train

        graph = tf.get_default_graph()
        self.images_placeholder = graph.get_tensor_by_name("input:0")
        self.embeddings = graph.get_tensor_by_name("embeddings:0")
        self.phase_train_placeholder = graph.get_tensor_by_name("phase_train:0")
        self.embedding_size = self.embeddings.get_shape()[1]

        # Name and path under which the trained classifier will be saved.
        classifier_path = 'classifier/classifier_{0}.pkl'.format(classifier_name)
        self.classifier_path = os.path.expanduser(classifier_path)
        self.batch_size = 1000
        self.image_size = 160
    def __init__(self,
                 sess,
                 pnet,
                 rnet,
                 onet,
                 out_path='output',
                 data_path='dataset'):
        """Set up face alignment: the three detector networks
        (pnet, rnet, onet), the input dataset and the output folder
        for the cropped faces."""
        self.sess = sess
        self.pnet = pnet
        self.rnet = rnet
        self.onet = onet

        # Folder where the aligned face crops will be written.
        self.output_path = os.path.expanduser(out_path)
        if not os.path.exists(self.output_path):
            os.makedirs(self.output_path)

        # Image paths of the people to be recognised.
        self.dataset = facenet.get_dataset(data_path)

        self.minsize = 20                  # smallest face region to detect (20 x 20 px)
        self.threshold = [0.6, 0.7, 0.7]   # per-stage detection score thresholds
        self.factor = 0.709                # scale factor used by the detector
        self.margin = 44                   # extra margin kept around a detected face
        self.image_size = 182              # side length of the saved face crops

        self.images = []                   # paths of the stored face images
        self.aligned_images = 0            # count of faces successfully aligned
# Instantiate the three pipeline components (classes defined elsewhere).
face_detector = FaceDetector()
face_recognition = FaceRecognition()
# NOTE(review): "classfier" looks like a typo for "classifier" — kept as-is
# since other code may reference this exact name.
face_classfier = FaceClassifier()


def get_image_paths_and_labels(dataset):
    """Flatten a dataset of per-person image classes.

    dataset -- iterable of class objects, each exposing an ``image_paths``
               list and a ``name`` string.

    Returns a tuple ``(image_paths_flat, labels_flat)``: every image path
    concatenated in dataset order, and the owning class *name* repeated
    once per image (note: names, not integer indices).
    """
    image_paths_flat = []
    labels_flat = []
    # Iterate the classes directly instead of indexing via range(len(...)).
    for cls in dataset:
        image_paths_flat.extend(cls.image_paths)
        labels_flat.extend([cls.name] * len(cls.image_paths))
    return image_paths_flat, labels_flat


# Load the face dataset and flatten it into parallel path/label lists.
dataset = get_dataset(datadir)

paths, labels = get_image_paths_and_labels(dataset)
print('Number of classes: %d' % len(dataset))
print('Number of images: %d' % len(paths))

# Run forward pass to calculate embeddings
print('Calculating features for images')
image_size = 160
nrof_images = len(paths)
# Two feature rows per image; assumes a 512-dimensional embedding — TODO confirm
# against the loaded model. NOTE(review): the factor of 2 presumably accounts
# for an augmented (e.g. flipped) copy of each image — verify in the loop below.
features = np.zeros((2 * nrof_images, 512))
labels = np.asarray(labels).repeat(2)  # duplicate each label to match the 2x rows
for i in range(nrof_images):
    img = cv2.imread(paths[i])
    if img is None: