Example #1
    def index_faces(self, paths, paths_to_pk, output_dir, video_pk):
        with tf.Graph().as_default():
            config = tf.ConfigProto()
            config.gpu_options.per_process_gpu_memory_fraction = 0.15
            with tf.Session(config=config) as sess:
                output_dir = os.path.expanduser(output_dir)
                if not os.path.isdir(output_dir):
                    os.makedirs(output_dir)
                logging.info("Loading trained model...\n")
                meta_file, ckpt_file, model_dir = facenet.get_model_filenames()
                facenet.load_model(model_dir, meta_file, ckpt_file)
                images_placeholder = tf.get_default_graph().get_tensor_by_name("input:0")
                embeddings = tf.get_default_graph().get_tensor_by_name("embeddings:0")
                phase_train_placeholder = tf.get_default_graph().get_tensor_by_name("phase_train:0")
                image_size = images_placeholder.get_shape()[1]
                embedding_size = embeddings.get_shape()[1]
                logging.info('Generating embeddings from images...\n')
                start_time = time.time()
                batch_size = 25
                nrof_images = len(paths)
                nrof_batches = int(np.ceil(1.0 * nrof_images / batch_size))
                emb_array = np.zeros((nrof_images, embedding_size))
                count = 0
                path_count = {}
                entries = []
                for i in xrange(nrof_batches):
                    start_index = i * batch_size
                    end_index = min((i + 1) * batch_size, nrof_images)
                    paths_batch = paths[start_index:end_index]
                    for eindex, fname in enumerate(paths_batch):
                        count += 1
                        entry = {
                            'path': fname,
                            'detection_primary_key': paths_to_pk[fname],
                            'index': eindex,
                            'type': 'detection',
                            'video_primary_key': video_pk
                        }
                        entries.append(entry)
                    images = facenet.load_data(paths_batch, do_random_crop=False, do_random_flip=False,
                                               image_size=image_size, do_prewhiten=True)
                    feed_dict = {images_placeholder: images, phase_train_placeholder: False}
                    emb_array[start_index:end_index, :] = sess.run(embeddings, feed_dict=feed_dict)

                if nrof_images:
                    time_avg_forward_pass = (time.time() - start_time) / float(nrof_images)
                    logging.info("Forward pass took avg of %.3f[seconds/image] for %d images\n" % (
                    time_avg_forward_pass, nrof_images))
                    logging.info("Finally saving embeddings and gallery to: %s" % (output_dir))
                feat_fname = os.path.join(output_dir, "facenet.npy")
                entries_fname = os.path.join(output_dir, "facenet.json")
                np.save(feat_fname, emb_array)
                with open(entries_fname, 'w') as fh:
                    json.dump(entries, fh)
                return path_count, emb_array, entries, feat_fname, entries_fname
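
A minimal usage sketch for the indexer above; the FacenetIndexer class name, the crop directory, and the primary-key mapping are assumptions for illustration, not part of the original source.

import glob

indexer = FacenetIndexer()  # hypothetical class that defines index_faces above
paths = sorted(glob.glob('/tmp/face_crops/*.jpg'))    # aligned face crops (assumed location)
paths_to_pk = {p: i for i, p in enumerate(paths)}     # detection primary keys (stand-in)
path_count, emb_array, entries, feat_fname, entries_fname = indexer.index_faces(
    paths, paths_to_pk, output_dir='/tmp/facenet_index', video_pk=1)
print(emb_array.shape)   # (len(paths), embedding_size)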
Example #2
 def load(self):
     if self.session is None:
         logging.warning("Loading the network {}".format(self.name))
         config = tf.ConfigProto()
         config.gpu_options.per_process_gpu_memory_fraction = 0.15
         self.session = tf.InteractiveSession(config=config)
         self.graph_def = tf.Graph().as_default()
         meta_file, ckpt_file, model_dir = facenet.get_model_filenames()
         self.saver = tf.train.import_meta_graph(os.path.join(model_dir, meta_file))
         self.saver.restore(self.session, os.path.join(model_dir, ckpt_file))
         self.images_placeholder = tf.get_default_graph().get_tensor_by_name("input:0")
         self.embeddings = tf.get_default_graph().get_tensor_by_name("embeddings:0")
         self.phase_train_placeholder = tf.get_default_graph().get_tensor_by_name("phase_train:0")
         self.image_size = self.images_placeholder.get_shape()[1]
         self.embedding_size = self.embeddings.get_shape()[1]
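
A short sketch of how this lazily loaded network might be driven once load() has run; the FacenetNetwork class name and the random stand-in batch are assumptions, and a real caller would feed prewhitened face crops instead.

import numpy as np

net = FacenetNetwork()   # hypothetical class whose load() method is shown above
net.load()               # builds the session only on the first call
size = int(net.image_size)
batch = np.random.rand(4, size, size, 3).astype(np.float32)  # stand-in for prewhitened crops
feed = {net.images_placeholder: batch, net.phase_train_placeholder: False}
emb = net.session.run(net.embeddings, feed_dict=feed)         # shape (4, embedding_size)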
Example #3
# Heads up: this block is CPU-heavy, so it is kept at module scope
# for other functions to reuse.
with tf.Graph().as_default():
    gpu_memory_fraction = 1.0
    gpu_options = tf.GPUOptions(
        per_process_gpu_memory_fraction=gpu_memory_fraction)
    sess = tf.Session(config=tf.ConfigProto(gpu_options=gpu_options,
                                            log_device_placement=False))
    with sess.as_default():
        pnet, rnet, onet = create_mtcnn(sess, None)

with tf.Graph().as_default():
    sess = tf.Session()
    # Load the model
    meta_file, ckpt_file = get_model_filenames(MODELPATH)
    saver = tf.train.import_meta_graph(os.path.join(MODELPATH, meta_file))
    saver.restore(sess, os.path.join(MODELPATH, ckpt_file))
    # Get the input/output tensors
    images_placeholder = tf.get_default_graph().get_tensor_by_name("input:0")
    embeddings = tf.get_default_graph().get_tensor_by_name("embeddings:0")
    phase_train_placeholder = tf.get_default_graph().get_tensor_by_name(
        "phase_train:0")

    # Face recognition: networks loaded above
    print('Creating networks and loading parameters')

    class MemberUploadFace(View):
        """
        Payment-confirmation page features, including face recognition
        and order creation.
        """
Example #4
print('Loaded classifier model from file "%s"' % classifier_filename_exp)

graph = tf.Graph()

model_exp = os.path.expanduser(model_path)
if os.path.isfile(model_exp):
    print('Model filename: %s' % model_exp)
    with gfile.FastGFile(model_exp, 'rb') as f:
        graph_def = tf.GraphDef()
        graph_def.ParseFromString(f.read())
    with graph.as_default():
        tf.import_graph_def(graph_def, name='')
    sess = tf.Session(graph=graph)
else:
    print('Model directory: %s' % model_exp)
    meta_file, ckpt_file = facenet.get_model_filenames(model_exp)

    print('Metagraph file: %s' % meta_file)
    print('Checkpoint file: %s' % ckpt_file)

    sess = tf.Session(graph=graph)
    saver = tf.train.import_meta_graph(os.path.join(model_exp, meta_file))
    saver.restore(sess, os.path.join(model_exp, ckpt_file))


def background_task():
    while True:
        files = list(
            map(lambda x: os.path.join(UPLOAD_FOLDER, x),
                sorted(os.listdir(UPLOAD_FOLDER))))
        if len(files) >= 5:
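
The snippet breaks off inside background_task, so as a hedged follow-up, here is one way the loaded session and the previously unpickled classifier could be combined; it assumes the frozen-graph (.pb) branch was taken so the tensors live in sess.graph, and that classifier_filename_exp held a scikit-learn (model, class_names) pair, as in the stock facenet classifier example.

import numpy as np

def classify_faces(image_paths, image_size=160):
    # compute embeddings with the session/graph loaded above (frozen-graph branch assumed)
    images = facenet.load_data(image_paths, do_random_crop=False,
                               do_random_flip=False, image_size=image_size,
                               do_prewhiten=True)
    images_placeholder = sess.graph.get_tensor_by_name("input:0")
    embeddings = sess.graph.get_tensor_by_name("embeddings:0")
    phase_train_placeholder = sess.graph.get_tensor_by_name("phase_train:0")
    emb = sess.run(embeddings, feed_dict={images_placeholder: images,
                                          phase_train_placeholder: False})
    # score against the unpickled (model, class_names) pair -- assumed objects
    probs = model.predict_proba(emb)
    best = np.argmax(probs, axis=1)
    return [(class_names[i], probs[n, i]) for n, i in enumerate(best)]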