def load_model(self, model_path="model/20170216-091149"):
    print("Loading the FaceNet model. This could take a minute.")
    self.graph = tf.Graph()
    self.sess = tf.Session()
    with self.sess.as_default():
        # Load the model
        if model_path.endswith('.pb'):
            print('Model pb: %s' % model_path)
            with tf.gfile.FastGFile(model_path, 'rb') as f:
                graph_def = tf.GraphDef()
                graph_def.ParseFromString(f.read())
                _ = tf.import_graph_def(graph_def, name='')
        else:
            print('Model directory: %s' % model_path)
            meta_file, ckpt_file = facenet.get_model_filenames(
                os.path.expanduser(model_path))
            print('Metagraph file: %s' % meta_file)
            print('Checkpoint file: %s' % ckpt_file)
            facenet.load_model(model_path, meta_file, ckpt_file)

        # Get input and output tensors
        self.images_placeholder = tf.get_default_graph().get_tensor_by_name("input:0")
        self.embeddings = tf.get_default_graph().get_tensor_by_name("embeddings:0")
        self.phase_train_placeholder = tf.get_default_graph().get_tensor_by_name("phase_train:0")
    print("FaceNet loaded!")
def main(args):
    with tf.Graph().as_default():
        with tf.Session() as sess:
            # Load the model metagraph and checkpoint
            print('Model directory: %s' % args.model_dir)
            meta_file, ckpt_file = facenet.get_model_filenames(os.path.expanduser(args.model_dir))
            print('Metagraph file: %s' % meta_file)
            print('Checkpoint file: %s' % ckpt_file)
            model_dir_exp = os.path.expanduser(args.model_dir)
            saver = tf.train.import_meta_graph(os.path.join(model_dir_exp, meta_file), clear_devices=True)
            tf.get_default_session().run(tf.global_variables_initializer())
            tf.get_default_session().run(tf.local_variables_initializer())
            saver.restore(tf.get_default_session(), os.path.join(model_dir_exp, ckpt_file))

            # Retrieve the protobuf graph definition and fix the batch norm nodes
            input_graph_def = sess.graph.as_graph_def()

            # Freeze the graph def
            output_graph_def = freeze_graph_def(sess, input_graph_def, 'embeddings')

            # Serialize and dump the output graph to the filesystem
            with tf.gfile.GFile(args.output_file, 'wb') as f:
                f.write(output_graph_def.SerializeToString())
            print("%d ops in the final graph: %s" % (len(output_graph_def.node), args.output_file))
def main(args):
    with tf.Graph().as_default():
        with tf.Session() as sess:
            # Get the paths for the corresponding images
            #paths, actual_issame = lfw.get_paths(os.path.expanduser(args.test_dir), pairs, args.lfw_file_ext)
            paths = glob.glob(os.path.join(os.path.expanduser(args.data_dir), '*.jpeg'))
            paths.sort()

            # Load the model
            print('Model directory: %s' % args.model_dir)
            meta_file, ckpt_file = facenet.get_model_filenames(
                os.path.expanduser(args.model_dir))
            print('Metagraph file: %s' % meta_file)
            print('Checkpoint file: %s' % ckpt_file)
            facenet.load_model(args.model_dir, meta_file, ckpt_file)

            # Get input and output tensors
            images_placeholder = tf.get_default_graph().get_tensor_by_name("batch_join:0")
            #embeddings = tf.get_default_graph().get_tensor_by_name("embeddings:0")
            #images_placeholder = tf.get_default_graph().get_tensor_by_name("input:0")
            embeddings = tf.get_default_graph().get_tensor_by_name("Add:0")
            phase_train_placeholder = tf.get_default_graph().get_tensor_by_name("phase_train:0")

            image_size = images_placeholder.get_shape()[1]
            embedding_size = embeddings.get_shape()[1]

            # Run forward pass to calculate embeddings
            print('Running forward pass on testing images')
            batch_size = args.batch_size
            nrof_images = len(paths)
            nrof_batches = int(math.ceil(1.0 * nrof_images / batch_size))
            emb_array = np.zeros((nrof_images, embedding_size))
            for i in range(nrof_batches):
                print(i)
                start_index = i * batch_size
                end_index = min((i + 1) * batch_size, nrof_images)
                paths_batch = paths[start_index:end_index]
                images = facenet.load_data(paths_batch, False, False, image_size,
                                           do_prewhiten=False, scale=args.scale,
                                           cx=args.cx, cy=args.cy)
                feed_dict = {images_placeholder: images,
                             phase_train_placeholder: False}
                emb_array[start_index:end_index, :] = sess.run(embeddings, feed_dict=feed_dict)

            # L2-normalize each embedding (epsilon guards against division by zero)
            for i in range(nrof_images):
                norm = np.linalg.norm(emb_array[i, :]) + 1e-7
                emb_array[i, :] = emb_array[i, :] / norm
            np.save(args.fname, emb_array)
def main(args):
    with tf.Graph().as_default():
        with tf.Session() as sess:
            # Get the paths for the corresponding images
            paths, actual_issame, ids = lfw.get_paths(args.lfw_dir, args.labels_file)
            logging.info('len of paths:%d' % len(paths))
            logging.info('paths[0]:%s' % paths[0])

            # Load the model
            logging.info('Model directory: %s' % args.model_dir)
            meta_file, ckpt_file = facenet.get_model_filenames(
                os.path.expanduser(args.model_dir))
            logging.info('Metagraph file: %s' % meta_file)
            logging.info('Checkpoint file: %s' % ckpt_file)
            facenet.load_model(args.model_dir, meta_file, ckpt_file)

            # Get input and output tensors
            images_placeholder = tf.get_default_graph().get_tensor_by_name("input:0")
            embeddings = tf.get_default_graph().get_tensor_by_name("embeddings:0")
            phase_train_placeholder = tf.get_default_graph().get_tensor_by_name("phase_train:0")

            image_size = images_placeholder.get_shape()[1]
            embedding_size = embeddings.get_shape()[1]

            # Run forward pass to calculate embeddings
            logging.info('Running forward pass on LFW images')
            batch_size = args.lfw_batch_size
            nrof_images = len(paths)
            nrof_batches = int(math.ceil(1.0 * nrof_images / batch_size))
            emb_array = np.zeros((nrof_images, embedding_size))
            logging.info('batch size:%d, nrof_images len:%d, nrof_batches len:%d' %
                         (batch_size, nrof_images, nrof_batches))
            for i in range(nrof_batches):
                start_index = i * batch_size
                end_index = min((i + 1) * batch_size, nrof_images)
                paths_batch = paths[start_index:end_index]
                images = facenet.load_data(paths_batch, False, False, image_size)
                feed_dict = {images_placeholder: images,
                             phase_train_placeholder: False}
                emb_array[start_index:end_index, :] = sess.run(embeddings, feed_dict=feed_dict)
            logging.info('forward process complete.')
            logging.info('emb_array len:%d' % len(emb_array))

            best_threshold, tpr, fpr, acc, precision, recall, result = lfw.evaluate_v2(
                emb_array, actual_issame, ids)
            logging.warning('model:%s,%s' % (args.model_dir, ckpt_file))
            logging.warning('Best result: %0.5f,%0.3f,%0.3f,%0.3f,%0.3f,%0.3f' %
                            (best_threshold, tpr, fpr, acc, precision, recall))
def __init__(self, model_dir):
    self.in_size = 160
    self.graph = tf.Graph()
    self.graph.as_default()  # NOTE: a bare as_default() is a no-op; it only takes effect inside a `with` block
    tf_config = tf.compat.v1.ConfigProto(allow_soft_placement=True)
    self.session = tf.compat.v1.Session(config=tf_config)
    self.session.as_default()
    print('Loading face-embedder model...')
    global facenet
    sys.path.append(model_dir)
    import facenet
    model_data_dir = os.path.join(model_dir, '20170512-110547')
    meta_file, ckpt_file = facenet.get_model_filenames(model_data_dir)
    g = tf.Graph()
    g.as_default()  # likewise a no-op; the restore below happens in the global default graph
    saver = tf.compat.v1.train.import_meta_graph(
        os.path.join(model_data_dir, meta_file))
    saver.restore(self.session, os.path.join(model_data_dir, ckpt_file))
    self.images_placeholder = tf.compat.v1.get_default_graph().get_tensor_by_name('input:0')
    self.embeddings = tf.compat.v1.get_default_graph().get_tensor_by_name('embeddings:0')
    self.phase_train_placeholder = tf.compat.v1.get_default_graph().get_tensor_by_name('phase_train:0')
    print('Loaded face-embedder model')
def load_model(self):
    logging.info('[INFO] Loading model...')
    try:
        with self.graph.as_default():
            with tf.device(self.device_str):
                with self.session.as_default():
                    model_path = self._model_dir
                    meta_file, ckpt_file = get_model_filenames(model_path)
                    saver = tf.train.import_meta_graph(
                        os.path.join(model_path, meta_file))
                    saver.restore(self.session,
                                  os.path.join(model_path, ckpt_file))
                    self.images_placeholder = self.graph.get_tensor_by_name('input:0')
                    self.embeddings = self.graph.get_tensor_by_name('embeddings:0')
                    self.phase_train_placeholder = self.graph.get_tensor_by_name('phase_train:0')
    except Exception as e:
        logging.error("[Error] Failed to load model due to %s", str(e), exc_info=True)
        # Consider re-raising here so callers can abort instead of continuing with an unloaded model.
    else:
        logging.info('[INFO] Model loaded!')
def __init__(self, facenet_model_checkpoint):
    network = importlib.import_module('recognition.models.inception_resnet_v1')
    print('Pre-trained model: %s' % os.path.expanduser(facenet_model_checkpoint))
    self.image_size = 160
    with tf.Graph().as_default():
        self.images_placeholder = tf.placeholder(
            tf.float32, [None, self.image_size, self.image_size, 3], name='input')
        self.phase_train_placeholder = tf.placeholder(tf.bool, name='phase_train')

        # Build the inference graph
        prelogits, _ = network.inference(
            self.images_placeholder, 1.0,
            phase_train=self.phase_train_placeholder,
            bottleneck_layer_size=512, weight_decay=0.0)
        model_exp = os.path.expanduser(facenet_model_checkpoint)
        self.embeddings = tf.nn.l2_normalize(prelogits, 1, 1e-10, name='embeddings')

        #gpu_options = tf.GPUOptions(per_process_gpu_memory_fraction=gpu_memory_fraction)
        gpu_options = tf.GPUOptions(allow_growth=True)
        self.sess = tf.Session(config=tf.ConfigProto(
            gpu_options=gpu_options, log_device_placement=False))
        meta_file, ckpt_file = facenet.get_model_filenames(model_exp)
        saver = tf.train.Saver(tf.trainable_variables())
        print('Restoring pretrained model: %s' % facenet_model_checkpoint)
        saver.restore(self.sess, os.path.join(model_exp, ckpt_file))
    self.embedding_size = self.embeddings.get_shape()[1]
def main(args):
    with tf.Graph().as_default():
        with tf.Session() as sess:
            # Load the model metagraph and checkpoint
            print('Model directory: %s' % args.model_dir)
            meta_file, ckpt_file = facenet.get_model_filenames(
                os.path.expanduser(args.model_dir))
            print('Metagraph file: %s' % meta_file)
            print('Checkpoint file: %s' % ckpt_file)
            # Expand the user path (e.g. "~") to a concrete directory
            model_dir_exp = os.path.expanduser(args.model_dir)
            saver = tf.train.import_meta_graph(
                os.path.join(model_dir_exp, meta_file), clear_devices=True)
            tf.get_default_session().run(tf.global_variables_initializer())
            tf.get_default_session().run(tf.local_variables_initializer())
            saver.restore(tf.get_default_session(), os.path.join(model_dir_exp, ckpt_file))

            # Retrieve the protobuf graph definition and fix the batch norm nodes
            input_graph_def = sess.graph.as_graph_def()

            # Freeze the graph def
            output_graph_def = freeze_graph_def(sess, input_graph_def, 'embeddings')

            # Serialize and dump the output graph to the filesystem
            with tf.gfile.GFile(args.output_file, 'wb') as f:
                f.write(output_graph_def.SerializeToString())
            print("%d ops in the final graph: %s" %
                  (len(output_graph_def.node), args.output_file))
def __init__(self, watch_path):
    cost('init graph and session')
    self.img_path = watch_path
    self.last_md5 = ''
    self.graph = tf.Graph()
    with self.graph.as_default():
        gpu_options = tf.GPUOptions(per_process_gpu_memory_fraction=1.0)
        self.session = tf.Session(config=tf.ConfigProto(
            gpu_options=gpu_options, log_device_placement=False))
        with self.session.as_default():
            self.pnet, self.rnet, self.onet = align.detect_face.create_mtcnn(self.session, None)
            cost('create mtcnn')

            # Load the model
            model_dir = '/dl/models/resnet'
            print('Model directory: %s' % model_dir)
            meta_file, ckpt_file = facenet.get_model_filenames(model_dir)
            print('Metagraph file: %s' % meta_file)
            print('Checkpoint file: %s' % ckpt_file)
            cost('start load model')
            facenet.load_model(model_dir, meta_file, ckpt_file)
            cost('load model done')

            # Get input and output tensors
            cost('start load vectors')
            self.images_placeholder = tf.get_default_graph().get_tensor_by_name("input:0")
            cost('loaded images_placeholder')
            self.embeddings = tf.get_default_graph().get_tensor_by_name("embeddings:0")
            cost('loaded embeddings')
            self.phase_train_placeholder = tf.get_default_graph().get_tensor_by_name("phase_train:0")
            cost('loaded phase_train_placeholder')
    self.face_set = []
def main(args):
    with tf.Graph().as_default():
        with tf.Session() as sess:
            # Load the model metagraph and checkpoint
            print('Model directory: %s' % args.model_dir)
            meta_file, ckpt_file = facenet.get_model_filenames(
                os.path.expanduser(args.model_dir))
            print('Metagraph file: %s' % meta_file)
            print('Checkpoint file: %s' % ckpt_file)
            facenet.load_model(args.model_dir, meta_file, ckpt_file)

            # Keep only the inference-relevant nodes when freezing
            output_node_names = 'embeddings'
            whitelist_names = []
            for node in sess.graph.as_graph_def().node:
                if (node.name.startswith('InceptionResnetV1')
                        or node.name.startswith('embeddings')
                        or node.name.startswith('phase_train')):
                    print(node.name)
                    whitelist_names.append(node.name)

            output_graph_def = graph_util.convert_variables_to_constants(
                sess, sess.graph.as_graph_def(), output_node_names.split(","),
                variable_names_whitelist=whitelist_names)

            with tf.gfile.GFile(args.output_file, 'wb') as f:
                f.write(output_graph_def.SerializeToString())
            print("%d ops in the final graph." % len(output_graph_def.node))  #pylint: disable=no-member
def main(args):
    network = importlib.import_module(args.model_def, 'inference')

    # Start with a gray image with a little noise
    np.random.seed(seed=args.seed)
    #img_noise = np.random.uniform(size=(args.image_size,args.image_size,3)) + 100.0
    img_noise = plt.imread(
        '/data/zming/datasets/lfw/lfw_mtcnnpy_160/Zydrunas_Ilgauskas/Zydrunas_Ilgauskas_0001.png')

    sess = tf.Session()
    t_input = tf.placeholder(np.float32,
                             shape=(args.image_size, args.image_size, 3),
                             name='input')  # define the input tensor
    image_mean = 117.0
    t_preprocessed = tf.expand_dims(t_input - image_mean, 0)

    # Build the inference graph
    network.inference(t_preprocessed, 1.0, phase_train=True, weight_decay=0.0)

    # Create a saver for restoring variables
    saver = tf.train.Saver(tf.global_variables())

    # Restore the parameters
    meta_file, ckpt_file = facenet.get_model_filenames(args.model_file)
    saver.restore(sess, os.path.join(os.path.expanduser(args.model_file), ckpt_file))
    #saver.restore(sess, args.model_file)

    layers = [op.name for op in tf.get_default_graph().get_operations()
              if op.type == 'Conv2D']
    feature_nums = {layer: int(T(layer).get_shape()[-1]) for layer in layers}
    print('Number of layers: %d' % len(layers))
    for layer in sorted(feature_nums.keys()):
        print('%s%d' % ((layer + ': ').ljust(40), feature_nums[layer]))

    # Picking some internal layer. Note that we use outputs before applying the ReLU nonlinearity
    # to have non-zero gradients for features with negative initial activations.
    #layer = 'InceptionResnetV1/Repeat_2/block8_3/Conv2d_1x1/Conv2D'
    #layer = 'incept4b/in4_conv1x1_31/Conv2D'
    layer = 'InceptionResnetV1/Repeat_2/block8_3/Conv2d_1x1/convolution'
    result_dir = '../data/'
    print('Number of features in layer "%s": %d' % (layer, feature_nums[layer]))
    channels = list(range(feature_nums[layer]))  # list() so np.random.shuffle works in Python 3
    np.random.shuffle(channels)
    for i in range(32):
        print('Rendering feature %d' % channels[i])
        channel = channels[i]
        img = render_naive(sess, t_input, T(layer)[:, :, :, channel], img_noise)
        filename = '%s_%03d.png' % (layer.replace('/', '_'), channel)
        misc.imsave(os.path.join(result_dir, filename), img)
def main(args):
    with tf.Graph().as_default():
        with tf.compat.v1.Session() as sess:
            # create output directory if it doesn't exist
            output_dir = os.path.expanduser(args.output_dir)
            if not os.path.isdir(output_dir):
                os.makedirs(output_dir)

            # load the model
            print("Loading trained model...\n")
            meta_file, ckpt_file = facenet.get_model_filenames(
                os.path.expanduser(args.trained_model_dir))
            facenet.load_model(args.trained_model_dir, meta_file, ckpt_file)

            # grab all image paths and labels
            print("Finding image paths and targets...\n")
            data = load_files(args.data_dir, load_content=False, shuffle=False)
            labels_array = data['target']
            paths = data['filenames']

            # Get input and output tensors
            images_placeholder = tf.compat.v1.get_default_graph().get_tensor_by_name("input:0")
            embeddings = tf.compat.v1.get_default_graph().get_tensor_by_name("embeddings:0")
            phase_train_placeholder = tf.compat.v1.get_default_graph().get_tensor_by_name("phase_train:0")

            image_size = images_placeholder.get_shape()[1]
            embedding_size = embeddings.get_shape()[1]

            # Run forward pass to calculate embeddings
            print('Generating embeddings from images...\n')
            start_time = time.time()
            batch_size = args.batch_size
            nrof_images = len(paths)
            nrof_batches = int(np.ceil(1.0 * nrof_images / batch_size))
            emb_array = np.zeros((nrof_images, embedding_size))
            for i in xrange(nrof_batches):
                start_index = i * batch_size
                end_index = min((i + 1) * batch_size, nrof_images)
                paths_batch = paths[start_index:end_index]
                images = facenet.load_data(paths_batch, do_random_crop=False,
                                           do_random_flip=False,
                                           image_size=image_size, do_prewhiten=True)
                feed_dict = {images_placeholder: images,
                             phase_train_placeholder: False}
                emb_array[start_index:end_index, :] = sess.run(embeddings, feed_dict=feed_dict)

            time_avg_forward_pass = (time.time() - start_time) / float(nrof_images)
            print("Forward pass took avg of %.3f[seconds/image] for %d images\n" %
                  (time_avg_forward_pass, nrof_images))

            print("Finally saving embeddings and gallery to: %s" % output_dir)
            # save the gallery and embeddings (signatures) as numpy arrays to disk
            np.save(os.path.join(output_dir, "gallery.npy"), labels_array)
            np.save(os.path.join(output_dir, "signatures.npy"), emb_array)
def main(args):
    with tf.Graph().as_default():
        with tf.Session() as sess:
            # Read the file containing the pairs used for testing
            pairs = lfw.read_pairs(os.path.expanduser(args.lfw_pairs))

            # Get the paths for the corresponding images
            paths, actual_issame = lfw.get_paths(
                os.path.expanduser(args.lfw_dir), pairs, args.lfw_file_ext)

            # Load the model
            print('Model directory: %s' % args.model_dir)
            meta_file, ckpt_file = facenet.get_model_filenames(
                os.path.expanduser(args.model_dir))
            print('Metagraph file: %s' % meta_file)
            print('Checkpoint file: %s' % ckpt_file)
            facenet.load_model(args.model_dir, meta_file, ckpt_file)

            # Get input and output tensors
            images_placeholder = tf.get_default_graph().get_tensor_by_name("input:0")
            embeddings = tf.get_default_graph().get_tensor_by_name("embeddings:0")

            image_size = images_placeholder.get_shape()[1]
            embedding_size = embeddings.get_shape()[1]

            # Run forward pass to calculate embeddings
            print('Running forward pass on LFW images')
            batch_size = args.lfw_batch_size
            nrof_images = len(paths)
            nrof_batches = int(math.ceil(1.0 * nrof_images / batch_size))
            emb_array = np.zeros((nrof_images, embedding_size))
            for i in range(nrof_batches):
                start_index = i * batch_size
                end_index = min((i + 1) * batch_size, nrof_images)
                paths_batch = paths[start_index:end_index]
                images = facenet.load_data(paths_batch, False, False, image_size)
                feed_dict = {images_placeholder: images}
                emb_array[start_index:end_index, :] = sess.run(embeddings, feed_dict=feed_dict)

            tpr, fpr, accuracy, val, val_std, far = lfw.evaluate(
                emb_array, args.seed, actual_issame, nrof_folds=args.lfw_nrof_folds)

            print('Accuracy: %1.3f+-%1.3f' % (np.mean(accuracy), np.std(accuracy)))
            print('Validation rate: %2.5f+-%2.5f @ FAR=%2.5f' % (val, val_std, far))

            facenet.plot_roc(fpr, tpr, 'NN4')
def main(args):
    with tf.Graph().as_default():
        with tf.Session() as sess:
            # Load the model metagraph and checkpoint
            print('Model directory: %s' % args.model_dir)
            meta_file, ckpt_file = facenet.get_model_filenames(
                os.path.expanduser(args.model_dir))
            print('Metagraph file: %s' % meta_file)
            print('Checkpoint file: %s' % ckpt_file)
            model_dir_exp = os.path.expanduser(args.model_dir)
            saver = tf.train.import_meta_graph(
                os.path.join(model_dir_exp, meta_file), clear_devices=True)
            tf.get_default_session().run(tf.global_variables_initializer())
            tf.get_default_session().run(tf.local_variables_initializer())
            saver.restore(tf.get_default_session(), os.path.join(model_dir_exp, ckpt_file))

            # Retrieve the protobuf graph definition and fix the batch norm nodes
            gd = sess.graph.as_graph_def()
            for node in gd.node:
                if node.op == 'RefSwitch':
                    node.op = 'Switch'
                    for index in xrange(len(node.input)):
                        if 'moving_' in node.input[index]:
                            node.input[index] = node.input[index] + '/read'
                elif node.op == 'AssignSub':
                    node.op = 'Sub'
                    if 'use_locking' in node.attr:
                        del node.attr['use_locking']
                elif node.op == 'AssignAdd':
                    node.op = 'Add'
                    if 'use_locking' in node.attr:
                        del node.attr['use_locking']

            # Get the list of important nodes
            output_node_names = 'embeddings'
            whitelist_names = []
            for node in gd.node:
                if (node.name.startswith('InceptionResnetV1')
                        or node.name.startswith('embeddings')
                        or node.name.startswith('phase_train')
                        or node.name.startswith('Bottleneck')):
                    print(node.name)
                    whitelist_names.append(node.name)

            # Replace all the variables in the graph with constants of the same values
            output_graph_def = graph_util.convert_variables_to_constants(
                sess, gd, output_node_names.split(","),
                variable_names_whitelist=whitelist_names)

            # Serialize and dump the output graph to the filesystem
            with tf.gfile.GFile(args.output_file, 'wb') as f:
                f.write(output_graph_def.SerializeToString())
            print("%d ops in the final graph." % len(output_graph_def.node))
def main(args):
    os.environ["CUDA_VISIBLE_DEVICES"] = args.gpu
    with tf.Graph().as_default():
        with tf.Session() as sess:
            # create output directory if it doesn't exist
            output_dir = os.path.expanduser(args.output_dir)
            if not os.path.isdir(output_dir):
                os.makedirs(output_dir)

            # load the model
            print("Loading trained model...\n")
            meta_file, ckpt_file = facenet.get_model_filenames(
                os.path.expanduser(args.trained_model_dir))
            facenet.load_model(args.trained_model_dir)

            # grab all image paths and labels
            print("Finding image paths and targets...\n")
            data = load_files(args.data_dir, load_content=False, shuffle=False)
            # labels_array = data['target']
            # paths = data['filenames']
            # print(data)

            # Get input and output tensors
            images_placeholder = tf.get_default_graph().get_tensor_by_name("input_ID:0")
            embeddings = tf.get_default_graph().get_tensor_by_name("embeddings_ID:0")
            phase_train_placeholder = tf.get_default_graph().get_tensor_by_name("phase_train:0")

            image_size = images_placeholder.get_shape()[1]
            embedding_size = embeddings.get_shape()[1]

            # Run forward pass to calculate embeddings
            print('Generating embeddings from images...\n')
            # emb = np.zeros(embedding_size)
            dirs = os.listdir(args.data_dir)
            dirs.sort()
            for dir in dirs:
                path = os.path.join(args.data_dir, dir)
                if os.path.isdir(path):
                    print("path: ", path)
                    files = os.listdir(path)
                    files.sort()
                    for file in files:
                        output_path = get_output_path(output_dir, dir, file)
                        image_path = os.path.join(path, file)
                        images = facenet.load_image(image_path,
                                                    do_random_crop=False,
                                                    do_random_flip=False,
                                                    image_size=image_size,
                                                    do_prewhiten=True)
                        feed_dict = {images_placeholder: images,
                                     phase_train_placeholder: False}
                        emb = sess.run(embeddings, feed_dict=feed_dict)
                        np.save(output_path, emb[0])
def main(args):
    with tf.Graph().as_default():
        with tf.Session() as sess:
            # Read the file containing the pairs used for testing
            pairs = lfw.read_pairs(os.path.expanduser(args.lfw_pairs))

            # Get the paths for the corresponding images
            paths, actual_issame = lfw.get_paths(
                os.path.expanduser(args.lfw_dir), pairs, args.lfw_file_ext)

            # Load the model
            print('Model directory: %s' % args.model_dir)
            meta_file, ckpt_file = facenet.get_model_filenames(
                os.path.expanduser(args.model_dir))
            print('Metagraph file: %s' % meta_file)
            print('Checkpoint file: %s' % ckpt_file)
            facenet.load_model(args.model_dir, meta_file, ckpt_file)

            # Get input and output tensors
            image_paths_placeholder = tf.get_default_graph().get_tensor_by_name("image_paths:0")
            labels_placeholder = tf.get_default_graph().get_tensor_by_name("labels:0")
            phase_train_placeholder = tf.get_default_graph().get_tensor_by_name("phase_train:0")
            batch_size_placeholder = tf.get_default_graph().get_tensor_by_name("batch_size:0")
            embeddings = tf.get_default_graph().get_tensor_by_name("embeddings:0")
            enqueue_op = tf.get_default_graph().get_operation_by_name("enqueue_op")
            label_batch = tf.get_default_graph().get_tensor_by_name("label_batch:0")

            # Run forward pass to calculate embeddings
            print('Running forward pass on LFW images')

            # Enqueue one epoch of image paths and labels
            labels_array = np.expand_dims(np.arange(0, len(paths)), 1)
            image_paths_array = np.expand_dims(np.array(paths), 1)
            sess.run(enqueue_op, {image_paths_placeholder: image_paths_array,
                                  labels_placeholder: labels_array})

            embedding_size = embeddings.get_shape()[1]
            nrof_images = len(actual_issame) * 2
            nrof_batches = nrof_images // args.lfw_batch_size
            emb_array = np.zeros((nrof_images, embedding_size))
            lab_array = np.zeros((nrof_images,))
            for _ in range(nrof_batches):
                feed_dict = {phase_train_placeholder: False,
                             batch_size_placeholder: args.lfw_batch_size}
                emb, lab = sess.run([embeddings, label_batch], feed_dict=feed_dict)
                lab_array[lab] = lab
                emb_array[lab] = emb

            assert np.array_equal(lab_array, np.arange(nrof_images)), \
                'Wrong labels used for evaluation, possibly caused by training examples left in the input pipeline'

            tpr, fpr, accuracy, val, val_std, far = lfw.evaluate(
                emb_array, actual_issame, nrof_folds=args.lfw_nrof_folds)

            print('Accuracy: %1.3f+-%1.3f' % (np.mean(accuracy), np.std(accuracy)))
            print('Validation rate: %2.5f+-%2.5f @ FAR=%2.5f' % (val, val_std, far))

            facenet.plot_roc(fpr, tpr, 'NN4')
def freeze_graph(model_dir, output_nodes='embeddings',
                 output_filename='facenet.pb', rename_outputs=None):
    # Load checkpoint
    meta_file, ckpt_file = facenet.get_model_filenames(model_dir)
    meta_file = path.join(model_dir, meta_file)
    ckpt_file = path.join(model_dir, ckpt_file)

    output_graph = output_filename

    print('Importing meta graph...')
    # Devices should be cleared to allow Tensorflow to control placement of
    # graph when loading on different machines
    saver = tf.train.import_meta_graph(meta_file, clear_devices=True)
    graph = tf.get_default_graph()
    onames = output_nodes.split(',')

    # https://stackoverflow.com/a/34399966/4190475
    if rename_outputs is not None:
        nnames = rename_outputs.split(',')
        with graph.as_default():
            for o, n in zip(onames, nnames):
                _out = tf.identity(graph.get_tensor_by_name(o + ':0'), name=n)
            onames = nnames

    input_graph_def = graph.as_graph_def()

    # fix batch norm nodes
    for node in input_graph_def.node:
        if node.op == 'RefSwitch':
            node.op = 'Switch'
            for index in range(len(node.input)):
                if 'moving_' in node.input[index]:
                    node.input[index] = node.input[index] + '/read'
        elif node.op == 'AssignSub':
            node.op = 'Sub'
            if 'use_locking' in node.attr:
                del node.attr['use_locking']

    with tf.Session(graph=graph) as sess:
        saver.restore(sess, ckpt_file)

        # In production, graph weights no longer need to be updated;
        # graph_util provides a utility to change all variables to constants
        output_graph_def = graph_util.convert_variables_to_constants(
            sess, input_graph_def,
            onames  # unrelated nodes will be discarded
        )

        # Serialize and write to file
        with tf.gfile.GFile(output_graph, "wb") as f:
            f.write(output_graph_def.SerializeToString())

        print("%d ops in the final graph." % len(output_graph_def.node))
        print("Saved to %s." % output_graph)
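# A hypothetical invocation of freeze_graph above (added for illustration; the
# checkpoint directory name is not from the original source). rename_outputs adds
# an identity node so the frozen graph exposes a friendlier output tensor name.
#
#   freeze_graph('/models/facenet/20180402-114759',
#                output_nodes='embeddings',
#                output_filename='facenet.pb',
#                rename_outputs='embeddings_out')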
def main(args):
    """ Main

    Given a list of images, save out facial encoding data files and copy
    images into folders of face clusters.

    """
    from os.path import join, basename, exists
    from os import makedirs
    import numpy as np
    import shutil
    import sys

    if not exists(args.output):
        makedirs(args.output)

    with tf.Graph().as_default():
        with tf.Session() as sess:
            image_paths = get_onedir(args.input)

            meta_file, ckpt_file = facenet.get_model_filenames(
                os.path.expanduser(args.model_dir))
            print('Metagraph file: %s' % meta_file)
            print('Checkpoint file: %s' % ckpt_file)
            load_model(args.model_dir, meta_file, ckpt_file)

            images_placeholder = tf.get_default_graph().get_tensor_by_name("input:0")
            embeddings = tf.get_default_graph().get_tensor_by_name("embeddings:0")
            phase_train_placeholder = tf.get_default_graph().get_tensor_by_name("phase_train:0")

            image_size = images_placeholder.get_shape()[1]
            print("image_size:", image_size)
            embedding_size = embeddings.get_shape()[1]

            print('Running forward pass on images')
            nrof_images = len(image_paths)
            nrof_batches = int(math.ceil(1.0 * nrof_images / args.batch_size))
            emb_array = np.zeros((nrof_images, embedding_size))
            facial_encodings = compute_facial_encodings(
                sess, images_placeholder, embeddings, phase_train_placeholder,
                image_size, embedding_size, nrof_images, nrof_batches,
                emb_array, args.batch_size, image_paths)

            sorted_clusters = cluster_facial_encodings(facial_encodings)
            num_cluster = len(sorted_clusters)

            # Copy image files to cluster folders
            for idx, cluster in enumerate(sorted_clusters):
                cluster_dir = join(args.output, str(idx))
                if not exists(cluster_dir):
                    makedirs(cluster_dir)
                for path in cluster:
                    shutil.copy(path, join(cluster_dir, basename(path)))
def convert_facenet(dir, model_base_path, image_size, output_size,
                    prefix=None, do_push=False):
    import facenet
    from models import inception_resnet_v1

    out_dir = os.path.join(dir, 'movidius')
    if not os.path.exists(out_dir):
        os.mkdir(out_dir)
    dir = os.path.join(dir, "facenet")
    if not os.path.exists(dir):
        os.mkdir(dir)

    tf.reset_default_graph()
    with tf.Graph().as_default():
        logging.info("Load FACENET graph")
        image = tf.placeholder("float", shape=[1, image_size, image_size, 3],
                               name='input')
        prelogits, _ = inception_resnet_v1.inference(
            image, 1.0, phase_train=False, bottleneck_layer_size=output_size)
        normalized = tf.nn.l2_normalize(prelogits, 1, name='l2_normalize')
        output = tf.identity(normalized, name='output')  # Do not remove
        saver = tf.train.Saver(tf.global_variables())

        with tf.Session() as sess:
            sess.run(tf.global_variables_initializer())
            sess.run(tf.local_variables_initializer())

            base_name = model_base_path
            meta_file, ckpt_file = facenet.get_model_filenames(base_name)
            logging.info("Restore FACENET graph from %s %s", meta_file, ckpt_file)
            saver = tf.train.import_meta_graph(os.path.join(base_name, meta_file))
            saver.restore(sess, os.path.join(base_name, ckpt_file))
            logging.info("Freeze FACENET graph")
            saver.save(sess, os.path.join(dir, 'facenet'))

    cmd = 'mvNCCheck {}/facenet.meta -in input -on output -s 12 -cs 0,1,2 -S 255'.format(dir)
    logging.info('Validate Movidius: %s', cmd)
    result = subprocess.check_output(cmd, shell=True).decode()
    logging.info(result)
    result = parse_check_ouput(result, prefix=prefix)
    submit(result)

    cmd = 'mvNCCompile {}/facenet.meta -in input -on output -o {}/facenet.graph -s 12'.format(dir, out_dir)
    logging.info('Compile: %s', cmd)
    result = subprocess.check_output(cmd, shell=True).decode()
    logging.info(result)

    if do_push:
        push('facenet', out_dir)
def main(args):
    """ Main

    Given a list of images, save out facial encoding data files and copy
    images into folders of face clusters.

    """
    from os.path import join, basename, exists
    from os import makedirs
    import numpy as np
    import shutil
    import sys

    if not exists(args.output):
        makedirs(args.output)

    with tf.Graph().as_default():
        with tf.Session() as sess:
            image_paths = get_onedir(args.input)
            #image_list, label_list = facenet.get_image_paths_and_labels(train_set)

            meta_file, ckpt_file = facenet.get_model_filenames(os.path.expanduser(args.model_dir))

            print('Metagraph file: %s' % meta_file)
            print('Checkpoint file: %s' % ckpt_file)
            load_model(args.model_dir, meta_file, ckpt_file)

            # Get input and output tensors
            images_placeholder = tf.get_default_graph().get_tensor_by_name("input:0")
            embeddings = tf.get_default_graph().get_tensor_by_name("embeddings:0")
            phase_train_placeholder = tf.get_default_graph().get_tensor_by_name("phase_train:0")

            image_size = images_placeholder.get_shape()[1]
            print("image_size:", image_size)
            embedding_size = embeddings.get_shape()[1]

            # Run forward pass to calculate embeddings
            print('Running forward pass on images')

            nrof_images = len(image_paths)
            nrof_batches = int(math.ceil(1.0 * nrof_images / args.batch_size))
            emb_array = np.zeros((nrof_images, embedding_size))
            facial_encodings = compute_facial_encodings(
                sess, images_placeholder, embeddings, phase_train_placeholder,
                image_size, embedding_size, nrof_images, nrof_batches,
                emb_array, args.batch_size, image_paths)

            sorted_clusters = cluster_facial_encodings(facial_encodings)
            num_cluster = len(sorted_clusters)

            # Copy image files to cluster folders (save all the clusters)
            for idx, cluster in enumerate(sorted_clusters):
                cluster_dir = join(args.output, str(idx))
                if not exists(cluster_dir):
                    makedirs(cluster_dir)
                for path in cluster:
                    shutil.copy(path, join(cluster_dir, basename(path)))
def get_tf_session(model_directory=MODEL_DIR):
    sess = tf.Session()
    meta_file, ckpt_file = get_model_filenames(model_directory)
    saver = tf.train.import_meta_graph(os.path.join(model_directory, meta_file))
    saver.restore(sess, os.path.join(model_directory, ckpt_file))
    images_placeholder = tf.get_default_graph().get_tensor_by_name("input:0")
    embeddings = tf.get_default_graph().get_tensor_by_name("embeddings:0")
    phase_train_placeholder = tf.get_default_graph().get_tensor_by_name("phase_train:0")
    return sess, images_placeholder, embeddings, phase_train_placeholder
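# A minimal usage sketch for get_tf_session above (added, not from the original
# source). `aligned_batch` is assumed to be a prewhitened float32 array of shape
# [N, 160, 160, 3] matching the "input:0" placeholder; MODEL_DIR as defined above.
def embed_batch(aligned_batch):
    sess, images_placeholder, embeddings, phase_train_placeholder = get_tf_session()
    # phase_train must be fed as False at inference time so batch norm layers
    # use their moving averages instead of batch statistics
    feed_dict = {images_placeholder: aligned_batch,
                 phase_train_placeholder: False}
    return sess.run(embeddings, feed_dict=feed_dict)  # shape [N, embedding_size]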
def main(args):
    # TODO: Move constants to startup script
    ranker = Rankings(5, "../data/celeb_vectors.pk")
    with tf.Graph().as_default():
        with tf.Session() as sess:
            # Load the model
            print('Model directory: %s' % args.model_dir)
            meta_file, ckpt_file = facenet.get_model_filenames(
                os.path.expanduser(args.model_dir))
            print('Metagraph file: %s' % meta_file)
            print('Checkpoint file: %s' % ckpt_file)
            facenet.load_model(args.model_dir, meta_file, ckpt_file)

            print('Watching: %s' % args.watch_dir)
            while True:
                # TODO: Only accept VALID (fully downloaded) image files
                image_filenames = os.listdir(args.watch_dir)
                if not image_filenames:
                    time.sleep(0.01)
                else:
                    # TODO: Implement max_batch_size
                    image_filepaths = [
                        os.path.join(args.watch_dir, image_filename)
                        for image_filename in image_filenames
                    ]
                    images = load_and_align_data(image_filepaths,
                                                 args.image_size, args.margin,
                                                 args.gpu_memory_fraction)

                    # Get input and output tensors
                    images_placeholder = tf.get_default_graph().get_tensor_by_name("input:0")
                    embeddings = tf.get_default_graph().get_tensor_by_name("embeddings:0")

                    # Run forward pass to calculate embeddings
                    feed_dict = {images_placeholder: images}
                    emb = sess.run(embeddings, feed_dict=feed_dict)
                    for filename, vector in zip(image_filenames, emb.tolist()):
                        image_filepath = os.path.join(args.watch_dir, filename)
                        output_filepath = os.path.join(
                            args.output_dir,
                            os.path.splitext(filename)[0] + ".json")
                        rank = ranker.calculate_ranking(vector)
                        rank_names = [c.name for c in rank]
                        with open(output_filepath, 'w') as fp:
                            json.dump(rank_names, fp)
                        os.remove(image_filepath)
def main(args):
    images = load_and_align_data(args.image_files, args.image_size,
                                 args.margin, args.gpu_memory_fraction)
    with tf.Graph().as_default():
        with tf.Session() as sess:
            # Load the model
            print('Model directory: %s' % args.model_dir)
            meta_file, ckpt_file = facenet.get_model_filenames(
                os.path.expanduser(args.model_dir))
            print('Metagraph file: %s' % meta_file)
            print('Checkpoint file: %s' % ckpt_file)
            facenet.load_model(args.model_dir, meta_file, ckpt_file)

            # Get input and output tensors
            images_placeholder = tf.get_default_graph().get_tensor_by_name("input:0")
            embeddings = tf.get_default_graph().get_tensor_by_name("embeddings:0")
            phase_train_placeholder = tf.get_default_graph().get_tensor_by_name("phase_train:0")

            # Run forward pass to calculate embeddings
            feed_dict = {images_placeholder: images,
                         phase_train_placeholder: False}
            emb = sess.run(embeddings, feed_dict=feed_dict)
            print(emb)
            nrof_images = len(args.image_files)

            print('Images:')
            for i in range(nrof_images):
                print('%1d: %s' % (i, args.image_files[i]))
            print('')

            # Print distance matrix
            print('Distance matrix')
            print('    ', end='')
            for i in range(nrof_images):
                print('    %1d     ' % i, end='')
            print('')
            for i in range(nrof_images):
                print('%1d  ' % i, end='')
                for j in range(nrof_images):
                    dist = np.sqrt(np.sum(np.square(np.subtract(emb[i, :], emb[j, :]))))
                    print('  %1.4f  ' % dist, end='')
                print('')
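# Side note (added): because the embeddings are L2-normalized, the Euclidean
# distance in the matrix above relates to cosine similarity by d^2 = 2 - 2*cos,
# so d ranges from 0 (identical) to 2 (opposite). A quick equivalent check:
#
#   cos_sim = np.dot(emb[i, :], emb[j, :])
#   dist    = np.sqrt(max(0.0, 2.0 - 2.0 * cos_sim))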
def main(args):
    with tf.Graph().as_default():
        with tf.Session() as sess:
            # create output directory if it doesn't exist
            output_dir = os.path.expanduser(args.output_dir)
            if not os.path.isdir(output_dir):
                os.makedirs(output_dir)

            # load the model
            print("Loading trained model...\n")
            meta_file, ckpt_file = facenet.get_model_filenames(
                os.path.expanduser(args.trained_model_dir))
            facenet.load_model(args.trained_model_dir, meta_file, ckpt_file)

            # grab all image paths and labels
            print("Finding image paths and targets...\n")
            data = load_files(args.data_dir, load_content=False, shuffle=False)
            labels_array = data['target']
            paths = data['filenames']

            # Get input and output tensors
            images_placeholder = tf.get_default_graph().get_tensor_by_name("input:0")
            embeddings = tf.get_default_graph().get_tensor_by_name("embeddings:0")
            phase_train_placeholder = tf.get_default_graph().get_tensor_by_name("phase_train:0")

            image_size = images_placeholder.get_shape()[1]
            embedding_size = embeddings.get_shape()[1]

            # Run forward pass to calculate embeddings
            print('Generating embeddings from images...\n')
            start_time = time.time()
            batch_size = args.batch_size
            nrof_images = len(paths)
            nrof_batches = int(np.ceil(1.0 * nrof_images / batch_size))
            emb_array = np.zeros((nrof_images, embedding_size))
            for i in xrange(nrof_batches):
                start_index = i * batch_size
                end_index = min((i + 1) * batch_size, nrof_images)
                paths_batch = paths[start_index:end_index]
                images = facenet.load_data(paths_batch, do_random_crop=False,
                                           do_random_flip=False,
                                           image_size=image_size, do_prewhiten=True)
                feed_dict = {images_placeholder: images,
                             phase_train_placeholder: False}
                emb_array[start_index:end_index, :] = sess.run(embeddings, feed_dict=feed_dict)

            time_avg_forward_pass = (time.time() - start_time) / float(nrof_images)
            print("Forward pass took avg of %.3f[seconds/image] for %d images\n" %
                  (time_avg_forward_pass, nrof_images))

            print("Finally saving embeddings and gallery to: %s" % output_dir)
            # save the gallery and embeddings (signatures) as numpy arrays to disk
            np.save(os.path.join(output_dir, "gallery.npy"), labels_array)
            np.save(os.path.join(output_dir, "signatures.npy"), emb_array)
def main(args):
    with tf.Graph().as_default():
        with tf.Session() as sess:
            # Read the file containing the pairs used for testing
            pairs = lfw.read_pairs(os.path.expanduser(args.lfw_pairs))

            # Get the paths for the corresponding images
            paths, actual_issame = lfw.get_paths(
                os.path.expanduser(args.lfw_dir), pairs, args.lfw_file_ext)

            # Load the model
            print('Model directory: %s' % args.model_dir)
            meta_file, ckpt_file = facenet.get_model_filenames(
                os.path.expanduser(args.model_dir))
            print('Metagraph file: %s' % meta_file)
            print('Checkpoint file: %s' % ckpt_file)
            facenet.load_model(args.model_dir, meta_file, ckpt_file)

            # Get input and output tensors
            images_placeholder = tf.get_default_graph().get_tensor_by_name("input:0")
            embeddings = tf.get_default_graph().get_tensor_by_name("embeddings:0")
            phase_train_placeholder = tf.get_default_graph().get_tensor_by_name("phase_train:0")

            image_size = images_placeholder.get_shape()[1]
            embedding_size = embeddings.get_shape()[1]

            # Run forward pass to calculate embeddings
            print('Running forward pass on LFW images')
            batch_size = args.lfw_batch_size
            nrof_images = len(paths)
            nrof_batches = int(math.ceil(1.0 * nrof_images / batch_size))
            emb_array = np.zeros((nrof_images, embedding_size))
            for i in range(nrof_batches):
                start_index = i * batch_size
                end_index = min((i + 1) * batch_size, nrof_images)
                paths_batch = paths[start_index:end_index]
                images = facenet.load_data(paths_batch, False, False, image_size)
                feed_dict = {images_placeholder: images,
                             phase_train_placeholder: False}
                emb_array[start_index:end_index, :] = sess.run(embeddings, feed_dict=feed_dict)

            tpr, fpr, accuracy, val, val_std, far = lfw.evaluate(
                emb_array, actual_issame, nrof_folds=args.lfw_nrof_folds)

            print('Accuracy: %1.3f+-%1.3f' % (np.mean(accuracy), np.std(accuracy)))
            print('Validation rate: %2.5f+-%2.5f @ FAR=%2.5f' % (val, val_std, far))

            auc = metrics.auc(fpr, tpr)
            print('Area Under Curve (AUC): %1.3f' % auc)
            eer = brentq(lambda x: 1. - x - interpolate.interp1d(fpr, tpr)(x), 0., 1.)
            print('Equal Error Rate (EER): %1.3f' % eer)
def build_graph(self, sess):
    # NOTE: model_path is expected to be defined at module scope
    meta_file, ckpt_file = facenet.get_model_filenames(model_path)
    g = tf.Graph()
    g.as_default()  # a bare as_default() has no effect; the restore below uses the global default graph
    saver = tf.train.import_meta_graph(os.path.join(model_path, meta_file))
    saver.restore(sess, os.path.join(model_path, ckpt_file))
    self.images_placeholder = tf.get_default_graph().get_tensor_by_name('input:0')
    self.embeddings = tf.get_default_graph().get_tensor_by_name('embeddings:0')
    self.phase_train_placeholder = tf.get_default_graph().get_tensor_by_name('phase_train:0')
def main():
    args = parse_args()
    out_dir = '/tmp/facenet'
    with tf.Graph().as_default():
        # NOTE: image_size is expected to be defined at module scope
        image = tf.placeholder("float", shape=[1, image_size, image_size, 3],
                               name='input')
        prelogits, _ = inception_resnet_v1.inference(
            image, 1.0, phase_train=False,
            bottleneck_layer_size=args.output_size)
        normalized = tf.nn.l2_normalize(prelogits, 1, name='l2_normalize')
        output = tf.identity(normalized, name='output')  # Do not remove
        saver = tf.train.Saver(tf.global_variables())

        with tf.Session() as sess:
            sess.run(tf.global_variables_initializer())
            sess.run(tf.local_variables_initializer())

            base_name = args.model_base_path
            meta_file, ckpt_file = facenet.get_model_filenames(base_name)
            saver = tf.train.import_meta_graph(os.path.join(base_name, meta_file))
            saver.restore(sess, os.path.join(base_name, ckpt_file))

            # Save the network for fathom
            if not os.path.isdir(out_dir):
                os.makedirs(out_dir)
            saver.save(sess, out_dir + '/facenet')

    if args.check:
        cmd = 'mvNCCheck {0}/facenet.meta -in input -on output -s 12 -cs 0,1,2 -S 255'.format(out_dir)
        print('Running check:\n')
        print(cmd)
        print('')
        print(subprocess.check_output(cmd, shell=True).decode())

    cmd = 'mvNCCompile {0}/facenet.meta -in input -on output -o {1} -s 12'.format(out_dir, args.output_file)
    print('Run:\n')
    print(cmd)
    print('')
    print(subprocess.check_output(cmd, shell=True).decode())

    shutil.rmtree(out_dir)
def facenet_api(img):
    # network = importlib.import_module('inception_resnet_v1')
    with tf.Graph().as_default():
        prelogits, _ = inception_resnet_v1_1(img, is_training=False,
                                             dropout_keep_prob=0.8,
                                             bottleneck_layer_size=128,
                                             reuse=None)
        embeddings = tf.nn.l2_normalize(prelogits, 1, 1e-10, name='embeddings')
        saver = tf.train.Saver(tf.trainable_variables(), max_to_keep=3)
        with tf.Session() as sess:
            # Load the model
            # images = np.zeros((16, 160, 160, 3))
            #facenet.load_model(model)
            # nrof_samples = len(img)
            # print(nrof_samples)
            #images = np.zeros((nrof_samples, 160, 160, 3))
            # Load the images
            # for i in range(16):
            #     if img.ndim == 2:
            #         img = facenet.to_rgb(img)
            #     img = facenet.prewhiten(img)
            #     img = facenet.crop(img, False, 160)
            #     img = facenet.flip(img, False)
            #     images[i,:,:,:] = img
            # min_pixel = np.min(images)
            # max_pixel = np.max(images)
            # images = (images - min_pixel) / (max_pixel - min_pixel)

            # Get input and output tensors
            model = "/home/fan/face_adv/facenet/src/models/20180402-114759/"
            # model = "/home/fan/face_adv/facenet/src/models/20180402-114759/20180402-114759.pb"
            # Load the model
            # facenet.load_model(model)
            model_exp = os.path.expanduser(model)
            print('Model directory: %s' % model_exp)
            meta_file, ckpt_file = facenet.get_model_filenames(model_exp)
            print('Metagraph file: %s' % meta_file)
            print('Checkpoint file: %s' % ckpt_file)
            saver.restore(tf.get_default_session(),
                          os.path.join(model_exp, ckpt_file))

            # Run the inference graph (built above from inception_resnet_v1_1)
            emb = sess.run(embeddings)
            print(emb)
            return emb
def execute(self, frame: FrameType, bboxes: bytes) -> bytes:
    import facenet
    import cv2
    import tensorflow as tf

    if self.images_placeholder is None:
        print('Loading model...')
        with self.g.as_default():
            with self.sess.as_default():
                model_path = self.config.args['model_dir']
                meta_file, ckpt_file = facenet.get_model_filenames(model_path)
                saver = tf.train.import_meta_graph(
                    os.path.join(model_path, meta_file))
                saver.restore(self.sess, os.path.join(model_path, ckpt_file))
                self.images_placeholder = tf.get_default_graph().get_tensor_by_name('input:0')
                self.embeddings = tf.get_default_graph().get_tensor_by_name('embeddings:0')
                self.phase_train_placeholder = tf.get_default_graph().get_tensor_by_name('phase_train:0')
        print('Model loaded!')

    [h, w] = frame.shape[:2]
    out_size = 160
    bboxes = readers.bboxes(bboxes, self.config.protobufs)
    outputs = b''
    for bbox in bboxes:
        # NOTE: if using output of mtcnn, coordinates are not normalized,
        # so remove the de-normalization factors here
        face_img = frame[int(bbox.y1 * h):int(bbox.y2 * h),
                         int(bbox.x1 * w):int(bbox.x2 * w)]
        [fh, fw] = face_img.shape[:2]
        if fh == 0 or fw == 0:
            outputs += np.zeros(128, dtype=np.float32).tobytes()
        else:
            face_img = cv2.resize(face_img, (out_size, out_size))
            face_img = facenet.prewhiten(face_img)
            embs = self.sess.run(self.embeddings,
                                 feed_dict={
                                     self.images_placeholder: [face_img],
                                     self.phase_train_placeholder: False
                                 })
            outputs += embs[0].tobytes()

    return b' ' if outputs == b'' else outputs  # bytes sentinel to match the return annotation
def main(args):
    with tf.Graph().as_default():
        with tf.Session() as sess:
            # Load the model metagraph and checkpoint
            print('Model directory: %s' % args.model_dir)
            meta_file, ckpt_file = facenet.get_model_filenames(
                os.path.expanduser(args.model_dir))
            print('Metagraph file: %s' % meta_file)
            print('Checkpoint file: %s' % ckpt_file)
            model_dir_exp = os.path.expanduser(args.model_dir)
            saver = tf.train.import_meta_graph(
                os.path.join(model_dir_exp, meta_file), clear_devices=True)
            tf.get_default_session().run(tf.global_variables_initializer())
            tf.get_default_session().run(tf.local_variables_initializer())
            saver.restore(tf.get_default_session(), os.path.join(model_dir_exp, ckpt_file))

            # Retrieve the protobuf graph definition and fix the batch norm nodes
            gd = sess.graph.as_graph_def()
            for node in gd.node:
                if node.op == 'RefSwitch':
                    node.op = 'Switch'
                    for index in xrange(len(node.input)):
                        if 'moving_' in node.input[index]:
                            node.input[index] = node.input[index] + '/read'
                elif node.op == 'AssignSub':
                    node.op = 'Sub'
                    if 'use_locking' in node.attr:
                        del node.attr['use_locking']
                elif node.op == 'AssignAdd':
                    node.op = 'Add'
                    if 'use_locking' in node.attr:
                        del node.attr['use_locking']

            # Get the list of important nodes
            output_node_names = 'embeddings'
            whitelist_names = []
            for node in gd.node:
                if (node.name.startswith('InceptionResnetV1')
                        or node.name.startswith('embeddings')
                        or node.name.startswith('phase_train')):
                    print(node.name)
                    whitelist_names.append(node.name)

            # Replace all the variables in the graph with constants of the same values
            output_graph_def = graph_util.convert_variables_to_constants(
                sess, gd, output_node_names.split(","),
                variable_names_whitelist=whitelist_names)

            # Serialize and dump the output graph to the filesystem
            with tf.gfile.GFile(args.output_file, 'wb') as f:
                f.write(output_graph_def.SerializeToString())
            print("%d ops in the final graph." % len(output_graph_def.node))
def main(args):
    if not os.path.exists(args.output_dir):
        os.makedirs(args.output_dir)

    image_list = glob.glob(args.uhdb31_dir + '/*.png')

    with tf.Graph().as_default():
        with tf.Session() as sess:
            # Read the file containing the pairs used for testing
            # pairs = lfw.read_pairs(os.path.expanduser(args.lfw_pairs))
            # Get the paths for the corresponding images
            # paths, actual_issame = lfw.get_paths(os.path.expanduser(args.lfw_dir), pairs, args.lfw_file_ext)

            # Load the model
            print('Model directory: %s' % args.model_dir)
            meta_file, ckpt_file = facenet.get_model_filenames(
                os.path.expanduser(args.model_dir))
            print('Metagraph file: %s' % meta_file)
            print('Checkpoint file: %s' % ckpt_file)
            facenet.load_model(args.model_dir, meta_file, ckpt_file)
            print('Done')

            # Get input and output tensors
            images_placeholder = tf.get_default_graph().get_tensor_by_name("input:0")
            embeddings = tf.get_default_graph().get_tensor_by_name("embeddings:0")
            phase_train_placeholder = tf.get_default_graph().get_tensor_by_name("phase_train:0")

            image_size = images_placeholder.get_shape()[1]
            embedding_size = embeddings.get_shape()[1]

            # Run forward pass to calculate embeddings
            print('Running forward pass on UHDB31 images')
            for img_path in image_list:
                base_name = os.path.splitext(os.path.basename(img_path))[0]
                save_path = os.path.join(args.output_dir, base_name + '.txt')
                images = facenet.load_data([img_path], False, False, image_size)
                ft = sess.run(embeddings, feed_dict={
                    images_placeholder: images,
                    phase_train_placeholder: False
                })
                np.savetxt(save_path, ft)
def get_rep(data_dir, trained_model_dir, batch_size=50):
    with tf.Graph().as_default():
        with tf.Session() as sess:
            # load the model
            print("Loading trained model...\n")
            meta_file, ckpt_file = facenet.get_model_filenames(
                os.path.expanduser(trained_model_dir))
            facenet.load_model(trained_model_dir)

            # grab all image paths and labels
            print("Finding image paths and targets...\n")
            data = load_files(data_dir, load_content=False, shuffle=False)
            #print(data.keys())
            labels_array = data['target']
            paths = data['filenames']

            # Get input and output tensors
            images_placeholder = tf.get_default_graph().get_tensor_by_name("input_ID:0")
            embeddings = tf.get_default_graph().get_tensor_by_name("embeddings_ID:0")
            phase_train_placeholder = tf.get_default_graph().get_tensor_by_name("phase_train:0")

            image_size = images_placeholder.get_shape()[1]
            embedding_size = embeddings.get_shape()[1]
            print("image_size is :", image_size)
            print("embedding size is :", embedding_size)

            # Run forward pass to calculate embeddings
            print('Generating embeddings from images...\n')
            start_time = time.time()
            nrof_images = len(paths)
            nrof_batches = int(np.ceil(1.0 * nrof_images / batch_size))
            emb_array = np.zeros((nrof_images, embedding_size))
            for i in xrange(nrof_batches):
                start_index = i * batch_size
                end_index = min((i + 1) * batch_size, nrof_images)
                paths_batch = paths[start_index:end_index]
                images = facenet.load_data(paths_batch, do_random_crop=False,
                                           do_random_flip=False,
                                           image_size=image_size,
                                           do_prewhiten=True)
                feed_dict = {images_placeholder: images,
                             phase_train_placeholder: False}
                emb_array[start_index:end_index, :] = sess.run(embeddings, feed_dict=feed_dict)

            time_avg_forward_pass = (time.time() - start_time) / float(nrof_images)
            print("Forward pass took avg of %.3f[seconds/image] for %d images\n" %
                  (time_avg_forward_pass, nrof_images))

            # Map numeric labels back to class names
            labels_name_array = []
            for i in range(len(labels_array)):
                labels_name_array += [data['target_names'][labels_array[i]]]

            return emb_array, labels_name_array
def load_model(sess, model, input_map=None):
    # The model can be either a frozen protobuf (.pb) file
    # or a directory containing a metagraph and a checkpoint
    model_exp = os.path.expanduser(model)
    if os.path.isfile(model_exp):
        print('Model filename: %s' % model_exp)
        with gfile.FastGFile(model_exp, 'rb') as f:
            graph_def = tf.GraphDef()
            graph_def.ParseFromString(f.read())
            tf.import_graph_def(graph_def, input_map=input_map, name='')
    else:
        print('Model directory: %s' % model_exp)
        meta_file, ckpt_file = facenet.get_model_filenames(model_exp)
        print('Metagraph file: %s' % meta_file)
        print('Checkpoint file: %s' % ckpt_file)
        saver = tf.train.import_meta_graph(os.path.join(model_exp, meta_file),
                                           input_map=input_map)
        saver.restore(sess, os.path.join(model_exp, ckpt_file))
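# Illustrative calls for load_model above (added; the paths are hypothetical).
# The helper accepts either a checkpoint directory or a single frozen .pb file,
# which is why both loading styles appear across the snippets in this section.
#
#   with tf.Graph().as_default():
#       with tf.Session() as sess:
#           load_model(sess, '~/models/facenet/20170512-110547')     # metagraph + checkpoint dir
#
#   with tf.Graph().as_default():
#       with tf.Session() as sess:
#           load_model(sess, '~/models/facenet/20180402-114759.pb')  # frozen protobuf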
def main(args):
    project_dir = os.path.dirname(os.getcwd())
    with open(join(project_dir, 'config.yaml'), 'r') as f:
        cfg = yaml.safe_load(f)  # safe_load avoids PyYAML's unsafe default loader
    if cfg['specs']['set_gpu']:
        os.environ['TF_CPP_MIN_LOG_LEVEL'] = '2'
        os.environ["CUDA_DEVICE_ORDER"] = "PCI_BUS_ID"
        os.environ["CUDA_VISIBLE_DEVICES"] = str(cfg['base_conf']['gpu_num'])
    with tf.Graph().as_default():
        with tf.Session() as sess:
            # Load the model metagraph and checkpoint
            model_dir = join(project_dir, 'fine_tuning_process', 'models', args.model_dir)
            print('Model directory: %s' % model_dir)
            meta_file, ckpt_file = facenet.get_model_filenames(model_dir)
            print('Metagraph file: %s' % meta_file)
            print('Checkpoint file: %s' % ckpt_file)
            model_dir_exp = model_dir
            saver = tf.train.import_meta_graph(
                os.path.join(model_dir_exp, meta_file), clear_devices=True)
            tf.get_default_session().run(tf.global_variables_initializer())
            tf.get_default_session().run(tf.local_variables_initializer())
            saver.restore(tf.get_default_session(), os.path.join(model_dir_exp, ckpt_file))

            # Retrieve the protobuf graph definition and fix the batch norm nodes
            input_graph_def = sess.graph.as_graph_def()

            # Freeze the graph def
            output_graph_def = freeze_graph_def(sess, input_graph_def, 'embeddings')

            # Serialize and dump the output graph to the filesystem
            with tf.gfile.GFile(join(project_dir, 'models', args.output_file), 'wb') as f:
                f.write(output_graph_def.SerializeToString())
            print("%d ops in the final graph: %s" %
                  (len(output_graph_def.node), args.output_file))
def main(args):
    with tf.Graph().as_default():
        with tf.Session() as sess:
            # Read the file containing the pairs used for testing
            pairs = lfw.read_pairs(os.path.expanduser(args.lfw_pairs))

            # Get the paths for the corresponding images
            paths, actual_issame = lfw.get_paths(
                os.path.expanduser(args.lfw_dir), pairs, args.lfw_file_ext)

            # Load the model
            print('Model directory: %s' % args.model_dir)
            meta_file, ckpt_file = facenet.get_model_filenames(
                os.path.expanduser(args.model_dir))
            print('Metagraph file: %s' % meta_file)
            print('Checkpoint file: %s' % ckpt_file)
            facenet.load_model(args.model_dir, meta_file, ckpt_file)

            # Get input and output tensors
            images_placeholder = tf.get_default_graph().get_tensor_by_name("input:0")
            phase_train_placeholder = tf.get_default_graph().get_tensor_by_name("phase_train:0")
            embeddings = tf.get_default_graph().get_tensor_by_name("embeddings:0")

            tpr, fpr, accuracy, val, val_std, far = lfw.validate(
                sess, paths, actual_issame, args.seed, 60,
                images_placeholder, phase_train_placeholder, embeddings,
                nrof_folds=args.lfw_nrof_folds)

            print('Accuracy: %1.3f+-%1.3f' % (np.mean(accuracy), np.std(accuracy)))
            print('Validation rate: %2.5f+-%2.5f @ FAR=%2.5f' % (val, val_std, far))

            facenet.plot_roc(fpr, tpr, 'NN4')
def main(args):
    images = load_and_align_data(args.image_files, args.image_size,
                                 args.margin, args.gpu_memory_fraction)
    with tf.Graph().as_default():
        with tf.Session() as sess:
            # Load the model
            print('Model directory: %s' % args.model_dir)
            meta_file, ckpt_file = facenet.get_model_filenames(os.path.expanduser(args.model_dir))
            print('Metagraph file: %s' % meta_file)
            print('Checkpoint file: %s' % ckpt_file)
            facenet.load_model(args.model_dir, meta_file, ckpt_file)

            # Get input and output tensors
            images_placeholder = tf.get_default_graph().get_tensor_by_name("input:0")
            embeddings = tf.get_default_graph().get_tensor_by_name("embeddings:0")
            #phase_train_placeholder = tf.get_default_graph().get_tensor_by_name("phase_train:0")

            # Run forward pass to calculate embeddings
            feed_dict = {images_placeholder: images}
            #feed_dict = {images_placeholder: images, phase_train_placeholder: False}
            emb = sess.run(embeddings, feed_dict=feed_dict)

            nrof_images = len(args.image_files)

            print('Images:')
            for i in range(nrof_images):
                print('%1d: %s' % (i, args.image_files[i]))
            print('')

            # Print distance matrix
            print('Distance matrix')
            print('    ', end='')
            for i in range(nrof_images):
                print('    %1d     ' % i, end='')
            print('')
            for i in range(nrof_images):
                print('%1d  ' % i, end='')
                for j in range(nrof_images):
                    dist = np.sqrt(np.sum(np.square(np.subtract(emb[i, :], emb[j, :]))))
                    print('  %1.4f  ' % dist, end='')
                print('')
def main(args):
    with tf.Graph().as_default():
        with tf.Session() as sess:
            # Load the model metagraph and checkpoint
            print('Model directory: %s' % args.model_dir)
            meta_file, ckpt_file = facenet.get_model_filenames(os.path.expanduser(args.model_dir))
            print('Metagraph file: %s' % meta_file)
            print('Checkpoint file: %s' % ckpt_file)
            facenet.load_model(args.model_dir, meta_file, ckpt_file)

            # Keep only the inference-relevant nodes when freezing
            output_node_names = 'embeddings'
            whitelist_names = []
            for node in sess.graph.as_graph_def().node:
                if (node.name.startswith('InceptionResnetV1')
                        or node.name.startswith('embeddings')
                        or node.name.startswith('phase_train')):
                    print(node.name)
                    whitelist_names.append(node.name)

            output_graph_def = graph_util.convert_variables_to_constants(
                sess, sess.graph.as_graph_def(), output_node_names.split(","),
                variable_names_whitelist=whitelist_names)

            with tf.gfile.GFile(args.output_file, 'wb') as f:
                f.write(output_graph_def.SerializeToString())
            print("%d ops in the final graph." % len(output_graph_def.node))  #pylint: disable=no-member
def main(args):
    with tf.Graph().as_default():
        with tf.Session() as sess:
            # Read the file containing the pairs used for testing
            pairs = lfw.read_pairs(os.path.expanduser(args.lfw_pairs))

            # Get the paths for the corresponding images
            paths, actual_issame = lfw.get_paths(
                os.path.expanduser(args.lfw_dir), pairs, args.lfw_file_ext)

            # Load the model
            print('Model directory: %s' % args.model_dir)
            meta_file, ckpt_file = facenet.get_model_filenames(
                os.path.expanduser(args.model_dir))
            print('Metagraph file: %s' % meta_file)
            print('Checkpoint file: %s' % ckpt_file)
            facenet.load_model(args.model_dir, meta_file, ckpt_file)

            # Get input and output tensors
            images_placeholder = tf.get_default_graph().get_tensor_by_name("input:0")
            embeddings = tf.get_default_graph().get_tensor_by_name("embeddings:0")

            image_size = images_placeholder.get_shape()[1]
            embedding_size = embeddings.get_shape()[1]
            image_size = image_size.value
            # pdb.set_trace()

            # Run forward pass to calculate embeddings
            print('Running forward pass on LFW images')
            batch_size = args.lfw_batch_size
            nrof_images = len(paths)
            nrof_batches = int(math.ceil(1.0 * nrof_images / batch_size))
            emb_array = np.zeros((nrof_images, embedding_size))
            print('nrof_batches :{}'.format(nrof_batches))
            all_time = 0
            for i in range(nrof_batches):
                start_index = i * batch_size
                end_index = min((i + 1) * batch_size, nrof_images)
                paths_batch = paths[start_index:end_index]
                # pdb.set_trace()
                images = facenet.load_data(paths_batch, False, False, image_size)
                feed_dict = {images_placeholder: images}
                start = time()
                emb_array[start_index:end_index, :] = sess.run(embeddings, feed_dict=feed_dict)
                end = time()
                all_time += (end - start)
                print('index: {} time: {}'.format(i, (end - start)))
            # pdb.set_trace()
            print('all_time :', all_time)

            msgpack_numpy.dump((paths, emb_array, actual_issame),
                               open('lfw_feature.p', 'wb'))

            tpr, fpr, accuracy, val, val_std, far = lfw.evaluate(
                emb_array, args.seed, actual_issame, nrof_folds=args.lfw_nrof_folds)

            print('Accuracy: %1.3f+-%1.3f' % (np.mean(accuracy), np.std(accuracy)))