def load(basename, **kwargs): model = RNNTaggerModel() model.sess = kwargs.get('sess', tf.Session()) checkpoint_name = kwargs.get('checkpoint_name', basename) checkpoint_name = checkpoint_name or basename with open(basename + '.state') as f: state = json.load(f) model.mxlen = state.get('mxlen', 100) model.maxw = state.get('maxw', 100) model.crf = bool(state.get('crf', False)) model.proj = bool(state.get('proj', False)) with open(basename + '.saver') as fsv: saver_def = tf.train.SaverDef() text_format.Merge(fsv.read(), saver_def) with gfile.FastGFile(basename + '.graph', 'rb') as f: gd = tf.GraphDef() gd.ParseFromString(f.read()) model.sess.graph.as_default() tf.import_graph_def(gd, name='') model.sess.run(saver_def.restore_op_name, {saver_def.filename_tensor_name: checkpoint_name}) model.x = tf.get_default_graph().get_tensor_by_name('x:0') model.xch = tf.get_default_graph().get_tensor_by_name('xch:0') model.y = tf.get_default_graph().get_tensor_by_name('y:0') model.lengths = tf.get_default_graph().get_tensor_by_name( 'lengths:0') model.pkeep = tf.get_default_graph().get_tensor_by_name('pkeep:0') model.best = tf.get_default_graph().get_tensor_by_name( 'output/ArgMax:0') model.probs = tf.get_default_graph().get_tensor_by_name( 'output/Reshape_1:0') # TODO: rename try: model.A = tf.get_default_graph().get_tensor_by_name( 'Loss/transitions:0') #print('Found transition matrix in graph, setting crf=True') if not model.crf: print( 'Warning: meta-data says no CRF but model contains transition matrix!' ) model.crf = True except: if model.crf is True: print( 'Warning: meta-data says there is a CRF but not transition matrix found!' ) model.A = None model.crf = False with open(basename + '.labels', 'r') as f: model.labels = json.load(f) model.word_vocab = {} if os.path.exists(basename + '-word.vocab'): with open(basename + '-word.vocab', 'r') as f: model.word_vocab = json.load(f) with open(basename + '-char.vocab', 'r') as f: model.char_vocab = json.load(f) model.saver = tf.train.Saver(saver_def=saver_def) return model
Written by Jaewook Kang @ 2017 Dec.
#-----------------------------------------------------------------
"""
from os import getcwd
import os

import tensorflow as tf
from tensorflow.python.platform import gfile
import numpy as np
import pandas as pd

tf.reset_default_graph()

filename = 'tf_graph_def.pb'
model_dir = getcwd() + '/pb_and_ckpt/ex/'
model_filename = os.path.join(model_dir, filename)

graph1 = tf.Graph()
with graph1.as_default():
    # Load the TF computational graph from a pb file.
    with gfile.FastGFile(model_filename, 'rb') as f:
        graph_def = tf.GraphDef()
        graph_def.ParseFromString(f.read())
        # Import the graph from "graph_def" into the current default graph.
        _ = tf.import_graph_def(graph_def=graph_def, name='')

sess = tf.Session(graph=graph1)
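# A minimal usage sketch (not part of the original script): once the frozen
# graph is imported into graph1, tensors can be looked up by name and run
# through the session created above. The names 'input:0' and 'output:0' and
# the feed shape are placeholder assumptions, not names taken from
# tf_graph_def.pb itself.
input_tensor = graph1.get_tensor_by_name('input:0')    # hypothetical name
output_tensor = graph1.get_tensor_by_name('output:0')  # hypothetical name
result = sess.run(output_tensor, feed_dict={input_tensor: np.zeros((1, 10))})
print(result)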
def main(_): if tf.gfile.Exists(FLAGS.summaries_dir): tf.gfile.DeleteRecursively(FLAGS.summaries_dir) tf.gfile.MakeDirs(FLAGS.summaries_dir) detect_devices() # Set up the pre-trained graph. download_inception_v3() inception_graph, bottleneck_tensor, jpeg_data_tensor, resized_image_tensor = (create_inception_graph()) image_lists = load_images(FLAGS.image_dir, FLAGS.testing_percentage, FLAGS.validation_percentage) class_count = len(image_lists.keys()) if class_count == 0: logger.info('No valid folders of images found at ' + FLAGS.image_dir) return -1 if class_count == 1: logger.info('Only one valid folder of images found at ' + FLAGS.image_dir + ' - multiple classes are needed for classification.') return -1 # Check if distortions should be applied. distort_image_enabled = distort_images(FLAGS.flip_left_right, FLAGS.random_crop, FLAGS.random_scale, FLAGS.random_brightness) logger.info("Apply distortions: {}".format(distort_image_enabled)) with tf.Session(graph = inception_graph) as sess: if distort_image_enabled: # Create distortions (distorted_jpeg_data_tensor, distorted_image_tensor) = add_input_distortions(FLAGS.flip_left_right, FLAGS.random_crop, FLAGS.random_scale, FLAGS.random_brightness) else: # Determine and cache bottleneck images determine_and_cache_bottlenecks(sess, image_lists, FLAGS.image_dir, FLAGS.bottleneck_dir, jpeg_data_tensor, bottleneck_tensor) # Add new layer to train (train_step, cross_entropy, bottleneck_input, ground_truth_input, final_tensor) = add_new_layer( len(image_lists.keys()), FLAGS.final_tensor_name, bottleneck_tensor) # Add evaluation the new layer evaluation_step, prediction = add_evaluation_step(final_tensor, ground_truth_input) # Write down summaries merged = tf.summary.merge_all() logger.info("Writing down train summary at {}".format(FLAGS.summaries_dir + '/train')) train_writer = tf.summary.FileWriter(FLAGS.summaries_dir + '/train', sess.graph) logger.info("Writing down validation summary at {}".format(FLAGS.summaries_dir + '/validation')) validation_writer = tf.summary.FileWriter(FLAGS.summaries_dir + '/validation') # Init weights sess.run(tf.global_variables_initializer()) # Init simple output file if os.path.exists(outputFilePath): os.remove(outputFilePath) outputFile = open(outputFilePath, "w") outputFile.write("step,train_accuracy,cross_entropy,validation_accuracy\n") # TRAIN USING THE REQUIRED STEPS QUANTITY logger.info("Training using {} steps".format(FLAGS.training_steps)) for i in range(FLAGS.training_steps): if distort_image_enabled: (train_bottlenecks, train_ground_truth) = get_random_distorted_bottlenecks( sess, image_lists, FLAGS.train_batch_size, 'training', FLAGS.image_dir, distorted_jpeg_data_tensor, distorted_image_tensor, resized_image_tensor, bottleneck_tensor) else: (train_bottlenecks, train_ground_truth, _) = get_random_cached_bottlenecks( sess, image_lists, FLAGS.train_batch_size, 'training', FLAGS.bottleneck_dir, FLAGS.image_dir, jpeg_data_tensor, bottleneck_tensor) train_summary, _ = sess.run([merged, train_step], feed_dict={bottleneck_input: train_bottlenecks, ground_truth_input: train_ground_truth}) train_writer.add_summary(train_summary, i) train_accuracy, cross_entropy_value = sess.run([evaluation_step, cross_entropy], feed_dict={bottleneck_input: train_bottlenecks, ground_truth_input: train_ground_truth}) logger.info('Step %d: Train accuracy = %.1f%%' % (i, train_accuracy * 100)) logger.info('Step %d: Cross entropy = %f' % (i, cross_entropy_value)) validation_bottlenecks, validation_ground_truth, _ = ( 
get_random_cached_bottlenecks(sess, image_lists, FLAGS.validation_batch_size, 'validation', FLAGS.bottleneck_dir, FLAGS.image_dir, jpeg_data_tensor, bottleneck_tensor) ) validation_summary, validation_accuracy = sess.run([merged, evaluation_step], feed_dict={bottleneck_input: validation_bottlenecks, ground_truth_input: validation_ground_truth}) validation_writer.add_summary(validation_summary, i) outputFile.write("{},{},{},{}\n".format(i, train_accuracy, cross_entropy_value, validation_accuracy)) logger.info('Step %d: Validation accuracy = %.1f%% (N=%d)' % (i, validation_accuracy * 100, len(validation_bottlenecks))) logger.info('==============================================================') # TRAINING COMPLETE outputFile.close() # Run evaluation with some new images not used before. logger.info("Training complete. Running evaluation using {} new images".format(FLAGS.test_batch_size)) test_bottlenecks, test_ground_truth, test_filenames = ( get_random_cached_bottlenecks(sess, image_lists, FLAGS.test_batch_size, 'testing', FLAGS.bottleneck_dir, FLAGS.image_dir, jpeg_data_tensor, bottleneck_tensor)) test_accuracy, predictions = sess.run( [evaluation_step, prediction], feed_dict={bottleneck_input: test_bottlenecks, ground_truth_input: test_ground_truth}) logger.info('Final test accuracy = %.1f%% (N=%d)' % (test_accuracy * 100, len(test_bottlenecks))) if FLAGS.print_misclassified_test_images: logger.info('=== MISCLASSIFIED TEST IMAGES ===') for i, test_filename in enumerate(test_filenames): if predictions[i] != test_ground_truth[i].argmax(): logger.info('%70s %s' % (test_filename, list(image_lists.keys())[predictions[i]])) # Write out the trained graph and labels with the weights stored as constants. logger.info("Writing final model") output_graph_def = graph_util.convert_variables_to_constants(sess, inception_graph.as_graph_def(), [FLAGS.final_tensor_name]) with gfile.FastGFile(FLAGS.output_graph, 'wb') as f: f.write(output_graph_def.SerializeToString()) with gfile.FastGFile(FLAGS.output_labels, 'w') as f: f.write('\n'.join(image_lists.keys()) + '\n') logger.info("FINISHED")
def main(): model_path = "models/20170511-185253.pb" # classifier_output_path = "/mnt/softwares/acv_project_code/Code/classifier_rf1_team.pkl" classifier_output_path = "models/classifier_rf4.pkl" #classifier_output_path = "/mnt/softwares/acv_project_code/Code/classfier_path/classifier_svm.pkl" with gfile.FastGFile(model_path, 'rb') as f: graph_def = tf.GraphDef() graph_def.ParseFromString(f.read()) tf.import_graph_def(graph_def, name='') images_placeholder = tf.get_default_graph().get_tensor_by_name("input:0") embedding_layer = tf.get_default_graph().get_tensor_by_name("embeddings:0") phase_train_placeholder = tf.get_default_graph().get_tensor_by_name( "phase_train:0") gpu_memory_fraction = 1 with tf.Graph().as_default(): gpu_options = tf.GPUOptions( per_process_gpu_memory_fraction=gpu_memory_fraction) sess1 = tf.Session(config=tf.ConfigProto(gpu_options=gpu_options, log_device_placement=False)) # sess1 = tf.Session(config=tf.ConfigProto(device_count = {'GPU': 0})) with sess1.as_default(): pnet, rnet, onet = detect_face.create_mtcnn(sess1, None) model, class_names = pickle.load(open(classifier_output_path, 'rb'), encoding='latin1') cap = cv2.VideoCapture(0) # cap = cv2.VideoCapture('/home/lokender/Downloads/orig_faces/videos/nayeem.mp4') # cap = cv2.VideoCapture('/home/lokender/Downloads/orig_faces/videos/lokender.mp4') fno = 0 det_name = [] det_prob = [] bbs = [] while (~(cv2.waitKey(1) & 0xFF == ord('q'))): # image2 = cv2.imread("/home/lokender/Downloads/T1/both/IMG_20171115_150720.jpg") # image2.set_shape((480, 640, 3)) # image2= cv2.resize(image2, (640,480)) ret, image2 = cap.read() image2 = cv2.resize(image2, (320, 240)) if fno % 5 == 0: # image2 = cv2.imread("/home/lokender/Downloads/T1/both/IMG_20171115_150720.jpg") # image2.set_shape((480, 640, 3)) # image2= cv2.resize(image2, (640,480)) # image2 = cv2.imread("/home/lokender/Downloads/T1/both/IMG_20171115_150720.jpg") # image2.set_shape((480, 640, 3)) # image2= cv2.resize(image2, (640,480)) print(fno) # image2=rotate_bound(image1,90) # image2 = cv2.imread('/home/lokender/Downloads/acv_tmp/tm_al/tmp/frame_0.png', cv2.IMREAD_COLOR) # cv2.imwrite("/home/lokender/Downloads/acv_tmp/tm/tmp/frame.png", image2) image_size = 160 margin = 32 detect_multiple_faces = True minsize = 20 # minimum size of face threshold = [0.6, 0.7, 0.7] # three steps's threshold factor = 0.709 # scale factor img = image2[:, :, 0:3] bounding_boxes, _ = detect_face.detect_face( img, minsize, pnet, rnet, onet, threshold, factor) nrof_faces = bounding_boxes.shape[0] print(nrof_faces) if nrof_faces > 0: det = bounding_boxes[:, 0:4] det_arr = [] img_size = np.asarray(img.shape)[0:2] if nrof_faces > 1: if detect_multiple_faces: for i in range(nrof_faces): det_arr.append(np.squeeze(det[i])) else: bounding_box_size = (det[:, 2] - det[:, 0]) * ( det[:, 3] - det[:, 1]) img_center = img_size / 2 offsets = np.vstack([ (det[:, 0] + det[:, 2]) / 2 - img_center[1], (det[:, 1] + det[:, 3]) / 2 - img_center[0] ]) offset_dist_squared = np.sum(np.power(offsets, 2.0), 0) index = np.argmax( bounding_box_size - offset_dist_squared * 2.0) # some extra weight on the centering det_arr.append(det[index, :]) else: det_arr.append(np.squeeze(det)) det_name = [] det_prob = [] bbs = [] for i, det in enumerate(det_arr): det = np.squeeze(det) bb = np.zeros(4, dtype=np.int32) bb[0] = np.maximum(det[0] - margin / 2, 0) bb[1] = np.maximum(det[1] - margin / 2, 0) bb[2] = np.minimum(det[2] + margin / 2, img_size[1]) bb[3] = np.minimum(det[3] + margin / 2, img_size[0]) cropped = img[bb[1]:bb[3], 
bb[0]:bb[2], :] bbs.append(bb) scaled = misc.imresize(cropped, (image_size, image_size), interp='bilinear') # nrof_successfully_aligned += 1 # output_filename_n = "{}_{}.{}".format(output_filename.split('.')[0], i, # output_filename.split('.')[-1]) # misc.imsave(output_filename_n, scaled) # config=tf.ConfigProto(device_count = {'GPU': 0}) with tf.Session(config=tf.ConfigProto( gpu_options=(tf.GPUOptions( per_process_gpu_memory_fraction=1)))) as sess: image_paths = [ '/home/nayeem/Desktop/acv_live_face_recognition_project/src/images/frame_0.png' ] image_size = 160 batch_size = 1 num_threads = 1 num_epochs = 1 label_list = [0] images = ops.convert_to_tensor(image_paths, dtype=tf.string) labels = ops.convert_to_tensor(label_list, dtype=tf.int32) # Makes an input queue input_queue = tf.train.slice_input_producer( (images, labels), num_epochs=num_epochs, shuffle=False, ) images_labels = [] image = tf.convert_to_tensor(scaled) label = input_queue[1] # image = tf.random_crop(image, size=[image_size, image_size, 3]) # image.set_shape((image_size, image_size, 3)) image = tf.image.per_image_standardization(image) images_labels.append([image, label]) num_threads = 16 images, labels = tf.train.batch_join( images_labels, batch_size=batch_size, capacity=4 * num_threads, enqueue_many=False, allow_smaller_final_batch=True) init_op = tf.group(tf.global_variables_initializer(), tf.local_variables_initializer()) sess.run(init_op) coord = tf.train.Coordinator() threads = tf.train.start_queue_runners(coord=coord, sess=sess) emb_array = None batch_images, batch_labels = sess.run([images, labels]) emb = sess.run(embedding_layer, feed_dict={ images_placeholder: batch_images, phase_train_placeholder: False }) emb_array = np.concatenate( [emb_array, emb]) if emb_array is not None else emb coord.request_stop() coord.join(threads=threads) predictions = model.predict_proba(emb_array, ) best_class_indices = np.argmax(predictions, axis=1) best_class_probabilities = predictions[ np.arange(len(best_class_indices)), best_class_indices] for ji in range(len(best_class_indices)): print('%4d %s: %.3f' % (ji, class_names[best_class_indices[ji]], best_class_probabilities[ji])) det_name.append( class_names[best_class_indices[ji]]) det_prob.append(best_class_probabilities[ji]) colors = [[255, 0, 0], [0, 255, 0], [0, 0, 255], [255, 255, 0], [0, 255, 255], [255, 0, 255]] for jk in range(len(det_name)): # print jk bbt = bbs[jk] if det_prob[jk] >= 0.5: cv2.rectangle(image2, (bbt[0], bbt[1]), (bbt[0] + (bbt[2] - bbt[0]), bbt[1] + (bbt[3] - bbt[1])), colors[jk], 2) cv2.putText(image2, det_name[jk], (bbt[0] + (bbt[2] - bbt[0]) + 10, bbt[1] + (bbt[3] - bbt[1])), 0, 0.5, colors[jk]) cv2.imshow('fr', image2) fno = fno + 1 cap.release() cv2.destroyAllWindows()
        res = max(res, box[8])
    return res, filtered_boxes


if __name__ == '__main__':
    if os.path.exists("data/results/"):
        shutil.rmtree("data/results/")
    os.makedirs("data/results/")

    cfg_from_file('ctpn/text.yml')

    # init session
    config = tf.ConfigProto(allow_soft_placement=True)
    sess = tf.Session(config=config)
    with gfile.FastGFile('model/ctpn.pb', 'rb') as f:
        graph_def = tf.GraphDef()
        graph_def.ParseFromString(f.read())
        sess.graph.as_default()
        tf.import_graph_def(graph_def, name='')
    sess.run(tf.global_variables_initializer())

    input_img = sess.graph.get_tensor_by_name('Placeholder:0')
    output_cls_prob = sess.graph.get_tensor_by_name('Reshape_2:0')
    output_box_pred = sess.graph.get_tensor_by_name(
        'rpn_bbox_pred/Reshape_1:0')

    all_images, all_labels, all_langs = load_data_tfrecord()
    all_labels = np.asarray(all_labels, dtype=int)
    pred_labels = np.zeros((len(all_labels), ), dtype=int)
    class_scores = np.zeros((len(all_labels), ), dtype=np.float32)
    try:
        all_images += [fold + "/" + f for f in os.listdir(fold)]
    except:
        print(fold)

neighbor_list = all_images
with open('neighbor_list_recom.pickle', 'wb') as f:
    pickle.dump(neighbor_list, f)
print("saved neighbour list")

extracted_features = np.ndarray((num_images, 2048))
sess = tf.Session()
graph, bottleneck_tensor, jpeg_data_tensor, resized_image_tensor = (
    create_inception_graph())

for i, filename in enumerate(neighbor_list):
    image_data = gfile.FastGFile(filename, 'rb').read()
    features = run_bottleneck_on_image(sess, image_data, jpeg_data_tensor,
                                       bottleneck_tensor)
    extracted_features[i:i + 1] = features
    if i % 250 == 0:
        print(i)

np.savetxt("saved_features_recom.txt", extracted_features)
print("saved extracted features")
def get_graph_def_from_disk(filename):
    """Get a GraphDef proto from a disk location."""
    with gfile.FastGFile(filename, 'rb') as f:
        return graph_pb2.GraphDef.FromString(f.read())
  Image.open(image).save(image_png, 'PNG')
  img = mpimg.imread(image_png)
  plt.imshow(img)
  display()

# COMMAND ----------

# MAGIC %md The following cell downloads the data from the internet and loads the model in memory:

# COMMAND ----------

maybe_download_and_extract()
node_lookup = load_lookup()
model_path = os.path.join(model_dir, 'classify_image_graph_def.pb')
with gfile.FastGFile(model_path, 'rb') as f:
  model_data = f.read()

# COMMAND ----------

# MAGIC %md We are now going to download some image URLs from the [ImageNet](http://image-net.org) project. ImageNet is a large collection of images from the internet that is commonly used as a benchmark in image recognition tasks.

# COMMAND ----------

batched_data = read_file_index()
num_images = sum([len(batch) for batch in batched_data])
print("There are %d images grouped in %d batches" % (num_images, len(batched_data)))

# COMMAND ----------
import glob
import os.path
import random
import numpy as np
import tensorflow as tf
from tensorflow.python.platform import gfile
from tensorflow.python.framework import graph_util
from configparser import ConfigParser
import time
import cv2

# Generate the tmp files.
with tf.Session(graph=tf.Graph()) as sess:
    sess.run(tf.global_variables_initializer())
    with gfile.FastGFile(
            './models/inception_dec_2015/tensorflow_inception_graph.pb',
            'rb') as rf:
        graph_def = tf.GraphDef()
        graph_def.ParseFromString(rf.read())
        sess.graph.as_default()
        tf.import_graph_def(graph_def, name='')
    # tf.summary.FileWriter('./tb', sess.graph)
    x1 = sess.graph.get_tensor_by_name('DecodeJpeg/contents:0')
    x2 = sess.graph.get_tensor_by_name('DecodeJpeg:0')
    y2048 = sess.graph.get_tensor_by_name('pool_3:0')

    def set_tmp(name):
        npar = []
        v_path = './datas/videos/%s' % name
        i_path = './datas/imgs/%s' % name
        print('Start extracting %s' % name)
def create_image_lists(sess, testing_percentage, validation_percentage):
    sub_dirs = [x[0] for x in os.walk(INPUT_DATA)]
    is_root_dir = True

    # Initialize the datasets.
    training_images = []
    training_labels = []
    testing_images = []
    testing_labels = []
    validation_images = []
    validation_labels = []
    current_label = 0

    # Read every sub-directory.
    for sub_dir in sub_dirs:
        if is_root_dir:
            is_root_dir = False
            continue

        # Collect all image files in the sub-directory.
        extensions = ['jpg', 'jpeg', 'JPG', 'JPEG']
        file_list = []
        dir_name = os.path.basename(sub_dir)
        for extension in extensions:
            file_glob = os.path.join(INPUT_DATA, dir_name, '*.' + extension)
            file_list.extend(glob.glob(file_glob))
        if not file_list:
            continue

        # Process the image data: decode and resize every image to 299*299 so
        # the Inception-v3 model can handle it.
        for file_name in file_list:
            image_raw_data = gfile.FastGFile(file_name, 'rb').read()
            image = tf.image.decode_jpeg(image_raw_data)
            if image.dtype != tf.float32:
                image = tf.image.convert_image_dtype(image, dtype=tf.float32)
            image = tf.image.resize_images(image, [299, 299])
            image_value = sess.run(image)

            # Randomly split the data into training/validation/testing sets.
            chance = np.random.randint(100)
            if chance < validation_percentage:
                validation_images.append(image_value)
                validation_labels.append(current_label)
            elif chance < (testing_percentage + validation_percentage):
                testing_images.append(image_value)
                testing_labels.append(current_label)
            else:
                training_images.append(image_value)
                training_labels.append(current_label)
        current_label += 1

    # Shuffle the training data for better training results.
    state = np.random.get_state()
    np.random.shuffle(training_images)
    np.random.set_state(state)
    np.random.shuffle(training_labels)

    return np.asarray([
        training_images, training_labels, validation_images,
        validation_labels, testing_images, testing_labels
    ])
def main(_):
    # Read all the images.
    image_lists = create_image_lists(VALIDATION_PERCENTAGE, TEST_PERCENTAGE)
    n_classes = len(image_lists.keys())

    with tf.Graph().as_default() as graph:
        # Read the trained Inception-v3 model.
        with gfile.FastGFile(os.path.join(MODEL_DIR, MODEL_FILE), 'rb') as f:
            graph_def = tf.GraphDef()
            graph_def.ParseFromString(f.read())
        # Load the Inception-v3 model and return the data input tensor and
        # the bottleneck output tensor.
        bottleneck_tensor, jpeg_data_tensor = tf.import_graph_def(
            graph_def,
            return_elements=[BOTTLENECK_TENSOR_NAME, JPEG_DATA_TENSOR_NAME])

        # Define the new network input.
        bottleneck_input = tf.placeholder(tf.float32,
                                          [None, BOTTLENECK_TENSOR_SIZE],
                                          name='BottleneckInputPlaceholder')
        # Define the new ground-truth input.
        ground_truth_input = tf.placeholder(tf.float32, [None, n_classes],
                                            name='GroundTruthInput')

        # Define one fully connected layer to solve the new image
        # classification problem.
        with tf.name_scope('final_training_ops'):
            weights = tf.Variable(
                tf.truncated_normal([BOTTLENECK_TENSOR_SIZE, n_classes],
                                    stddev=0.1))
            biases = tf.Variable(tf.zeros([n_classes]))
            logits = tf.matmul(bottleneck_input, weights) + biases
            final_tensor = tf.nn.softmax(logits)

        # Define the cross-entropy loss.
        cross_entropy = tf.nn.softmax_cross_entropy_with_logits(
            logits=logits, labels=ground_truth_input)
        cross_entropy_mean = tf.reduce_mean(cross_entropy)
        train_step = tf.train.GradientDescentOptimizer(LEARNING_RATE).minimize(
            cross_entropy_mean)

        # Compute the accuracy.
        with tf.name_scope('evaluation'):
            correct_prediction = tf.equal(tf.argmax(final_tensor, 1),
                                          tf.argmax(ground_truth_input, 1))
            evaluation_step = tf.reduce_mean(
                tf.cast(correct_prediction, tf.float32))

        # Training process.
        with tf.Session(graph=graph) as sess:
            init = tf.global_variables_initializer().run()

            # Output directory for models and summaries.
            import time
            timestamp = str(int(time.time()))
            out_dir = os.path.abspath(
                os.path.join(os.path.curdir, 'runs', timestamp))
            print('\nWriting to {}\n'.format(out_dir))

            # Summaries for the loss and the accuracy.
            loss_summary = tf.summary.scalar('loss', cross_entropy_mean)
            acc_summary = tf.summary.scalar('accuracy', evaluation_step)

            # Training summaries.
            train_summary_op = tf.summary.merge([loss_summary, acc_summary])
            train_summary_dir = os.path.join(out_dir, 'summaries', 'train')
            train_summary_writer = tf.summary.FileWriter(train_summary_dir,
                                                         sess.graph)

            # Dev (validation) summaries.
            dev_summary_op = tf.summary.merge([loss_summary, acc_summary])
            dev_summary_dir = os.path.join(out_dir, 'summaries', 'dev')
            dev_summary_writer = tf.summary.FileWriter(dev_summary_dir,
                                                       sess.graph)

            # Checkpointing.
            checkpoint_dir = os.path.abspath(
                os.path.join(out_dir, 'checkpoints'))
            checkpoint_prefix = os.path.join(checkpoint_dir, 'model')
            if not os.path.exists(checkpoint_dir):
                os.makedirs(checkpoint_dir)
            saver = tf.train.Saver(tf.global_variables(),
                                   max_to_keep=NUM_CHECKPOINTS)

            for i in range(STEPS):
                # Get a batch of training data each step.
                train_bottlenecks, train_ground_truth = get_random_cached_bottlenecks(
                    sess, n_classes, image_lists, BATCH, 'training',
                    jpeg_data_tensor, bottleneck_tensor)
                _, train_summaries = sess.run(
                    [train_step, train_summary_op],
                    feed_dict={
                        bottleneck_input: train_bottlenecks,
                        ground_truth_input: train_ground_truth
                    })

                # Save the summaries for every step.
                train_summary_writer.add_summary(train_summaries, i)

                # Test the accuracy on the validation set.
                if i % 100 == 0 or i + 1 == STEPS:
                    validation_bottlenecks, validation_ground_truth = get_random_cached_bottlenecks(
                        sess, n_classes, image_lists, BATCH, 'validation',
                        jpeg_data_tensor, bottleneck_tensor)
                    validation_accuracy, dev_summaries = sess.run(
                        [evaluation_step, dev_summary_op],
                        feed_dict={
                            bottleneck_input: validation_bottlenecks,
                            ground_truth_input: validation_ground_truth
                        })
                    print(
                        'Step %d : Validation accuracy on random sampled %d examples = %.1f%%'
                        % (i, BATCH, validation_accuracy * 100))

                # Save the model and the dev summaries every CHECKPOINT_EVERY steps.
                if i % CHECKPOINT_EVERY == 0:
                    dev_summary_writer.add_summary(dev_summaries, i)
                    path = saver.save(sess, checkpoint_prefix, global_step=i)
                    print('Saved model checkpoint to {}\n'.format(path))

            # Finally, test the accuracy on the test set.
            test_bottlenecks, test_ground_truth = get_test_bottlenecks(
                sess, image_lists, n_classes, jpeg_data_tensor,
                bottleneck_tensor)
            test_accuracy = sess.run(evaluation_step,
                                     feed_dict={
                                         bottleneck_input: test_bottlenecks,
                                         ground_truth_input: test_ground_truth
                                     })
            print('Final test accuracy = %.1f%%' % (test_accuracy * 100))

            # Save the labels.
            output_labels = os.path.join(out_dir, 'labels.txt')
            with tf.gfile.FastGFile(output_labels, 'w') as f:
                keys = list(image_lists.keys())
                for i in range(len(keys)):
                    keys[i] = '%2d -> %s' % (i, keys[i])
                f.write('\n'.join(keys) + '\n')
def Train(sess, num_actions, feature_sizes, domain_sizes, embedding_dims): """Builds and trains the network. Args: sess: tensorflow session to use. num_actions: number of possible golden actions. feature_sizes: size of each feature vector. domain_sizes: number of possible feature ids in each feature vector. embedding_dims: embedding dimension to use for each feature group. """ t = time.time() hidden_layer_sizes = map(int, FLAGS.hidden_layer_sizes.split(',')) logging.info('Building training network with parameters: feature_sizes: %s ' 'domain_sizes: %s', feature_sizes, domain_sizes) if FLAGS.graph_builder == 'greedy': parser = graph_builder.GreedyParser(num_actions, feature_sizes, domain_sizes, embedding_dims, hidden_layer_sizes, seed=int(FLAGS.seed), gate_gradients=True, averaging_decay=FLAGS.averaging_decay, arg_prefix=FLAGS.arg_prefix) else: parser = structured_graph_builder.StructuredGraphBuilder( num_actions, feature_sizes, domain_sizes, embedding_dims, hidden_layer_sizes, seed=int(FLAGS.seed), gate_gradients=True, averaging_decay=FLAGS.averaging_decay, arg_prefix=FLAGS.arg_prefix, beam_size=FLAGS.beam_size, max_steps=FLAGS.max_steps) task_context = OutputPath('context') if FLAGS.word_embeddings is not None: parser.AddPretrainedEmbeddings(0, FLAGS.word_embeddings, task_context) corpus_name = ('projectivized-training-corpus' if FLAGS.projectivize_training_set else FLAGS.training_corpus) parser.AddTraining(task_context, FLAGS.batch_size, learning_rate=FLAGS.learning_rate, momentum=FLAGS.momentum, decay_steps=FLAGS.decay_steps, corpus_name=corpus_name) parser.AddEvaluation(task_context, FLAGS.batch_size, corpus_name=FLAGS.tuning_corpus) parser.AddSaver(FLAGS.slim_model) # Save graph. if FLAGS.output_path: with gfile.FastGFile(OutputPath('graph'), 'w') as f: f.write(sess.graph_def.SerializeToString()) logging.info('Initializing...') num_epochs = 0 cost_sum = 0.0 num_steps = 0 best_eval_metric = 0.0 sess.run(parser.inits.values()) if FLAGS.pretrained_params is not None: logging.info('Loading pretrained params from %s', FLAGS.pretrained_params) feed_dict = {'save/Const:0': FLAGS.pretrained_params} targets = [] for node in sess.graph_def.node: if (node.name.startswith('save/Assign') and node.input[0] in FLAGS.pretrained_params_names.split(',')): logging.info('Loading %s with op %s', node.input[0], node.name) targets.append(node.name) sess.run(targets, feed_dict=feed_dict) logging.info('Training...') while num_epochs < FLAGS.num_epochs: tf_epochs, tf_cost, _ = sess.run([parser.training[ 'epochs'], parser.training['cost'], parser.training['train_op']]) num_epochs = tf_epochs num_steps += 1 cost_sum += tf_cost if num_steps % FLAGS.report_every == 0: logging.info('Epochs: %d, num steps: %d, ' 'seconds elapsed: %.2f, avg cost: %.2f, ', num_epochs, num_steps, time.time() - t, cost_sum / FLAGS.report_every) cost_sum = 0.0 if num_steps % FLAGS.checkpoint_every == 0: best_eval_metric = Eval(sess, parser, num_steps, best_eval_metric)
batches_indices = prepare_batches(len(images), max_batch_size)
print(str(len(images)) + " images divided into " +
      str(len(batches_indices) - 1) + " batches:")

for l_i, i in enumerate(batches_indices):
    if l_i == len(batches_indices) - 1:
        continue
    csv_data = []
    start = i
    end = batches_indices[l_i + 1]

    image_data = []
    for single_dir in images[start:end]:
        image_data.append(gfile.FastGFile(single_dir, 'rb').read())

    bottleneck_values = []
    for img in image_data:
        bottleneck_values.append(run_bottleneck_on_image(
            sess, img, 'DecodeJPGInput:0', 'Mul_1:0', 'Mul:0',
            'pool_3/_reshape:0'))

    result = sess.run(['Reshape:0'], {
        'BottleneckInput:0': bottleneck_values,
        "GenderInput:0": extract_genders(genders[start:end])
    })

    for img_dir, gender, age in zip(images[start:end], genders[start:end],
                                    unscaleAgeL(result)):
        print("Image: " + img_dir_get_name(img_dir) + "(" +
              gender_to_string(gender) + ")" + " age: " +
              str(int(round(age))))
def main(_): if tf.gfile.Exists(FLAGS.summaries_dir): tf.gfile.DeleteRecursively(FLAGS.summaries_dir) tf.gfile.MakeDirs(FLAGS.summaries_dir) maybe_download_and_extract() graph, bottleneck_tensor, jpeg_data_tensor, resized_image_tensor = ( create_inception_graph()) start = time.time() image_lists = create_image_lists(FLAGS.image_dir, FLAGS.testing_percentage, FLAGS.validation_percentage) class_count = len(image_lists.keys()) if class_count == 0: print('No valid folders of images found at ' + FLAGS.image_dir) return -1 if class_count == 1: print('Only one valid folder of images found at ' + FLAGS.image_dir + ' - multiple classes are needed for classification.') return -1 do_distort_images = should_distort_images(FLAGS.flip_left_right, FLAGS.random_crop, FLAGS.random_scale, FLAGS.random_brightness) with tf.Session(graph=graph) as sess: if do_distort_images: (distorted_jpeg_data_tensor, distorted_image_tensor) = add_input_distortions( FLAGS.flip_left_right, FLAGS.random_crop, FLAGS.random_scale, FLAGS.random_brightness) else: cache_bottlenecks(sess, image_lists, FLAGS.image_dir, FLAGS.bottleneck_dir, jpeg_data_tensor, bottleneck_tensor) (train_step, cross_entropy, bottleneck_input, ground_truth_input, final_tensor) = add_final_training_ops(len(image_lists.keys()), FLAGS.final_tensor_name, bottleneck_tensor) evaluation_step, prediction = add_evaluation_step( final_tensor, ground_truth_input) merged = tf.summary.merge_all() train_writer = tf.summary.FileWriter(FLAGS.summaries_dir + '/train', sess.graph) validation_writer = tf.summary.FileWriter(FLAGS.summaries_dir + '/validation') init = tf.global_variables_initializer() sess.run(init) for i in range(FLAGS.how_many_training_steps): if do_distort_images: (train_bottlenecks, train_ground_truth) = get_random_distorted_bottlenecks( sess, image_lists, FLAGS.train_batch_size, 'training', FLAGS.image_dir, distorted_jpeg_data_tensor, distorted_image_tensor, resized_image_tensor, bottleneck_tensor) else: (train_bottlenecks, train_ground_truth, _) = get_random_cached_bottlenecks( sess, image_lists, FLAGS.train_batch_size, 'training', FLAGS.bottleneck_dir, FLAGS.image_dir, jpeg_data_tensor, bottleneck_tensor) train_summary, _ = sess.run( [merged, train_step], feed_dict={ bottleneck_input: train_bottlenecks, ground_truth_input: train_ground_truth }) train_writer.add_summary(train_summary, i) is_last_step = (i + 1 == FLAGS.how_many_training_steps) if (i % FLAGS.eval_step_interval) == 0 or is_last_step: train_accuracy, cross_entropy_value = sess.run( [evaluation_step, cross_entropy], feed_dict={ bottleneck_input: train_bottlenecks, ground_truth_input: train_ground_truth }) print('%s: Step %d: Train accuracy = %.1f%%' % (datetime.now(), i, train_accuracy * 100)) print('%s: Step %d: Cross entropy = %f' % (datetime.now(), i, cross_entropy_value)) validation_bottlenecks, validation_ground_truth, _ = ( get_random_cached_bottlenecks( sess, image_lists, FLAGS.validation_batch_size, 'validation', FLAGS.bottleneck_dir, FLAGS.image_dir, jpeg_data_tensor, bottleneck_tensor)) validation_summary, validation_accuracy = sess.run( [merged, evaluation_step], feed_dict={ bottleneck_input: validation_bottlenecks, ground_truth_input: validation_ground_truth }) validation_writer.add_summary(validation_summary, i) print('%s: Step %d: Validation accuracy = %.1f%% (N=%d)' % (datetime.now(), i, validation_accuracy * 100, len(validation_bottlenecks))) test_bottlenecks, test_ground_truth, test_filenames = ( get_random_cached_bottlenecks(sess, image_lists, FLAGS.test_batch_size, 
'testing', FLAGS.bottleneck_dir, FLAGS.image_dir, jpeg_data_tensor, bottleneck_tensor)) test_accuracy, predictions = sess.run( [evaluation_step, prediction], feed_dict={ bottleneck_input: test_bottlenecks, ground_truth_input: test_ground_truth }) print('Final test accuracy = %.1f%% (N=%d)' % (test_accuracy * 100, len(test_bottlenecks))) if FLAGS.print_misclassified_test_images: print('=== MISCLASSIFIED TEST IMAGES ===') for i, test_filename in enumerate(test_filenames): if predictions[i] != test_ground_truth[i].argmax(): print('%70s %s' % (test_filename, list( image_lists.keys())[predictions[i]])) output_graph_def = graph_util.convert_variables_to_constants( sess, graph.as_graph_def(), [FLAGS.final_tensor_name]) with gfile.FastGFile(FLAGS.output_graph, 'wb') as f: f.write(output_graph_def.SerializeToString()) with gfile.FastGFile(FLAGS.output_labels, 'w') as f: f.write('\n'.join(image_lists.keys()) + '\n') end = time.time() print( "Total Time to Train The Model including Creating BottleNecks : ") print(str(end - start))
                     interpolation=cv2.INTER_LINEAR)
    cv2.imwrite(os.path.join("data/results", base_name), img)


if __name__ == '__main__':
    if os.path.exists("data/results/"):
        shutil.rmtree("data/results/")
    os.makedirs("data/results/")

    cfg_from_file('ctpn/text.yml')

    # init session
    config = tf.ConfigProto(allow_soft_placement=True)
    sess = tf.Session(config=config)
    with gfile.FastGFile('data/ctpn.pb', 'rb') as f:
        graph_def = tf.GraphDef()
        graph_def.ParseFromString(f.read())
        sess.graph.as_default()
        tf.import_graph_def(graph_def, name='')
    sess.run(tf.global_variables_initializer())

    input_img = sess.graph.get_tensor_by_name('Placeholder:0')
    output_cls_prob = sess.graph.get_tensor_by_name('Reshape_2:0')
    output_box_pred = sess.graph.get_tensor_by_name(
        'rpn_bbox_pred/Reshape_1:0')

    im_names = glob.glob(os.path.join(cfg.DATA_DIR, 'demo', '*.png')) + \
               glob.glob(os.path.join(cfg.DATA_DIR, 'demo', '*.jpg'))

    for im_name in im_names:
def create_graph(model_path):
    with gfile.FastGFile(model_path, 'rb') as f:
        graph_def = tf.GraphDef()
        graph_def.ParseFromString(f.read())
        _ = tf.import_graph_def(graph_def, name='')
def main(_): graph, bottleneckTensor, jpegDataTensor, resizedImageTensor = createInceptionGraph( ) #load all images imageLists = createImageLists(IMAGE_DIR, TESTING_PERCENT, VALIDATION_PERCENT) #count the class number classCount = len(imageLists.keys()) if classCount == 0 or classCount == 1: return with tf.Session(graph=graph) as sess: cacheBottlenecks(sess, imageLists, IMAGE_DIR, BOTTLENECK_DIR, jpegDataTensor, bottleneckTensor) #add finaly layer (trainStep, crossEntropy, bottleneckInput, groundTruthInput, finalTensor) = finalTrainingLayer(classCount, FINAL_TENSOR_NAME, bottleneckTensor) evalutionStep, prediction = evaluation(finalTensor, groundTruthInput) merged = tf.summary.merge_all() trainWriter = tf.summary.FileWriter(SUMMARIES_DIR + '/train', sess.graph) validationWriter = tf.summary.FileWriter(SUMMARIES_DIR + '/validation') init = tf.global_variables_initializer() sess.run(init) for i in range(TRAINING_STEPS): trainBottlenecks, trainGroundTruth, _ = getRandomCachedBottlenecks( sess, imageLists, TRAIN_BATCH_SIZE, 'training', BOTTLENECK_DIR, IMAGE_DIR, jpegDataTensor, bottleneckTensor) trainSummary, _ = sess.run( [merged, trainStep], feed_dict={ bottleneckInput: trainBottlenecks, groundTruthInput: trainGroundTruth }) trainWriter.add_summary(trainSummary, i) isLastStep = ((i + 1) == TRAINING_STEPS) if (i % EVAL_STEP_INTERVAL) == 0 or isLastStep: trainAccuracy, crossEntropyValue = sess.run( [evalutionStep, crossEntropy], feed_dict={ bottleneckInput: trainBottlenecks, groundTruthInput: trainGroundTruth }) print('Step %d: Train accuracy = %.1f%%' % (i, trainAccuracy * 100)) print('Step %d: Cross entropy = %f' % (i, crossEntropyValue)) validationBottlenecks, validationGroundTruth, _ = ( getRandomCachedBottlenecks(sess, imageLists, VALID_BATCH_SIZE, 'validation', BOTTLENECK_DIR, IMAGE_DIR, jpegDataTensor, bottleneckTensor)) validationSummary, validationAccuracy = sess.run( [merged, evalutionStep], feed_dict={ bottleneckInput: validationBottlenecks, groundTruthInput: validationGroundTruth }) validationWriter.add_summary(validationSummary, i) print( 'Step %d: Validation accuracy = %.1f%% (N=%d)' % (i, validationAccuracy * 100, len(validationBottlenecks))) print('@@__Here are final test evaluation__@@') testBottlenecks, testGroundTruth, testFileNames = ( getRandomCachedBottlenecks(sess, imageLists, TEST_BATCH_SIZE, 'testing', BOTTLENECK_DIR, IMAGE_DIR, jpegDataTensor, bottleneckTensor)) testAccuracy, predictions = sess.run([evalutionStep, prediction], feed_dict={ bottleneckInput: testBottlenecks, groundTruthInput: testGroundTruth }) print('Final test accuracy = %.1f%% (N=%d)' % (testAccuracy * 100, len(testBottlenecks))) if PRINT_MISCLASSIFIED_TEST_IMAGES: print('@@__Misclassified test images__@@') for i, testFileName in enumerate(testFileNames): if predictions[i] != testGroundTruth[i].argmax(): print('%70s %s' % (testFileName, list( imageLists.keys())[predictions[i]])) outputGraphDef = graph_util.convert_variables_to_constants( sess, graph.as_graph_def(), [FINAL_TENSOR_NAME]) with gfile.FastGFile(OUTPUT_GRPAH, 'wb') as f: f.write(outputGraphDef.SerializeToString()) with gfile.FastGFile(OUTPUT_LABELS, 'w') as f: f.write('\n'.join(imageLists.keys()) + '\n')
def extract_features(path, model_path): feature_dimension = 2048 labels = [] create_graph(model_path) i = 0 samples_path = os.path.join(path, 'NORMAL_AUGMENTED') samples_dir = os.listdir(samples_path) list_images = [] count = 1 for f in samples_dir: if ('jpeg' in f): list_images.append(f) features = np.empty((len(list_images), feature_dimension)) with tf.Session() as sess: flattened_tensor = sess.graph.get_tensor_by_name('pool_3:0') for photo in list_images: photo_path = os.path.join(samples_path, photo) labels.append('NORMAL') print('CLASS NORMAL: PROCESSING IMAGE %d OF %d)' % (count, len(list_images))) image_data = gfile.FastGFile(photo_path, 'rb').read() feature = sess.run(flattened_tensor, {'DecodeJpeg/contents:0': image_data}) features[i, :] = np.squeeze(feature) i = i + 1 count = count + 1 features_normal = features samples_path = os.path.join(path, 'PNEUMONIA_AUGMENTED') samples_dir = os.listdir(samples_path) list_images = [] count = 1 i = 0 for f in samples_dir: if ('jpeg' in f): list_images.append(f) features = np.empty((len(list_images), feature_dimension)) with tf.Session() as sess: flattened_tensor = sess.graph.get_tensor_by_name('pool_3:0') for photo in list_images: photo_path = os.path.join(samples_path, photo) labels.append('PNEUMONIA') print('CLASS PNEUMONIA: PROCESSING IMAGE %d OF %d)' % (count, len(list_images))) image_data = gfile.FastGFile(photo_path, 'rb').read() feature = sess.run(flattened_tensor, {'DecodeJpeg/contents:0': image_data}) features[i, :] = np.squeeze(feature) i = i + 1 count = count + 1 features_pneumonia = features all_features = np.concatenate((features_normal, features_pneumonia), axis=0) return all_features, labels
if os.path.isfile(inception_model_path) == 0:
    print('Downloading Inception model...')
    urlretrieve(
        "https://storage.googleapis.com/download.tensorflow.org/models/inception5h.zip",
        os.path.join('data', 'inception5h.zip'))
    # Unzipping the file
    zip_ref = zipfile.ZipFile(os.path.join('data', 'inception5h.zip'), 'r')
    zip_ref.extract('tensorflow_inception_graph.pb', 'data')
    zip_ref.close()

model = os.path.join(inception_model_path)

# Load the Inception model
with gfile.FastGFile(model, 'rb') as f:
    graph_def = tf.GraphDef()
    graph_def.ParseFromString(f.read())
    persisted_sess.graph.as_default()
    tf.import_graph_def(graph_def, name='')

persisted_sess.graph.get_operations()
persisted_input = persisted_sess.graph.get_tensor_by_name("input:0")
persisted_output = persisted_sess.graph.get_tensor_by_name(
    "softmax2_pre_activation:0")

print('>> Computing feedforward function...')


def f(image_inp):
    return persisted_sess.run(persisted_output,
def main(_):
    # Read all the images.
    image_lists = create_image_lists(VALIDATION_PERCENTAGE, TEST_PERCENTAGE)
    n_classes = len(image_lists.keys())

    # Read the trained Inception-v3 model.
    with gfile.FastGFile(os.path.join(MODEL_DIR, MODEL_FILE), 'rb') as f:
        graph_def = tf.GraphDef()
        graph_def.ParseFromString(f.read())
    # Load the Inception-v3 model and return the data input tensor and the
    # bottleneck output tensor.
    bottleneck_tensor, jpeg_data_tensor = tf.import_graph_def(
        graph_def,
        return_elements=[BOTTLENECK_TENSOR_NAME, JPEG_DATA_TENSOR_NAME])

    # Define the new network input.
    bottleneck_input = tf.placeholder(
        tf.float32, [None, BOTTLENECK_TENSOR_SIZE],
        name='BottleneckInputPlaceholder')
    # Define the new ground-truth input.
    ground_truth_input = tf.placeholder(
        tf.float32, [None, n_classes], name='GroundTruthInput')

    # Define one fully connected layer to solve the new image classification
    # problem.
    with tf.name_scope('final_training_ops'):
        weights = tf.Variable(
            tf.truncated_normal(
                [BOTTLENECK_TENSOR_SIZE, n_classes], stddev=0.1))
        biases = tf.Variable(tf.zeros([n_classes]))
        logits = tf.matmul(bottleneck_input, weights) + biases
        final_tensor = tf.nn.softmax(logits)

    # Define the cross-entropy loss.
    cross_entropy = tf.nn.softmax_cross_entropy_with_logits(
        logits=logits, labels=ground_truth_input)
    cross_entropy_mean = tf.reduce_mean(cross_entropy)
    train_step = tf.train.GradientDescentOptimizer(LEARNING_RATE).minimize(
        cross_entropy_mean)

    # Compute the accuracy.
    with tf.name_scope('evaluation'):
        correct_prediction = tf.equal(
            tf.argmax(final_tensor, 1), tf.argmax(ground_truth_input, 1))
        evaluation_step = tf.reduce_mean(
            tf.cast(correct_prediction, tf.float32))

    # Training process.
    with tf.Session() as sess:
        init = tf.global_variables_initializer().run()

        for i in range(STEPS):
            # Get a batch of training data each step.
            train_bottlenecks, train_ground_truth = get_random_cached_bottlenecks(
                sess, n_classes, image_lists, BATCH, 'training',
                jpeg_data_tensor, bottleneck_tensor)
            sess.run(
                train_step,
                feed_dict={
                    bottleneck_input: train_bottlenecks,
                    ground_truth_input: train_ground_truth
                })

            # Test the accuracy on the validation set.
            if i % 100 == 0 or i + 1 == STEPS:
                validation_bottlenecks, validation_ground_truth = get_random_cached_bottlenecks(
                    sess, n_classes, image_lists, BATCH, 'validation',
                    jpeg_data_tensor, bottleneck_tensor)
                validation_accuracy = sess.run(
                    evaluation_step,
                    feed_dict={
                        bottleneck_input: validation_bottlenecks,
                        ground_truth_input: validation_ground_truth
                    })
                print(
                    'Step %d : Validation accuracy on random sampled %d examples = %.1f%%'
                    % (i, BATCH, validation_accuracy * 100))

        # Finally, test the accuracy on the test set.
        test_bottlenecks, test_ground_truth = get_test_bottlenecks(
            sess, image_lists, n_classes, jpeg_data_tensor, bottleneck_tensor)
        test_accuracy = sess.run(
            evaluation_step,
            feed_dict={
                bottleneck_input: test_bottlenecks,
                ground_truth_input: test_ground_truth
            })
        print('Final test accuracy = %.1f%%' % (test_accuracy * 100))
def create_graph():
    # The file must be opened in binary mode for ParseFromString to work.
    with gfile.FastGFile('model/model.pb', 'rb') as f:
        graph_def = tf.GraphDef()
        graph_def.ParseFromString(f.read())
        _ = tf.import_graph_def(graph_def, name='')
def main(_): # Set up the pre-trained graph. maybe_download_and_extract() graph, bottleneck_tensor, jpeg_data_tensor, resized_image_tensor = ( create_inception_graph()) # Look at the folder structure, and create lists of all the images. image_lists = create_image_lists(FLAGS.image_dir, FLAGS.training_subdir, FLAGS.validation_subdir, FLAGS.testing_subdir) class_count = FLAGS.final_tensor_size # Load ground-truth data from json files f = open(FLAGS.path_json_training) training_gt = json.load(f) f.close() f = open(FLAGS.path_json_validation) validation_gt = json.load(f) f.close() f = open(FLAGS.path_json_testing) testing_gt = json.load(f) f.close() # See if the command-line flags mean we're applying any distortions. do_distort_images = should_distort_images(FLAGS.flip_left_right, FLAGS.random_crop, FLAGS.random_scale, FLAGS.random_brightness) sess = tf.Session() if do_distort_images: # We will be applying distortions, so setup the operations we'll need. distorted_jpeg_data_tensor, distorted_image_tensor = add_input_distortions( FLAGS.flip_left_right, FLAGS.random_crop, FLAGS.random_scale, FLAGS.random_brightness) else: # We'll make sure we've calculated the 'bottleneck' image summaries and # cached them on disk. cache_bottlenecks(sess, image_lists, FLAGS.image_dir, FLAGS.bottleneck_dir, jpeg_data_tensor, bottleneck_tensor) # Add the new layer that we'll be training. (train_step, cross_entropy, bottleneck_input, ground_truth_input, final_tensor) = add_final_training_ops(FLAGS.final_tensor_size, FLAGS.final_tensor_name, bottleneck_tensor) # Set up all our weights to their initial default values. init = tf.initialize_all_variables() sess.run(init) # Create the operations we need to evaluate the accuracy of our new layer. evaluation_step = add_evaluation_step(final_tensor, ground_truth_input) # Run the training for as many cycles as requested on the command line. for i in range(FLAGS.how_many_training_steps): # Get a catch of input bottleneck values, either calculated fresh every time # with distortions applied, or from the cache stored on disk. if do_distort_images: train_bottlenecks, train_ground_truth = get_random_distorted_bottlenecks( sess, image_lists, FLAGS.train_batch_size, 'training', FLAGS.image_dir, distorted_jpeg_data_tensor, distorted_image_tensor, resized_image_tensor, bottleneck_tensor) else: train_bottlenecks, train_ground_truth = get_random_cached_bottlenecks( sess, image_lists, FLAGS.train_batch_size, 'training', FLAGS.bottleneck_dir, FLAGS.image_dir, jpeg_data_tensor, bottleneck_tensor, training_gt) # Feed the bottlenecks and ground truth into the graph, and run a training # step. sess.run(train_step, feed_dict={ bottleneck_input: train_bottlenecks, ground_truth_input: train_ground_truth }) # Every so often, print out how well the graph is training. 
is_last_step = (i + 1 == FLAGS.how_many_training_steps) if (i % FLAGS.eval_step_interval) == 0 or is_last_step: train_accuracy, cross_entropy_value = sess.run( [evaluation_step, cross_entropy], feed_dict={ bottleneck_input: train_bottlenecks, ground_truth_input: train_ground_truth }) print('%s: Step %d: Train accuracy = %.1f%%' % (datetime.now(), i, train_accuracy * 100)) print('%s: Step %d: Cross entropy = %f' % (datetime.now(), i, cross_entropy_value)) validation_bottlenecks, validation_ground_truth = ( get_random_cached_bottlenecks( sess, image_lists, FLAGS.validation_batch_size, 'validation', FLAGS.bottleneck_dir, FLAGS.image_dir, jpeg_data_tensor, bottleneck_tensor, validation_gt)) validation_accuracy = sess.run(evaluation_step, feed_dict={ bottleneck_input: validation_bottlenecks, ground_truth_input: validation_ground_truth }) print('%s: Step %d: Validation accuracy = %.1f%%' % (datetime.now(), i, validation_accuracy * 100)) if (i % FLAGS.graph_save_interval) == 0 or is_last_step: # Write out the trained graph and labels with the weights stored as constants. output_graph_def = graph_util.convert_variables_to_constants( sess, graph.as_graph_def(), [FLAGS.final_tensor_name]) with gfile.FastGFile(FLAGS.output_graph, 'wb') as f: f.write(output_graph_def.SerializeToString()) # We've completed all our training, so run a final test evaluation on # some new images we haven't used before. test_bottlenecks, test_ground_truth = get_random_cached_bottlenecks( sess, image_lists, FLAGS.test_batch_size, 'testing', FLAGS.bottleneck_dir, FLAGS.image_dir, jpeg_data_tensor, bottleneck_tensor, testing_gt) test_accuracy = sess.run(evaluation_step, feed_dict={ bottleneck_input: test_bottlenecks, ground_truth_input: test_ground_truth }) print('Final test accuracy = %.1f%%' % (test_accuracy * 100))
#!/usr/bin/env python
# -*- coding: utf-8 -*-
# @Time    : 2018/2/19 13:53
# @Author  : Jasontang
# @Site    : 
# @File    : variable2_rb.py
# @ToDo    : Use the convert_variables_to_constants function to save the variables
# @ToDo    : in the computation graph and their values as constants, so the whole
# @ToDo    : TensorFlow graph can be stored in a single file.


import tensorflow as tf
from tensorflow.python.platform import gfile

with tf.Session() as sess:
    model_filename = "./variable2_model.pb"
    # Read the saved model file and parse it into the corresponding GraphDef
    # Protocol Buffer.
    with gfile.FastGFile(model_filename, "rb") as f:
        graph_def = tf.GraphDef()
        graph_def.ParseFromString(f.read())

    # Load the graph saved in graph_def into the current graph.
    # return_elements=["add:0"] gives the name of the tensor to return: when
    # saving, the name of the computation node was given, so it is "add";
    # when loading, the name of the tensor is given, so it is "add:0".
    result = tf.import_graph_def(graph_def, return_elements=["add:0"])
    # Prints [array([3.], dtype=float32)]
    print(sess.run(result))
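# For context, a minimal sketch of how a frozen graph such as
# variable2_model.pb could have been produced with
# convert_variables_to_constants, as described in the header above. The
# variable names and values here are illustrative assumptions, not the exact
# code that generated that file.
import tensorflow as tf
from tensorflow.python.framework import graph_util
from tensorflow.python.platform import gfile

v1 = tf.Variable(tf.constant(1.0, shape=[1]), name="v1")
v2 = tf.Variable(tf.constant(2.0, shape=[1]), name="v2")
result = v1 + v2  # creates the node named "add"

with tf.Session() as sess:
    sess.run(tf.global_variables_initializer())
    # Replace the variables with constants holding their current values and
    # keep only the subgraph needed to compute "add".
    output_graph_def = graph_util.convert_variables_to_constants(
        sess, sess.graph_def, ["add"])
    with gfile.FastGFile("./variable2_model.pb", "wb") as f:
        f.write(output_graph_def.SerializeToString())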
def single_infer(pb_file, input_node): with tf.Graph().as_default() as graph: # Set default graph as graph with tf.Session() as sess: # Load the graph in graph_def # We load the protobuf file from the disk and parse it to retrive the unserialized graph_drf with gfile.FastGFile(pb_file, 'rb') as f: for fn in range(len(TEST_FILES)): print('----' + str(fn) + '----') current_data, current_label = provider.loadDataFile(TEST_FILES[fn]) current_data = current_data[:, 0:NUM_POINT, :] # current_data = np.expand_dims(current_data, axis=-2) current_label = np.squeeze(current_label) print(current_data.shape) file_size = current_data.shape[0] num_batches = file_size // BATCH_SIZE print(file_size) data = current_data[0:1, :, :] # print(data) # data = np.round(data * 128 + 127) # quant to [0, 255] # data = (data - 127) / 128 # print(data) label = current_label[0] # interpreter.set_tensor(input_details[0]['index'], current_data[f_idx:f_idx+1, :, :]) # Set FCN graph to the default graph graph_def = tf.GraphDef() graph_def.ParseFromString(f.read()) sess.graph.as_default() # Import a graph_def into the current default Graph (In this case, the weights are (typically) embedded in the graph) tf.import_graph_def( graph_def, input_map=None, return_elements=None, name="", op_dict=None, producer_op_list=None ) nodes = {} for ind, op in enumerate(graph.get_operations()): # INFERENCE Here # print([i for i in op.inputs], op.outputs,) # print(op.type) feed_dict = {} if ind == 0: init_input = graph.get_tensor_by_name(input_node) nodes[input_node] = data for inp in op.inputs: # print(inp) t = graph.get_tensor_by_name(inp.name) # print(t.op.type) if t.op.type != "Const" and t.op.type != 'Range': feed_dict[t] = nodes[t.name] # else: # if "mul_fold" in t.name.lower(): # w = sess.run(t) # if len(w.shape) == 4: # w = w.transpose([3, 0, 1, 2]) # print(t.name, np.max(w), np.min(w), w.shape, w.flatten()[:100]) # print(op.outputs) if not op.outputs: continue l_outputs = [] for output in op.outputs: l_outputs.append(graph.get_tensor_by_name(output.name)) # Output Tensor if len(op.inputs) > 0: # print(feed_dict.keys()) # if ind == len(ops) - 1: # feed_dict.setdefault(init_input, [image]) Session_out = sess.run(l_outputs, feed_dict=feed_dict) # Session_out1 = sess.run(l_output, feed_dict={init_input: [image]}) # print(Session_out) for ind, output in enumerate(op.outputs): fea = Session_out[ind] print(output.name, fea.shape, np.mean(fea), np.std(fea), np.sum(fea)) # if 'quant' in output.name: # print(np.max(fea), np.min(fea)) # if len(fea.flatten()) == 9: # print(fea) if output.name == 'DGCNN/Reshape:0' or output.name == 'PointNet/Reshape:0': print(fea, np.argmax(fea), label) if output.name == 'DGCNN/dgcnn1/Conv/act_quant/FakeQuantWithMinMaxVars:0'\ or output.name == 'DGCNN/dgcnn1/Max:0' \ or output.name == 'DGCNN/get_edge_feature/concat_quant/FakeQuantWithMinMaxVars:0': # print(fea.flatten()[:200], fea.shape) print('\n'.join(map(str, (fea[0, 0, :20, :20])))) if output.name in ['DGCNN/pairwise_distance/MatMul_quant/FakeQuantWithMinMaxVars:0', 'DGCNN/pairwise_distance/mul_quant/FakeQuantWithMinMaxVars:0', 'DGCNN/pairwise_distance/Mul_1_quant/FakeQuantWithMinMaxVars:0', 'DGCNN/pairwise_distance/Sum_quant/FakeQuantWithMinMaxVars:0', 'DGCNN/pairwise_distance/sub_quant/FakeQuantWithMinMaxVars:0', 'DGCNN/pairwise_distance/sub_1_quant/FakeQuantWithMinMaxVars:0', # 'DGCNN/knn/TopKV2:0', 'DGCNN/knn/TopKV2:1', 'DGCNN/get_edge_feature/GatherV2:0', 'DGCNN/transform_net/tconv1/act_quant/FakeQuantWithMinMaxVars:0', 
'DGCNN/transform_net/transform_XYZ/act_quant/FakeQuantWithMinMaxVars:0', 'DGCNN/Transform/MatMul_quant/FakeQuantWithMinMaxVars:0', # 'DGCNN/transform_net/tconv1/weights/read:0', ]: print(fea[0, :20, :20]) # if output.name == 'DGCNN/get_edge_feature_1/Tile:0' \ # or output.name == 'DGCNN/get_edge_feature_1/GatherV2:0' \ # or output.name == 'DGCNN/get_edge_feature_1/sub_quant/FakeQuantWithMinMaxVars:0': # print(fea.flatten()[:200]) if output not in nodes: nodes[output.name] = Session_out[ind]
from http.server import HTTPServer, BaseHTTPRequestHandler
import io, shutil, json, time, socketserver, threading, socket
from tensorflow.python.platform import gfile
import tensorflow as tf
import numpy as np
import random
import sys

sess = tf.Session()
with gfile.FastGFile('Models/model.pb', 'rb') as f:
    graph_def = tf.GraphDef()
    graph_def.ParseFromString(f.read())
    sess.graph.as_default()
    tf.import_graph_def(graph_def, name='')  # import the computation graph

# An initialization step is required.
sess.run(tf.global_variables_initializer())

# Inputs
X = sess.graph.get_tensor_by_name('X:0')
P = sess.graph.get_tensor_by_name('P:0')
output = sess.graph.get_tensor_by_name('output/BiasAdd:0')

user_list = ['zc', 'wjw', 'wz']
passwords_list = ['19941020', '12345678', '87654321']
dict_lastT = {}
dict_challenge = {}
dict_condition = {}  # 0: outside  1: auth step 1  2: auth step 2  3: inside  -1: rejected
current_user_enterT = 0
def infer_graph(pb_file, input_node, output_nodes): if not isinstance(output_nodes, list): output_nodes = [output_nodes] with tf.Graph().as_default() as graph: # Set default graph as graph with tf.Session() as sess: # Load the graph in graph_def # We load the protobuf file from the disk and parse it to retrive the unserialized graph_drf with gfile.FastGFile(pb_file, 'rb') as f: num_votes = 1 total_correct = 0 total_seen = 0 for fn in range(len(TEST_FILES)): print('----' + str(fn) + '----') h5f = os.path.join(BASE_DIR, "..", TEST_FILES[fn]) current_data, current_label = provider.loadDataFile(h5f) current_data = current_data[:, 0:NUM_POINT, :] # current_data = np.expand_dims(current_data, axis=-2) current_label = np.squeeze(current_label) print(current_data.shape) file_size = current_data.shape[0] # num_batches = file_size // BATCH_SIZE # print(file_size) for f_idx in range(file_size): for vote_idx in range(num_votes): # rotated_data = provider.rotate_point_cloud_by_angle(current_data[f_idx:f_idx+1, :, :], # vote_idx / float(num_votes) * np.pi * 2) data = current_data[f_idx:f_idx + 1, :, :] label = current_label[f_idx] # interpreter.set_tensor(input_details[0]['index'], current_data[f_idx:f_idx+1, :, :]) # Set FCN graph to the default graph graph_def = tf.GraphDef() graph_def.ParseFromString(f.read()) sess.graph.as_default() # Import a graph_def into the current default Graph (In this case, the weights are (typically) embedded in the graph) tf.import_graph_def( graph_def, input_map=None, return_elements=None, name="", op_dict=None, producer_op_list=None ) res = [] for output_node in output_nodes: # INFERENCE Here l_input = graph.get_tensor_by_name(input_node) # Input Tensor l_output = graph.get_tensor_by_name(output_node) # Output Tensor # initialize_all_variables tf.global_variables_initializer() Session_out = sess.run(l_output, feed_dict={l_input: data}) res.append(Session_out) # return res # print(Session_out, np.argmax(Session_out), label) pred = np.argmax(Session_out) # correct = np.sum(pred_val_topk[:,0:topk] == label_val) total_correct += 1 if pred == label else 0 total_seen += 1 print('eval accuracy: %f' % (total_correct / float(total_seen)))
print('loss:', sess.run(loss))
print('W:', sess.run(W))
sess.run(train_op)
print(sess.run(Global_step))

os.system('rm ./tmp2/my_graph.pb')
tf.train.write_graph(sess.graph_def, './tmp2', 'my_graph.pb', False)
# os.system('rm ./tmp2/my_model')
saver.save(sess, './tmp2/my_model', global_step=Global_step)

##############################################
tf.reset_default_graph()
###############################################

sess = tf.Session()  # after reset_default_graph(), a new session is necessary
with gfile.FastGFile("./tmp2/my_graph.pb", 'rb') as f:  # "tmp/load/test.pb"
    graph_def = tf.GraphDef()
    graph_def.ParseFromString(f.read())
    sess.graph.as_default()
    tf.import_graph_def(graph_def, name='')

X = tf.placeholder(dtype=tf.float32)
W = sess.graph.get_tensor_by_name("weight:0")  # you have to get a graph firstly
Prediction = X * W

print('before:', tf.get_collection(tf.GraphKeys.GLOBAL_VARIABLES))
tf.add_to_collection(tf.GraphKeys.GLOBAL_VARIABLES, W)
print('after:', tf.get_collection(tf.GraphKeys.GLOBAL_VARIABLES))

try:
    saver = tf.train.Saver(tf.global_variables())
import tensorflow as tf
import network as net
import utils as u
import params as p
from PIL import Image
from scipy import misc
import numpy as np
from tensorflow.python.platform import gfile

net_name = 'squeeze_normal-drone-dev'
folder_name = './networks/%s' % net_name

with gfile.FastGFile(folder_name + "/minimal_graph_quant.pb", 'rb') as f:
    graph_def = tf.GraphDef()
    graph_def.ParseFromString(f.read())
    tf.import_graph_def(graph_def, name='')

sq_graph = tf.get_default_graph()
inp_batch = sq_graph.get_tensor_by_name('Input_batching/batch:0')
t_activations = sq_graph.get_tensor_by_name('activation/activations:0')
print(inp_batch)

k = p.ANCHOR_COUNT
t_deltas = tf.slice(t_activations, [0, 0, 0, 0], [-1, -1, -1, 4 * k])
t_gammas = tf.sigmoid(
    tf.slice(t_activations, [0, 0, 0, 4 * k], [-1, -1, -1, k]))
t_classes = tf.slice(t_activations, [0, 0, 0, 5 * k],
                     [-1, -1, -1, p.OUT_CLASSES * k])
t_chosen_anchor = tf.argmax(t_gammas, axis=3)

all_out = [t_deltas, t_gammas, t_classes, t_chosen_anchor]
def main(_):
    # Setup the directory we'll write summaries to for TensorBoard
    if tf.gfile.Exists(FLAGS.summaries_dir):
        tf.gfile.DeleteRecursively(FLAGS.summaries_dir)
    tf.gfile.MakeDirs(FLAGS.summaries_dir)

    # Set up the pre-trained graph.
    maybe_download_and_extract()
    graph, bottleneck_tensor, jpeg_data_tensor, resized_image_tensor = (
        create_inception_graph())

    # Look at the folder structure, and create lists of all the images.
    image_lists = create_image_lists(FLAGS.image_dir, FLAGS.testing_percentage,
                                     FLAGS.validation_percentage)
    class_count = len(image_lists.keys())
    if class_count == 0:
        print('No valid folders of images found at ' + FLAGS.image_dir)
        return -1
    if class_count == 1:
        print('Only one valid folder of images found at ' + FLAGS.image_dir +
              ' - multiple classes are needed for classification.')
        return -1

    # See if the command-line flags mean we're applying any distortions.
    do_distort_images = should_distort_images(FLAGS.flip_left_right,
                                              FLAGS.random_crop,
                                              FLAGS.random_scale,
                                              FLAGS.random_brightness)
    sess = tf.Session()

    if do_distort_images:
        # We will be applying distortions, so setup the operations we'll need.
        distorted_jpeg_data_tensor, distorted_image_tensor = add_input_distortions(
            FLAGS.flip_left_right, FLAGS.random_crop, FLAGS.random_scale,
            FLAGS.random_brightness)
    else:
        # We'll make sure we've calculated the 'bottleneck' image summaries and
        # cached them on disk.
        cache_bottlenecks(sess, image_lists, FLAGS.image_dir,
                          FLAGS.bottleneck_dir, jpeg_data_tensor,
                          bottleneck_tensor)

    # Add the new layer that we'll be training.
    (train_step, cross_entropy, bottleneck_input, ground_truth_input,
     final_tensor) = add_final_training_ops(len(image_lists.keys()),
                                            FLAGS.final_tensor_name,
                                            bottleneck_tensor)

    # Create the operations we need to evaluate the accuracy of our new layer.
    evaluation_step, prediction = add_evaluation_step(final_tensor,
                                                      ground_truth_input)

    # Merge all the summaries and write them out to /tmp/retrain_logs (by default)
    merged = tf.summary.merge_all()
    train_writer = tf.summary.FileWriter(FLAGS.summaries_dir + '/train',
                                         sess.graph)
    validation_writer = tf.summary.FileWriter(FLAGS.summaries_dir + '/validation')

    # Set up all our weights to their initial default values.
    init = tf.global_variables_initializer()
    sess.run(init)

    # Run the training for as many cycles as requested on the command line.
    for i in range(FLAGS.how_many_training_steps):
        # Get a batch of input bottleneck values, either calculated fresh every
        # time with distortions applied, or from the cache stored on disk.
        if do_distort_images:
            train_bottlenecks, train_ground_truth = get_random_distorted_bottlenecks(
                sess, image_lists, FLAGS.train_batch_size, 'training',
                FLAGS.image_dir, distorted_jpeg_data_tensor,
                distorted_image_tensor, resized_image_tensor, bottleneck_tensor)
        else:
            train_bottlenecks, train_ground_truth, _ = get_random_cached_bottlenecks(
                sess, image_lists, FLAGS.train_batch_size, 'training',
                FLAGS.bottleneck_dir, FLAGS.image_dir, jpeg_data_tensor,
                bottleneck_tensor)
        # Feed the bottlenecks and ground truth into the graph, and run a
        # training step. Capture training summaries for TensorBoard with the
        # `merged` op.
        train_summary, _ = sess.run(
            [merged, train_step],
            feed_dict={bottleneck_input: train_bottlenecks,
                       ground_truth_input: train_ground_truth})
        train_writer.add_summary(train_summary, i)

        # Every so often, print out how well the graph is training.
        is_last_step = (i + 1 == FLAGS.how_many_training_steps)
        if (i % FLAGS.eval_step_interval) == 0 or is_last_step:
            train_accuracy, cross_entropy_value = sess.run(
                [evaluation_step, cross_entropy],
                feed_dict={bottleneck_input: train_bottlenecks,
                           ground_truth_input: train_ground_truth})
            print('%s: Step %d: Train accuracy = %.1f%%' %
                  (datetime.now(), i, train_accuracy * 100))
            print('%s: Step %d: Cross entropy = %f' %
                  (datetime.now(), i, cross_entropy_value))
            validation_bottlenecks, validation_ground_truth, _ = (
                get_random_cached_bottlenecks(
                    sess, image_lists, FLAGS.validation_batch_size, 'validation',
                    FLAGS.bottleneck_dir, FLAGS.image_dir, jpeg_data_tensor,
                    bottleneck_tensor))
            # Run a validation step and capture training summaries for
            # TensorBoard with the `merged` op.
            validation_summary, validation_accuracy = sess.run(
                [merged, evaluation_step],
                feed_dict={bottleneck_input: validation_bottlenecks,
                           ground_truth_input: validation_ground_truth})
            validation_writer.add_summary(validation_summary, i)
            print('%s: Step %d: Validation accuracy = %.1f%% (N=%d)' %
                  (datetime.now(), i, validation_accuracy * 100,
                   len(validation_bottlenecks)))

    # We've completed all our training, so run a final test evaluation on
    # some new images we haven't used before.
    test_bottlenecks, test_ground_truth, test_filenames = (
        get_random_cached_bottlenecks(sess, image_lists, FLAGS.test_batch_size,
                                      'testing', FLAGS.bottleneck_dir,
                                      FLAGS.image_dir, jpeg_data_tensor,
                                      bottleneck_tensor))
    test_accuracy, predictions = sess.run(
        [evaluation_step, prediction],
        feed_dict={bottleneck_input: test_bottlenecks,
                   ground_truth_input: test_ground_truth})
    print('Final test accuracy = %.1f%% (N=%d)' %
          (test_accuracy * 100, len(test_bottlenecks)))

    if FLAGS.print_misclassified_test_images:
        print('=== MISCLASSIFIED TEST IMAGES ===')
        for i, test_filename in enumerate(test_filenames):
            if predictions[i] != test_ground_truth[i].argmax():
                print('%70s %s' % (test_filename,
                                   list(image_lists.keys())[predictions[i]]))

    # Write out the trained graph and labels with the weights stored as constants.
    output_graph_def = graph_util.convert_variables_to_constants(
        sess, graph.as_graph_def(), [FLAGS.final_tensor_name])
    with gfile.FastGFile(FLAGS.output_graph, 'wb') as f:
        f.write(output_graph_def.SerializeToString())
    with gfile.FastGFile(FLAGS.output_labels, 'w') as f:
        f.write('\n'.join(image_lists.keys()) + '\n')
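# A minimal sketch (not part of the original script) of how the exported
# FLAGS.output_graph and FLAGS.output_labels files could be used later for a
# single prediction. The input name 'DecodeJpeg/contents:0' is the one commonly
# exposed by the Inception v3 retrain graph, and 'some_image.jpg' is a
# placeholder; both should be verified against the actual exported graph:
#
#     with gfile.FastGFile(FLAGS.output_labels, 'r') as f:
#         labels = f.read().splitlines()
#     with tf.Graph().as_default(), tf.Session() as infer_sess:
#         with gfile.FastGFile(FLAGS.output_graph, 'rb') as f:
#             gd = tf.GraphDef()
#             gd.ParseFromString(f.read())
#             tf.import_graph_def(gd, name='')
#         softmax = infer_sess.graph.get_tensor_by_name(
#             FLAGS.final_tensor_name + ':0')
#         image_data = gfile.FastGFile('some_image.jpg', 'rb').read()
#         preds = infer_sess.run(softmax, {'DecodeJpeg/contents:0': image_data})
#         print(labels[preds[0].argmax()])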
def main():
    # with tf.Graph().as_default():
    with tf.Session() as sess:
        pnet, rnet, onet = detect_and_align.create_mtcnn(sess, None)

        # load_model('model/20170512-110547.pb')  # Load model
        model_exp = os.path.expanduser('model/20170512-110547.pb')
        if os.path.isfile(model_exp):
            print('Model filename: %s' % model_exp)
            with gfile.FastGFile(model_exp, 'rb') as f:
                graph_def = tf.GraphDef()
                graph_def.ParseFromString(f.read())
                tf.import_graph_def(graph_def, name='')
        # done loading

        images_placeholder = tf.get_default_graph().get_tensor_by_name('input:0')
        embeddings = tf.get_default_graph().get_tensor_by_name('embeddings:0')
        phase_train_placeholder = tf.get_default_graph().get_tensor_by_name(
            'phase_train:0')

        cap = cv2.VideoCapture(0)
        frame_height = cap.get(cv2.CAP_PROP_FRAME_HEIGHT)

        show_landmarks = True
        show_bb = True
        show_id = False
        show_fps = True
        while True:
            start = time.time()
            _, frame = cap.read()

            face_patches, padded_bounding_boxes, landmarks = detect_and_align.align_image(
                frame, pnet, rnet, onet)

            if len(face_patches) > 0:
                face_patches = np.stack(face_patches)
                feed_dict = {images_placeholder: face_patches,
                             phase_train_placeholder: False}
                embs = sess.run(embeddings, feed_dict=feed_dict)

                print('Matches in frame:')
                for i in range(len(embs)):
                    bb = padded_bounding_boxes[i]

                    if show_id:
                        # NOTE: matching_id is not defined in this fragment; it is
                        # expected to come from comparing embs[i] against a gallery
                        # of known embeddings (see the sketch after this function).
                        font = cv2.FONT_HERSHEY_SIMPLEX
                        cv2.putText(frame, matching_id, (bb[0], bb[3]), font, 1,
                                    (255, 255, 255), 1, cv2.LINE_AA)
                    if show_bb:
                        cv2.rectangle(frame, (bb[0], bb[1]), (bb[2], bb[3]),
                                      (255, 0, 0), 2)
                    if show_landmarks:
                        for j in range(5):
                            size = 1
                            top_left = (int(landmarks[i, j]) - size,
                                        int(landmarks[i, j + 5]) - size)
                            bottom_right = (int(landmarks[i, j]) + size,
                                            int(landmarks[i, j + 5]) + size)
                            cv2.rectangle(frame, top_left, bottom_right,
                                          (255, 0, 255), 2)
            else:
                print('Couldn\'t find a face')

            end = time.time()
            seconds = end - start
            fps = round(1 / seconds, 2)
            if show_fps:
                font = cv2.FONT_HERSHEY_SIMPLEX
                cv2.putText(frame, str(fps), (0, int(frame_height) - 5), font, 1,
                            (255, 255, 255), 1, cv2.LINE_AA)

            cv2.imshow('frame', frame)
            key = cv2.waitKey(1)
            if key == ord('q'):
                break

        cap.release()
        cv2.destroyAllWindows()
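# A minimal sketch (not from the original script) of how matching_id could be
# derived for the show_id branch above: compare each face embedding against a
# hypothetical gallery of known embeddings and take the closest entry under a
# distance threshold. known_ids, known_embs, and the 1.0 threshold are all
# assumptions for illustration.
#
#     def find_matching_id(known_ids, known_embs, emb, threshold=1.0):
#         # Euclidean distance between this embedding and every known one.
#         distances = np.linalg.norm(known_embs - emb, axis=1)
#         best = int(np.argmin(distances))
#         if distances[best] < threshold:
#             return known_ids[best], distances[best]
#         return 'Unknown', distances[best]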