def evaluate_single():
    # PreProcess: validate flags before building the graph.
    if not FLAGS.image:
        raise ValueError('Please supply an image')
    if not tf.gfile.Exists(FLAGS.image):
        raise ValueError('Image not found.')
    if not tf.gfile.Exists(FLAGS.ckpt):
        raise ValueError('Please supply a checkpoint')
    if FLAGS.num_of_classes is None:
        raise ValueError('Please supply num_of_classes.')
    if FLAGS.event_dir is None:
        FLAGS.event_dir = os.path.join(FLAGS.ckpt, 'event')
    if tf.gfile.Exists(FLAGS.event_dir):
        tf.gfile.DeleteRecursively(FLAGS.event_dir)
    tf.gfile.MakeDirs(FLAGS.event_dir)

    with tf.Graph().as_default() as g:
        nn = CNN(FLAGS.network, FLAGS.num_of_classes, 1, FLAGS.image_size,
                 FLAGS.image_crop_size, True, FLAGS.grayscale, True,
                 FLAGS.use_fp16)

        # Decode the single input image on the CPU and run the network's
        # standard input preprocessing.
        with tf.device("/cpu:0"):
            file_data = tf.read_file(FLAGS.image)
            image = tf.image.decode_jpeg(file_data, channels=3)
            image = nn.input(image, True, False)

        dtype = tf.float16 if FLAGS.use_fp16 else tf.float32
        keep_prob = tf.constant(1.0, dtype=dtype)  # no dropout at eval time
        batch_size = tf.constant(1)
        logits = nn.inference(image, keep_prob, batch_size)

        # Calculate predictions.
        logits = tf.cast(logits, tf.float32)
        softmax = tf.nn.softmax(logits)

        # Restore the moving average version of the learned variables for eval.
        variable_averages = tf.train.ExponentialMovingAverage(
            CNN.MOVING_AVERAGE_DECAY)
        variables_to_restore = variable_averages.variables_to_restore()
        saver = tf.train.Saver(variables_to_restore)

        # Build the summary operation based on the TF collection of Summaries.
        info = [['Image', str(FLAGS.image)],
                ['Image size', str(FLAGS.image_size)],
                ['Image Crop size', str(FLAGS.image_crop_size)],
                ['Grayscale', str(FLAGS.grayscale)],
                ['Use Float16', str(FLAGS.use_fp16)]]
        info_summary = tf.summary.text('Info',
                                       tf.convert_to_tensor(info),
                                       collections=[])
        summary_op = tf.summary.merge_all()
        summary_writer = tf.summary.FileWriter(FLAGS.event_dir, g)

        with tf.Session() as sess:
            summary_writer.add_summary(sess.run(info_summary))

            ckpt = tf.train.get_checkpoint_state(FLAGS.ckpt)
            if ckpt and ckpt.model_checkpoint_path:
                saver.restore(sess, ckpt.model_checkpoint_path)
                # Extract the global step from the checkpoint filename,
                # e.g. '.../model.ckpt-1000' -> '1000'.
                global_step = ckpt.model_checkpoint_path.split('/')[-1].split('-')[-1]
            else:
                print('No checkpoint file found')
                return

            result = sess.run(softmax)
            result = result[0]
            print('- Result')
            for i, v in enumerate(result):
                print('[%d]: %f' % (i, v))

            summary = tf.Summary()
            summary.ParseFromString(sess.run(summary_op))
            summary_writer.add_summary(summary, global_step)
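# --- Hypothetical usage (not part of the original script) --------------------
# evaluate_single() is driven entirely by flags; assuming the usual
# tf.app.flags setup elsewhere in this file and a hypothetical script name,
# an invocation might look like:
#
#   python cnn_eval.py --image=/path/to/input.jpg \
#                      --ckpt=/path/to/train_dir \
#                      --num_of_classes=10
#
# --event_dir is optional and defaults to <ckpt>/event, as handled above.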
def export():
    output_dir = os.path.join(FLAGS.ckpt, "export")
    if tf.gfile.Exists(output_dir):
        tf.gfile.DeleteRecursively(output_dir)
    tf.gfile.MakeDirs(output_dir)

    ### EDIT TRAINING GRAPH ###
    with tf.Graph().as_default() as g:
        nn = CNN(FLAGS.network, FLAGS.num_of_classes, 1, FLAGS.image_size,
                 FLAGS.image_crop_size, FLAGS.log_input, FLAGS.grayscale,
                 FLAGS.log_feature, FLAGS.use_fp16)

        # Replace the training input pipeline with a single image placeholder.
        with tf.device("/cpu:0"):
            image = tf.placeholder(tf.float32, shape=(None, None, 3),
                                   name="image")
            image = nn.input(image, True, False)

        dtype = tf.float16 if FLAGS.use_fp16 else tf.float32
        keep_prob = tf.constant(1.0, dtype=dtype)  # no dropout at eval time
        batch_size = tf.constant(1)
        logits = nn.inference(image, keep_prob, batch_size)

        # Calculate predictions.
        logits = tf.cast(logits, tf.float32)
        softmax = tf.nn.softmax(logits)

        # Restore the moving average version of the learned variables for eval.
        variable_averages = tf.train.ExponentialMovingAverage(
            CNN.MOVING_AVERAGE_DECAY)
        variables_to_restore = variable_averages.variables_to_restore()
        saver = tf.train.Saver(variables_to_restore)

        # Summary writing (based on the TF collection of Summaries) is
        # disabled in this export path:
        # summary_op = tf.summary.merge_all()
        # summary_writer = tf.summary.FileWriter(output_dir, g)

        with tf.Session() as sess:
            ckpt = tf.train.get_checkpoint_state(FLAGS.ckpt)
            if ckpt and ckpt.model_checkpoint_path:
                saver.restore(sess, ckpt.model_checkpoint_path)
                # Extract the global step from the checkpoint filename.
                global_step = ckpt.model_checkpoint_path.split('/')[-1].split('-')[-1]
            else:
                print('No checkpoint file found')
                return

            # Re-save the inference-only graph and variables into output_dir.
            tf.train.Saver().save(
                sess,
                os.path.join(output_dir, 'model.ckpt'),
                global_step=tf.convert_to_tensor(global_step))
            tf.train.write_graph(sess.graph.as_graph_def(),
                                 output_dir,
                                 'graph.pbtxt',
                                 as_text=True)
            # summary = tf.Summary()
            # summary.ParseFromString(sess.run(summary_op))
            # summary_writer.add_summary(summary, global_step)

    ### EXPORT MODEL ###
    graph_path = os.path.join(output_dir, 'graph.pbtxt')
    if not tf.gfile.Exists(graph_path):
        raise ValueError('Graph not found ({})'.format(graph_path))

    ckpt = tf.train.get_checkpoint_state(output_dir)
    if not ckpt or not ckpt.model_checkpoint_path:
        raise ValueError('Checkpoint not found.')
    ckpt_path = ckpt.model_checkpoint_path

    output_path = os.path.join(output_dir, 'frozen.pb')
    optimized_output_path = os.path.join(output_dir, 'optimized.pb')

    # Freeze variables into constants so the graph is self-contained.
    freeze_graph.freeze_graph(input_graph=graph_path,
                              input_saver="",
                              input_binary=False,
                              input_checkpoint=ckpt_path,
                              output_node_names="softmax_linear/softmax",
                              restore_op_name="save/restore_all",
                              filename_tensor_name="save/Const:0",
                              output_graph=output_path,
                              clear_devices=True,
                              initializer_nodes="")

    # Strip training-only nodes; the .pb files are binary, so read/write
    # in 'rb'/'wb' modes.
    input_graph_def = tf.GraphDef()
    with tf.gfile.Open(output_path, "rb") as f:
        input_graph_def.ParseFromString(f.read())

    output_graph_def = optimize_for_inference_lib.optimize_for_inference(
        input_graph_def, ['image'], ["softmax_linear/softmax"],
        tf.float32.as_datatype_enum)

    with tf.gfile.FastGFile(optimized_output_path, "wb") as f:
        f.write(output_graph_def.SerializeToString())

    output_size = os.path.getsize(output_path)
    optimized_output_size = os.path.getsize(optimized_output_path)
    print('Model exported successfully.')
    print('- Frozen Model: {} ({})'.format(output_path,
                                           _humansize(output_size)))
    print('- Optimized Model: {} ({})'.format(
        optimized_output_path, _humansize(optimized_output_size)))
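# --- Sketch: loading the exported graph back (not part of the original) ------
# export() stops after writing frozen.pb / optimized.pb. As a minimal sketch
# of how the optimized graph could be used for inference: _run_frozen_model is
# a hypothetical helper, image_array is assumed to be an HxWx3 float32 numpy
# array, and the node names ('image', 'softmax_linear/softmax') are the same
# ones passed to optimize_for_inference() above.
def _run_frozen_model(pb_path, image_array):
    graph_def = tf.GraphDef()
    with tf.gfile.GFile(pb_path, 'rb') as f:
        graph_def.ParseFromString(f.read())
    with tf.Graph().as_default() as graph:
        # Import without a name scope so tensor names stay 'image:0' etc.
        tf.import_graph_def(graph_def, name='')
        image_t = graph.get_tensor_by_name('image:0')
        softmax_t = graph.get_tensor_by_name('softmax_linear/softmax:0')
        with tf.Session(graph=graph) as sess:
            return sess.run(softmax_t, feed_dict={image_t: image_array})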
def export_serving():
    output_dir = os.path.join(FLAGS.ckpt, "export_serving")
    if tf.gfile.Exists(output_dir):
        tf.gfile.DeleteRecursively(output_dir)
    tf.gfile.MakeDirs(output_dir)

    ### EDIT TRAINING GRAPH ###
    with tf.Graph().as_default() as g:
        nn = CNN(FLAGS.network, FLAGS.num_of_classes, 1, FLAGS.image_size,
                 FLAGS.image_crop_size, FLAGS.log_input, FLAGS.grayscale,
                 FLAGS.log_feature, FLAGS.use_fp16)

        with tf.device("/cpu:0"):
            image = tf.placeholder(tf.float32, shape=(None, None, 3),
                                   name="image")
            # NOTE: 'image' is rebound here, so every tensor_info built from
            # it below refers to the preprocessed tensor, not the raw
            # placeholder.
            image = nn.input(image, True, False)

        dtype = tf.float16 if FLAGS.use_fp16 else tf.float32
        keep_prob = tf.constant(1.0, dtype=dtype)  # no dropout at eval time
        batch_size = tf.constant(1)
        logits = nn.inference(image, keep_prob, batch_size)

        # Calculate predictions.
        logits = tf.cast(logits, tf.float32)
        softmax = tf.nn.softmax(logits)

        # Restore the moving average version of the learned variables for eval.
        variable_averages = tf.train.ExponentialMovingAverage(
            CNN.MOVING_AVERAGE_DECAY)
        variables_to_restore = variable_averages.variables_to_restore()
        saver = tf.train.Saver(variables_to_restore)

        # Summary writing (based on the TF collection of Summaries) is
        # disabled in this export path:
        # summary_op = tf.summary.merge_all()
        # summary_writer = tf.summary.FileWriter(output_dir, g)

        with tf.Session() as sess:
            ckpt = tf.train.get_checkpoint_state(FLAGS.ckpt)
            if ckpt and ckpt.model_checkpoint_path:
                saver.restore(sess, ckpt.model_checkpoint_path)
                # Extract the global step from the checkpoint filename.
                global_step = ckpt.model_checkpoint_path.split('/')[-1].split('-')[-1]
            else:
                print('No checkpoint file found')
                return

            tf.train.Saver().save(
                sess,
                os.path.join(output_dir, 'model.ckpt'),
                global_step=tf.convert_to_tensor(global_step))
            tf.train.write_graph(sess.graph.as_graph_def(),
                                 output_dir,
                                 'graph.pbtxt',
                                 as_text=True)
            # summary = tf.Summary()
            # summary.ParseFromString(sess.run(summary_op))
            # summary_writer.add_summary(summary, global_step)

            ### EXPORT MODEL ###
            export_path_base = output_dir
            export_path = os.path.join(
                tf.compat.as_bytes(export_path_base),
                tf.compat.as_bytes(str(FLAGS.model_version)))
            print('Exporting model to {}'.format(export_path))
            builder = tf.saved_model.builder.SavedModelBuilder(export_path)

            # Build the signature_def_map.
            # NOTE: this graph has no separate class-label tensor, so both the
            # 'classes' and 'scores' outputs are wired to the softmax tensor.
            classification_inputs = tf.saved_model.utils.build_tensor_info(
                image)
            classification_outputs_classes = (
                tf.saved_model.utils.build_tensor_info(softmax))
            classification_outputs_scores = (
                tf.saved_model.utils.build_tensor_info(softmax))
            classification_signature = (
                tf.saved_model.signature_def_utils.build_signature_def(
                    inputs={
                        tf.saved_model.signature_constants.CLASSIFY_INPUTS:
                        classification_inputs
                    },
                    outputs={
                        tf.saved_model.signature_constants.
                        CLASSIFY_OUTPUT_CLASSES:
                        classification_outputs_classes,
                        tf.saved_model.signature_constants.
                        CLASSIFY_OUTPUT_SCORES:
                        classification_outputs_scores
                    },
                    method_name=tf.saved_model.signature_constants.
                    CLASSIFY_METHOD_NAME))

            tensor_info_x = tf.saved_model.utils.build_tensor_info(image)
            tensor_info_y = tf.saved_model.utils.build_tensor_info(softmax)
            prediction_signature = (
                tf.saved_model.signature_def_utils.build_signature_def(
                    inputs={'image': tensor_info_x},
                    outputs={'score': tensor_info_y},
                    method_name=tf.saved_model.signature_constants.
                    PREDICT_METHOD_NAME))

            legacy_init_op = tf.group(tf.tables_initializer(),
                                      name='legacy_init_op')
            builder.add_meta_graph_and_variables(
                sess, [tf.saved_model.tag_constants.SERVING],
                signature_def_map={
                    'predict_image':
                    prediction_signature,
                    tf.saved_model.signature_constants.
                    DEFAULT_SERVING_SIGNATURE_DEF_KEY:
                    classification_signature,
                },
                legacy_init_op=legacy_init_op)

            builder.save()
            print('Done exporting!')
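# --- Sketch: sanity-checking the SavedModel (not part of the original) -------
# One way to verify what export_serving() wrote is to reload the versioned
# directory with the SERVING tag and run the 'predict_image' signature defined
# above. _check_saved_model is a hypothetical helper; image_array is assumed
# to match the shape of the tensor named in the signature's 'image' input.
def _check_saved_model(export_path, image_array):
    with tf.Session(graph=tf.Graph()) as sess:
        # loader.load returns the MetaGraphDef, whose signature_def map
        # carries the tensor names recorded by build_signature_def().
        meta_graph = tf.saved_model.loader.load(
            sess, [tf.saved_model.tag_constants.SERVING], export_path)
        sig = meta_graph.signature_def['predict_image']
        return sess.run(sig.outputs['score'].name,
                        feed_dict={sig.inputs['image'].name: image_array})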