Example #1
def main(args):
    # If output_model path is relative and in cwd, make it absolute from root
    output_model = FLAGS.output_model
    if str(Path(output_model).parent) == '.':
        output_model = str((Path.cwd() / output_model))

    output_fld = Path(output_model).parent
    output_model_name = Path(output_model).name
    output_model_stem = Path(output_model).stem
    output_model_pbtxt_name = output_model_stem + '.pbtxt'

    # Create output directory if it does not exist
    Path(output_model).parent.mkdir(parents=True, exist_ok=True)

    if FLAGS.channels_first:
        K.set_image_data_format('channels_first')
    else:
        K.set_image_data_format('channels_last')

    custom_object_dict = get_custom_objects()

    model = load_input_model(FLAGS.input_model, FLAGS.input_model_json,
                             FLAGS.input_model_yaml, custom_objects=custom_object_dict)

    # TODO(amirabdi): Support networks with multiple inputs
    orig_output_node_names = [node.op.name for node in model.outputs]
    if FLAGS.output_nodes_prefix:
        num_output = len(orig_output_node_names)
        pred = [None] * num_output
        converted_output_node_names = [None] * num_output

        # Create dummy tf nodes to rename output
        for i in range(num_output):
            converted_output_node_names[i] = '{}{}'.format(
                FLAGS.output_nodes_prefix, i)
            pred[i] = tf.identity(model.outputs[i],
                                  name=converted_output_node_names[i])
    else:
        converted_output_node_names = orig_output_node_names
    logging.info('Converted output node names are: %s',
                 str(converted_output_node_names))

    sess = K.get_session()
    if FLAGS.output_meta_ckpt:
        saver = tf.train.Saver()
        saver.save(sess, str(output_fld / output_model_stem))

    if FLAGS.save_graph_def:
        tf.train.write_graph(sess.graph.as_graph_def(), str(output_fld),
                             output_model_pbtxt_name, as_text=True)
        logging.info('Saved the graph definition in ascii format at %s',
                     str(Path(output_fld) / output_model_pbtxt_name))

    if FLAGS.quantize:
        from tensorflow.tools.graph_transforms import TransformGraph
        transforms = ["quantize_weights", "quantize_nodes"]
        transformed_graph_def = TransformGraph(sess.graph.as_graph_def(), [],
                                               converted_output_node_names,
                                               transforms)
        constant_graph = graph_util.convert_variables_to_constants(
            sess,
            transformed_graph_def,
            converted_output_node_names)
    else:
        constant_graph = graph_util.convert_variables_to_constants(
            sess,
            sess.graph.as_graph_def(),
            converted_output_node_names)

    graph_io.write_graph(constant_graph, str(output_fld), output_model_name,
                         as_text=False)
    logging.info('Saved the frozen graph at %s',
                 str(Path(output_fld) / output_model_name))
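
The script above reads its options from absl FLAGS. A minimal sketch of the flag definitions it assumes (flag names are taken from the code; defaults and help strings are illustrative):

from absl import app, flags, logging

FLAGS = flags.FLAGS
flags.DEFINE_string('input_model', None, 'Path to the input Keras model (.h5).')
flags.DEFINE_string('input_model_json', None, 'Optional path to the architecture JSON.')
flags.DEFINE_string('input_model_yaml', None, 'Optional path to the architecture YAML.')
flags.DEFINE_string('output_model', None, 'Path of the frozen output graph (.pb).')
flags.DEFINE_string('output_nodes_prefix', None, 'Optional prefix used to rename output nodes.')
flags.DEFINE_boolean('save_graph_def', False, 'Also write the graph definition as .pbtxt.')
flags.DEFINE_boolean('output_meta_ckpt', False, 'Also save a meta checkpoint for the model.')
flags.DEFINE_boolean('quantize', False, 'Apply the quantize_weights/quantize_nodes transforms.')
flags.DEFINE_boolean('channels_first', False, 'Use the channels_first image data format.')

if __name__ == '__main__':
    app.run(main)
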
Example #2
    def __init__(self,
                 meta_file,
                 checkpoint_file,
                 dest_nodes,
                 inputShape=None,
                 in_nodes=None):
        super(TensorflowParser, self).__init__()

        # load model files into TensorFlow graph
        if meta_file:
            model = TensorflowParser._load_meta(meta_file)

        if checkpoint_file:
            self.ckpt_data = TensorflowParser._load_weights(checkpoint_file)
            self.weight_loaded = True

        # extract subgraph using in_nodes and dest_nodes
        if in_nodes is not None and inputShape is not None:
            from tensorflow.python.tools import strip_unused_lib
            from tensorflow.python.framework import dtypes
            from tensorflow.python.platform import gfile
            model = strip_unused_lib.strip_unused(
                input_graph_def=model,
                input_node_names=in_nodes,
                output_node_names=dest_nodes,
                placeholder_type_enum=dtypes.float32.as_datatype_enum)

            input_list = [None]
            for i in range(len(inputShape)):
                input_list.append(tensorflow.Dimension(inputShape[i]))
            tensor_input = tensorflow.TensorShape(input_list)
            # Build network graph
            self.tf_graph = TensorflowGraph(model)
            for node in self.tf_graph.model.node:
                if node.name in in_nodes:
                    node.attr['shape'].shape.CopyFrom(tensor_input.as_proto())
                    node.attr['_output_shapes'].list.shape.pop()  # drop the unknown_rank shape entry
                    node.attr['_output_shapes'].list.shape.extend(
                        [tensor_input.as_proto()])

        # extract subgraph using dest_nodes
        elif dest_nodes is not None:
            from tensorflow.python.framework.graph_util import extract_sub_graph
            model = extract_sub_graph(model, dest_nodes)

        # Get input node names
        if not in_nodes:
            in_nodes = []
            for node in model.node:
                if node.op == 'Placeholder':
                    in_nodes.append(node.name)

        # Graph Transform
        transforms = ["fold_constants(ignore_errors=true)"]
        transformed_graph_def = TransformGraph(model, in_nodes, dest_nodes,
                                               transforms)
        in_type_list = {}
        in_shape_list = {}

        for n in transformed_graph_def.node:
            if n.name in in_nodes:
                in_type_list[n.name] = n.attr['dtype'].type
                in_node_shape = n.attr['shape'].shape
                in_node_shape_str = self._shapeToStr(in_node_shape)
                in_shape_list[n.name] = in_node_shape_str

        dtype = tensorflow.float32
        with tensorflow.Graph().as_default() as g:
            input_map = {}
            for in_node in in_nodes:
                if in_type_list[in_node] == 1 or in_type_list[in_node] == 0:
                    dtype = tensorflow.float32

                elif in_type_list[in_node] == 3:
                    dtype = tensorflow.int32

                elif in_type_list[in_node] == 10:
                    dtype = tensorflow.bool

                x = tensorflow.placeholder(dtype, shape=in_shape_list[in_node])
                input_map[in_node] = x

            tensorflow.import_graph_def(transformed_graph_def,
                                        name='',
                                        input_map=input_map)

        with tensorflow.Session(graph=g) as sess:
            tempdir = tempfile.mkdtemp()
            meta_graph_def = tensorflow.train.export_meta_graph(
                filename=os.path.join(tempdir, 'my-model.meta'))
            model = meta_graph_def.graph_def
            shutil.rmtree(tempdir)

        self.tf_graph = TensorflowGraph(model)
        self.tf_graph.build()

        process_graph(self.tf_graph, self.ckpt_data)
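
A hedged usage sketch for the parser above, assuming a standard checkpoint pair and known node names (all file and node names below are assumptions):

parser = TensorflowParser(
    meta_file='my-model.meta',        # assumed meta graph file
    checkpoint_file='my-model.ckpt',  # assumed checkpoint prefix
    dest_nodes=['logits'],            # assumed output node name
    inputShape=[224, 224, 3],         # per-dimension sizes; a batch dim of None is prepended
    in_nodes=['input'])               # assumed input placeholder name
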
Example #3
pred = [None] * num_output
pred_node_names = [None] * num_output
for i in range(num_output):
    pred_node_names[i] = args.output_node_prefix+str(i)
    pred[i] = tf.identity(net_model.outputs[i], name=pred_node_names[i])
print('output node names are: ', pred_node_names)


# [optional] write graph definition in ascii
sess = K.get_session()

if args.graph_def:
    f = args.output_graphdef_file 
    tf.train.write_graph(sess.graph.as_graph_def(), output_fld, f, as_text=True)
    print('saved the graph definition in ascii format at: ', osp.join(output_fld, f))


# convert variables to constants and save
from tensorflow.python.framework import graph_util
from tensorflow.python.framework import graph_io
from tensorflow.tools.graph_transforms import TransformGraph
if args.quantize:
    transforms = ["quantize_weights", "quantize_nodes"]
    transformed_graph_def = TransformGraph(sess.graph.as_graph_def(), [], pred_node_names, transforms)
    constant_graph = graph_util.convert_variables_to_constants(sess, transformed_graph_def, pred_node_names)
else:
    constant_graph = graph_util.convert_variables_to_constants(sess, sess.graph.as_graph_def(), pred_node_names)    
graph_io.write_graph(constant_graph, output_fld, args.output_model_file, as_text=False)
print('saved the frozen graph (ready for inference) at: ', osp.join(output_fld, args.output_model_file))
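
Once written, the frozen graph can be loaded back for inference. A minimal sketch that reuses the names from the snippet above (the input tensor name is an assumption):

import tensorflow as tf

graph_def = tf.GraphDef()
with tf.gfile.GFile(osp.join(output_fld, args.output_model_file), 'rb') as f:
    graph_def.ParseFromString(f.read())

with tf.Graph().as_default() as graph:
    tf.import_graph_def(graph_def, name='')
    inp = graph.get_tensor_by_name('input_1:0')  # assumed input tensor name
    out = graph.get_tensor_by_name(pred_node_names[0] + ':0')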

Example #4
def optimize_graph(params):

    config = tf.ConfigProto(device_count={'GPU': 0}, allow_soft_placement=True)

    init_checkpoint = params.ckpt_dir

    tf.logging.info('build graph...')
    # input placeholders, not sure if they are friendly to XLA
    input_ids = tf.placeholder(tf.int32, (None, params.max_seq_len),
                               'input_ids')
    input_mask = tf.placeholder(tf.int32, (None, params.max_seq_len),
                                'input_mask')
    input_type_ids = tf.placeholder(tf.int32, (None, params.max_seq_len),
                                    'segment_ids')

    jit_scope = tf.contrib.compiler.jit.experimental_jit_scope

    with jit_scope():
        features = {}
        features['input_ids'] = input_ids
        features['input_mask'] = input_mask
        features['segment_ids'] = input_type_ids
        model = BertMultiTask(params)
        hidden_feature = model.body(features, tf.estimator.ModeKeys.PREDICT)
        pred = model.top(features, hidden_feature,
                         tf.estimator.ModeKeys.PREDICT)

        output_tensors = [pred[k] for k in pred]

        tvars = tf.trainable_variables()

        (assignment_map, initialized_variable_names
         ) = modeling.get_assignment_map_from_checkpoint(
             tvars, init_checkpoint)

        tf.train.init_from_checkpoint(init_checkpoint, assignment_map)

        tmp_g = tf.get_default_graph().as_graph_def()

    input_node_names = ['input_ids', 'input_mask', 'segment_ids']
    output_node_names = [
        '%s_top/%s_predict' %
        (params.share_top[problem], params.share_top[problem])
        for problem in params.problem_list
    ]

    transforms = [
        'remove_nodes(op=Identity)',
        'fold_constants(ignore_errors=true)',
        'fold_batch_norms',
        # 'quantize_weights',
        # 'quantize_nodes',
        'merge_duplicate_nodes',
        'strip_unused_nodes',
        'sort_by_execution_order'
    ]

    with tf.Session(config=config) as sess:
        tf.logging.info('load parameters from checkpoint...')
        sess.run(tf.global_variables_initializer())
        tf.logging.info('freeze...')
        tmp_g = tf.graph_util.convert_variables_to_constants(
            sess, tmp_g, [n.name[:-2] for n in output_tensors])
        tmp_g = TransformGraph(tmp_g, input_node_names, output_node_names,
                               transforms)
    tmp_file = os.path.join(params.ckpt_dir, 'export_model')
    tf.logging.info('write graph to a tmp file: %s' % tmp_file)
    with tf.gfile.GFile(tmp_file, 'wb') as f:
        f.write(tmp_g.SerializeToString())
    return tmp_file
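
optimize_graph only touches a few fields of params. A hedged sketch of a minimal stand-in (field names are taken from the code above; the values are illustrative):

from types import SimpleNamespace

params = SimpleNamespace(
    ckpt_dir='models/bert_multitask',  # assumed checkpoint directory
    max_seq_len=128,
    problem_list=['ner'],              # assumed problem names
    share_top={'ner': 'ner'})          # maps each problem to its top-layer name

graph_path = optimize_graph(params)
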
Example #5
input_node_names = [the_inp.name.split(':')[0]]
output_node_names = [the_out.name.split(':')[0]]

print("Input node names:", input_node_names)
print("Output node names:", output_node_names)

graph_def = tf.graph_util.convert_variables_to_constants(
    sess, sess.graph_def, output_node_names)

#%%
from tensorflow.tools.graph_transforms import TransformGraph
graph_def = TransformGraph(graph_def, input_node_names, output_node_names, [
    'strip_unused_nodes()',
    'remove_nodes(op=Identity, op=CheckNumerics)',
    'fold_constants()',
    'fold_old_batch_norms',
    'fold_batch_norms',
    'round_weights(num_steps=128)',
])

#%%

with open('batched_elmo128.pb', 'wb') as f:
    f.write(graph_def.SerializeToString())

#%%
newg = tf.Graph()

with newg.as_default():
    tf.import_graph_def(graph_def)
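
The snippet above assumes the_inp and the_out are already bound to the model's input and output tensors inside an active session. A hedged sketch of how they might be obtained (tensor names are assumptions):

the_inp = sess.graph.get_tensor_by_name('tokens:0')       # assumed input tensor
the_out = sess.graph.get_tensor_by_name('elmo_output:0')  # assumed output tensor
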
Example #6
with gfile.FastGFile(FREEZED_PATH, 'wb') as f:
    f.write(optimized_graph_def.SerializeToString())

print("Starting graph optimization ... ")
transforms = [
    'strip_unused_nodes(type=float, shape="1,160,576,3")',
    'remove_nodes(op=Identity, op=CheckNumerics)',
    'fold_constants(ignore_errors=false)',
    'fold_batch_norms',
    'remove_device',
    'fold_old_batch_norms',
    'fold_constants(ignore_errors=false)',
    'round_weights(num_steps=256)',
]

# 'fuse_convolutions',
for transform in transforms:
    print("Starting transform: `%s` ... " % transform)
    optimized_graph_def = TransformGraph(optimized_graph_def,
                                         [INPUT_TENSOR_NAME],
                                         [FINAL_TENSOR_NAME], [transform])

tf.summary.FileWriter('opt_log', graph_def=optimized_graph_def)
print("Wrote optimized graph to `%s` ... " % 'opt_log')

with gfile.FastGFile(OPTIMIZED_PATH, 'wb') as f:
    f.write(optimized_graph_def.SerializeToString())

print("Done! Wrote results to `%s`." % 'tf_files')
Example #7
def fix_fp16_modelerror(model_path,
                        targetop_name_expectedfp32=None,
                        targetop_input_expectedfp32=None,
                        targetop_name_expectedfp16=None,
                        targetop_input_expectedfp16=None,
                        input_names=None,
                        output_names=None):
    """
    If you hit errors like the following when running an fp16-converted graph, try the fixes below:
    e.g.1
    error1:
    Input 1 of node tower-pred-0/multilevel_roi_align/roi_level2/roi_align/crop_and_resize/CropAndResize was passed half from 
    tower-pred-0/multilevel_roi_align/roi_level2/roi_align/crop_and_resize/transform_fpcoor_for_tf/concat:0 incompatible with expected float.
    error2:
    Input 1 of node tower-pred-0/multilevel_roi_align/roi_level3/roi_align/crop_and_resize/CropAndResize was passed half from 
    tower-pred-0/multilevel_roi_align/roi_level3/roi_align/crop_and_resize/transform_fpcoor_for_tf/concat:0 incompatible with expected float.

    opexpected_type is 'fp32', opinput_type is 'fp16'
    targetop_name_expectedfp32: ['tower-pred-0/multilevel_roi_align/roi_level2/roi_align/crop_and_resize/CropAndResize']
    targetop_input_expectedfp32: ['tower-pred-0/multilevel_roi_align/roi_level2/roi_align/crop_and_resize/transform_fpcoor_for_tf/concat']

    e.g.2
    error1:
    Input 0 of node tower-pred-0/multilevel_roi_align/roi_level2/roi_align/crop_and_resize/transpose_1 was passed float from 
    tower-pred-0/multilevel_roi_align/roi_level2/roi_align/crop_and_resize/CropAndResize:0 incompatible with expected half.

    opexpected_type is 'fp16', opinput_type is 'fp32'
    targetop_name_expectedfp16: ['tower-pred-0/multilevel_roi_align/roi_level2/roi_align/crop_and_resize/transpose_1']
    targetop_input_expectedfp16: ['tower-pred-0/multilevel_roi_align/roi_level2/roi_align/crop_and_resize/CropAndResize']
    """
    # normalize the optional node-name lists so the membership tests below are safe
    targetop_name_expectedfp32 = targetop_name_expectedfp32 or []
    targetop_input_expectedfp32 = targetop_input_expectedfp32 or []
    targetop_name_expectedfp16 = targetop_name_expectedfp16 or []
    targetop_input_expectedfp16 = targetop_input_expectedfp16 or []

    source_graph_def = convert_graph_all2fp16(model_path,
                                              target_type='fp16',
                                              first_modify=False)

    for node in source_graph_def.node:
        ## add cast node, opexpected_type='fp32', opinput_type='fp16'
        if node.name in targetop_input_expectedfp32:  # e.g.1: the concat node feeding CropAndResize
            new_node_ = source_graph_def.node.add()
            new_node_.name = node.name + '_cast'
            new_node_.op = "Cast"
            new_node_.input.append(node.name)
            new_node_.attr["SrcT"].type = types_pb2.DT_HALF
            new_node_.attr["DstT"].type = types_pb2.DT_FLOAT
            new_node_.attr["Truncate"].b = False
            continue

        ## add cast node, opexpected_type='fp16', opinput_type='fp32'
        if node.name in targetop_input_expectedfp16:  # e.g.2: the CropAndResize node feeding transpose_1
            new_node_ = source_graph_def.node.add()
            new_node_.name = node.name + '_cast'
            new_node_.op = "Cast"
            new_node_.input.append(node.name)
            new_node_.attr["SrcT"].type = types_pb2.DT_FLOAT
            new_node_.attr["DstT"].type = types_pb2.DT_HALF
            new_node_.attr["Truncate"].b = False
            continue

    for node in source_graph_def.node:
        # rewire each target op's inputs to the new cast nodes
        if (node.name in targetop_name_expectedfp32) or (
                node.name in targetop_name_expectedfp16):
            for i, name in enumerate(node.input):
                if (name in targetop_input_expectedfp32) or (
                        name in targetop_input_expectedfp16):
                    node.input[i] = name + '_cast'
            continue

    # transform graph
    if output_names:
        if not input_names:
            input_names = []
        transforms = ["strip_unused_nodes"]
        source_graph_def = TransformGraph(source_graph_def, input_names,
                                          output_names, transforms)
    # write graph_def to model
    tf.io.write_graph(source_graph_def,
                      logdir=args.savepath,
                      name=args.outpbname + '-new.pb',
                      as_text=False)
    print("Converting new pb done ...")
Example #8
def optimize_graph_for_inference(model_dir, input_node_names,
                                 output_node_names):
    input_graph_path = os.path.join(model_dir, 'graph.pbtxt')
    input_checkpoint = tf.train.latest_checkpoint(model_dir)

    input_binary = False
    clear_devices = True

    print("Loading Graph `{}` ...".format(input_graph_path))
    print("Loading Checkpoint `{}` ...".format(input_checkpoint))

    print("Freezing Graph ...")
    # https://github.com/tensorflow/tensorflow/blob/r1.15/tensorflow/python/tools/freeze_graph.py#L286-L301
    frozen_graph_def = freeze_graph.freeze_graph(
        input_graph=input_graph_path,  # a `GraphDef` file to load
        input_saver="",  # a TensorFlow Saver file
        input_binary=input_binary,  # True means input_graph is a .pb, False a .pbtxt
        input_checkpoint=input_checkpoint,
        output_node_names=",".join(output_node_names),  # comma-separated output node names
        restore_op_name="",  # unused
        filename_tensor_name="",  # unused
        output_graph=os.path.join("/tmp", 'frozen_saved_model.pb'),  # where to write the frozen `GraphDef`
        clear_devices=clear_devices,  # whether to remove device specifications
        initializer_nodes="",  # comma-separated initializer nodes to run before freezing
        variable_names_whitelist="",  # variable names to convert (by default, all are converted)
        variable_names_blacklist="",  # variable names to keep as variables (optional)
        input_meta_graph=None,  # a `MetaGraphDef` file to load (optional)
        input_saved_model_dir=None,  # path to a dir with a SavedModel and variables (optional)
        saved_model_tags=tag_constants.SERVING,  # comma-separated MetaGraphDef tags to load
        checkpoint_version=saver_pb2.SaverDef.V2)  # variable file format (V1 or V2)

    print("Optimizing Graph for Inference ...")
    optimized_frozen_graph = optimize_for_inference_lib.optimize_for_inference(
        frozen_graph_def,
        input_node_names,  # an array of the input node(s)
        output_node_names,  # an array of output nodes
        tf.float32.as_datatype_enum)

    transforms = [
        'remove_nodes(op=Identity)', 'merge_duplicate_nodes',
        'strip_unused_nodes', 'fold_constants(ignore_errors=true)',
        'fold_batch_norms'
    ]

    print("Applying Graph Transformations ...")
    return TransformGraph(
        optimized_frozen_graph,
        input_node_names,  # an array of the input node(s)
        output_node_names,  # an array of output nodes
        transforms)
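
A hedged usage sketch (the directory and node names are assumptions):

optimized = optimize_graph_for_inference(
    'checkpoints/my_model',  # assumed model_dir containing graph.pbtxt and checkpoints
    ['input_ids'],           # assumed input node names
    ['logits'])              # assumed output node names

with tf.gfile.GFile('optimized_model.pb', 'wb') as f:
    f.write(optimized.SerializeToString())
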
Example #9
import tensorflow as tf
from tensorflow.python.tools import optimize_for_inference_lib
from tensorflow.tools.graph_transforms import TransformGraph

output_names = ['conv2d_input', 'conv2d/kernel/Initializer/random_uniform/shape', 'conv2d/kernel/Initializer/random_uniform/min', 'conv2d/kernel/Initializer/random_uniform/max', 'conv2d/kernel/Initializer/random_uniform/RandomUniform', 'conv2d/kernel/Initializer/random_uniform/sub', 'conv2d/kernel/Initializer/random_uniform/mul', 'conv2d/kernel/Initializer/random_uniform', 'conv2d/kernel', 'conv2d/kernel/IsInitialized/VarIsInitializedOp', 'conv2d/kernel/Assign', 'conv2d/kernel/Read/ReadVariableOp', 'conv2d/bias/Initializer/zeros', 'conv2d/bias', 'conv2d/bias/IsInitialized/VarIsInitializedOp', 'conv2d/bias/Assign', 'conv2d/bias/Read/ReadVariableOp', 'conv2d/dilation_rate', 'conv2d/Conv2D/ReadVariableOp', 'conv2d/Conv2D', 'conv2d/BiasAdd/ReadVariableOp', 'conv2d/BiasAdd', 'activation/Relu', 'max_pooling2d/MaxPool', 'conv2d_1/kernel/Initializer/random_uniform/shape', 'conv2d_1/kernel/Initializer/random_uniform/min', 'conv2d_1/kernel/Initializer/random_uniform/max', 'conv2d_1/kernel/Initializer/random_uniform/RandomUniform', 'conv2d_1/kernel/Initializer/random_uniform/sub', 'conv2d_1/kernel/Initializer/random_uniform/mul', 'conv2d_1/kernel/Initializer/random_uniform', 'conv2d_1/kernel', 'conv2d_1/kernel/IsInitialized/VarIsInitializedOp', 'conv2d_1/kernel/Assign', 'conv2d_1/kernel/Read/ReadVariableOp', 'conv2d_1/bias/Initializer/zeros', 'conv2d_1/bias', 'conv2d_1/bias/IsInitialized/VarIsInitializedOp', 'conv2d_1/bias/Assign', 'conv2d_1/bias/Read/ReadVariableOp', 'conv2d_1/dilation_rate', 'conv2d_1/Conv2D/ReadVariableOp', 'conv2d_1/Conv2D', 'conv2d_1/BiasAdd/ReadVariableOp', 'conv2d_1/BiasAdd', 'activation_1/Relu', 'max_pooling2d_1/MaxPool', 'flatten/Shape', 'flatten/strided_slice/stack', 'flatten/strided_slice/stack_1', 'flatten/strided_slice/stack_2', 'flatten/strided_slice', 'flatten/Reshape/shape/1', 'flatten/Reshape/shape', 'flatten/Reshape', 'dense/kernel/Initializer/random_uniform/shape', 'dense/kernel/Initializer/random_uniform/min', 'dense/kernel/Initializer/random_uniform/max', 'dense/kernel/Initializer/random_uniform/RandomUniform', 'dense/kernel/Initializer/random_uniform/sub', 'dense/kernel/Initializer/random_uniform/mul', 'dense/kernel/Initializer/random_uniform', 'dense/kernel', 'dense/kernel/IsInitialized/VarIsInitializedOp', 'dense/kernel/Assign', 'dense/kernel/Read/ReadVariableOp', 'dense/bias/Initializer/zeros', 'dense/bias', 'dense/bias/IsInitialized/VarIsInitializedOp', 'dense/bias/Assign', 'dense/bias/Read/ReadVariableOp', 'dense/MatMul/ReadVariableOp', 'dense/MatMul', 'dense/BiasAdd/ReadVariableOp', 'dense/BiasAdd', 'dense_1/kernel/Initializer/random_uniform/shape', 'dense_1/kernel/Initializer/random_uniform/min', 'dense_1/kernel/Initializer/random_uniform/max', 'dense_1/kernel/Initializer/random_uniform/RandomUniform', 'dense_1/kernel/Initializer/random_uniform/sub', 'dense_1/kernel/Initializer/random_uniform/mul', 'dense_1/kernel/Initializer/random_uniform', 'dense_1/kernel', 'dense_1/kernel/IsInitialized/VarIsInitializedOp', 'dense_1/kernel/Assign', 'dense_1/kernel/Read/ReadVariableOp', 'dense_1/bias/Initializer/zeros', 'dense_1/bias', 'dense_1/bias/IsInitialized/VarIsInitializedOp', 'dense_1/bias/Assign', 'dense_1/bias/Read/ReadVariableOp', 'dense_1/MatMul/ReadVariableOp', 'dense_1/MatMul', 'dense_1/BiasAdd/ReadVariableOp', 'dense_1/BiasAdd', 'activation_2/Sigmoid', 'activation_2_target', 'total/Initializer/zeros', 'total', 'total/IsInitialized/VarIsInitializedOp', 'total/Assign', 'total/Read/ReadVariableOp', 'count/Initializer/zeros', 'count', 
'count/IsInitialized/VarIsInitializedOp', 'count/Assign', 'count/Read/ReadVariableOp', 'metrics/acc/Cast/x', 'metrics/acc/Greater', 'metrics/acc/Cast_1', 'metrics/acc/Equal', 'metrics/acc/Cast_2', 'metrics/acc/Mean/reduction_indices', 'metrics/acc/Mean', 'metrics/acc/Const', 'metrics/acc/Sum', 'metrics/acc/AssignAddVariableOp', 'metrics/acc/ReadVariableOp', 'metrics/acc/Size', 'metrics/acc/Cast_3', 'metrics/acc/AssignAddVariableOp_1', 'metrics/acc/ReadVariableOp_1', 'metrics/acc/div_no_nan/ReadVariableOp', 'metrics/acc/div_no_nan/ReadVariableOp_1', 'metrics/acc/div_no_nan', 'metrics/acc/Identity', 'loss/activation_2_loss/Const', 'loss/activation_2_loss/logistic_loss/zeros_like', 'loss/activation_2_loss/logistic_loss/GreaterEqual', 'loss/activation_2_loss/logistic_loss/Select', 'loss/activation_2_loss/logistic_loss/Neg', 'loss/activation_2_loss/logistic_loss/Select_1', 'loss/activation_2_loss/logistic_loss/mul', 'loss/activation_2_loss/logistic_loss/sub', 'loss/activation_2_loss/logistic_loss/Exp', 'loss/activation_2_loss/logistic_loss/Log1p', 'loss/activation_2_loss/logistic_loss', 'loss/activation_2_loss/Mean/reduction_indices', 'loss/activation_2_loss/Mean', 'loss/activation_2_loss/weighted_loss/Cast/x', 'loss/activation_2_loss/weighted_loss/broadcast_weights/assert_broadcastable/weights/shape', 'loss/activation_2_loss/weighted_loss/broadcast_weights/assert_broadcastable/weights/rank', 'loss/activation_2_loss/weighted_loss/broadcast_weights/assert_broadcastable/values/shape', 'loss/activation_2_loss/weighted_loss/broadcast_weights/assert_broadcastable/values/rank', 'loss/activation_2_loss/weighted_loss/broadcast_weights/assert_broadcastable/static_scalar_check_success', 'loss/activation_2_loss/weighted_loss/broadcast_weights/ones_like/Shape', 'loss/activation_2_loss/weighted_loss/broadcast_weights/ones_like/Const', 'loss/activation_2_loss/weighted_loss/broadcast_weights/ones_like', 'loss/activation_2_loss/weighted_loss/broadcast_weights', 'loss/activation_2_loss/weighted_loss/Mul', 'loss/activation_2_loss/Const_1', 'loss/activation_2_loss/Sum', 'loss/activation_2_loss/num_elements', 'loss/activation_2_loss/num_elements/Cast', 'loss/activation_2_loss/Const_2', 'loss/activation_2_loss/Sum_1', 'loss/activation_2_loss/value', 'loss/mul/x', 'loss/mul', 'keras_learning_phase/input', 'keras_learning_phase', 'training/Adam/gradients/gradients/Shape', 'training/Adam/gradients/gradients/grad_ys_0', 'training/Adam/gradients/gradients/Fill', 'training/Adam/gradients/gradients/loss/mul_grad/Mul', 'training/Adam/gradients/gradients/loss/mul_grad/Mul_1', 'training/Adam/gradients/gradients/loss/activation_2_loss/value_grad/Shape', 'training/Adam/gradients/gradients/loss/activation_2_loss/value_grad/Shape_1', 'training/Adam/gradients/gradients/loss/activation_2_loss/value_grad/BroadcastGradientArgs', 'training/Adam/gradients/gradients/loss/activation_2_loss/value_grad/div_no_nan', 'training/Adam/gradients/gradients/loss/activation_2_loss/value_grad/Sum', 'training/Adam/gradients/gradients/loss/activation_2_loss/value_grad/Reshape', 'training/Adam/gradients/gradients/loss/activation_2_loss/value_grad/Neg', 'training/Adam/gradients/gradients/loss/activation_2_loss/value_grad/div_no_nan_1', 'training/Adam/gradients/gradients/loss/activation_2_loss/value_grad/div_no_nan_2', 'training/Adam/gradients/gradients/loss/activation_2_loss/value_grad/mul', 'training/Adam/gradients/gradients/loss/activation_2_loss/value_grad/Sum_1', 'training/Adam/gradients/gradients/loss/activation_2_loss/value_grad/Reshape_1', 
'training/Adam/gradients/gradients/loss/activation_2_loss/Sum_1_grad/Reshape/shape', 'training/Adam/gradients/gradients/loss/activation_2_loss/Sum_1_grad/Reshape', 'training/Adam/gradients/gradients/loss/activation_2_loss/Sum_1_grad/Const', 'training/Adam/gradients/gradients/loss/activation_2_loss/Sum_1_grad/Tile', 'training/Adam/gradients/gradients/loss/activation_2_loss/Sum_grad/Reshape/shape', 'training/Adam/gradients/gradients/loss/activation_2_loss/Sum_grad/Reshape', 'training/Adam/gradients/gradients/loss/activation_2_loss/Sum_grad/Shape', 'training/Adam/gradients/gradients/loss/activation_2_loss/Sum_grad/Tile', 'training/Adam/gradients/gradients/loss/activation_2_loss/weighted_loss/Mul_grad/Shape', 'training/Adam/gradients/gradients/loss/activation_2_loss/weighted_loss/Mul_grad/Shape_1', 'training/Adam/gradients/gradients/loss/activation_2_loss/weighted_loss/Mul_grad/BroadcastGradientArgs', 'training/Adam/gradients/gradients/loss/activation_2_loss/weighted_loss/Mul_grad/Mul', 'training/Adam/gradients/gradients/loss/activation_2_loss/weighted_loss/Mul_grad/Sum', 'training/Adam/gradients/gradients/loss/activation_2_loss/weighted_loss/Mul_grad/Reshape', 'training/Adam/gradients/gradients/loss/activation_2_loss/weighted_loss/Mul_grad/Mul_1', 'training/Adam/gradients/gradients/loss/activation_2_loss/weighted_loss/Mul_grad/Sum_1', 'training/Adam/gradients/gradients/loss/activation_2_loss/weighted_loss/Mul_grad/Reshape_1', 'training/Adam/gradients/gradients/loss/activation_2_loss/Mean_grad/Shape', 'training/Adam/gradients/gradients/loss/activation_2_loss/Mean_grad/Size', 'training/Adam/gradients/gradients/loss/activation_2_loss/Mean_grad/add', 'training/Adam/gradients/gradients/loss/activation_2_loss/Mean_grad/mod', 'training/Adam/gradients/gradients/loss/activation_2_loss/Mean_grad/Shape_1', 'training/Adam/gradients/gradients/loss/activation_2_loss/Mean_grad/range/start', 'training/Adam/gradients/gradients/loss/activation_2_loss/Mean_grad/range/delta', 'training/Adam/gradients/gradients/loss/activation_2_loss/Mean_grad/range', 'training/Adam/gradients/gradients/loss/activation_2_loss/Mean_grad/Fill/value', 'training/Adam/gradients/gradients/loss/activation_2_loss/Mean_grad/Fill', 'training/Adam/gradients/gradients/loss/activation_2_loss/Mean_grad/DynamicStitch', 'training/Adam/gradients/gradients/loss/activation_2_loss/Mean_grad/Maximum/y', 'training/Adam/gradients/gradients/loss/activation_2_loss/Mean_grad/Maximum', 'training/Adam/gradients/gradients/loss/activation_2_loss/Mean_grad/floordiv', 'training/Adam/gradients/gradients/loss/activation_2_loss/Mean_grad/Reshape', 'training/Adam/gradients/gradients/loss/activation_2_loss/Mean_grad/Tile', 'training/Adam/gradients/gradients/loss/activation_2_loss/Mean_grad/Shape_2', 'training/Adam/gradients/gradients/loss/activation_2_loss/Mean_grad/Shape_3', 'training/Adam/gradients/gradients/loss/activation_2_loss/Mean_grad/Const', 'training/Adam/gradients/gradients/loss/activation_2_loss/Mean_grad/Prod', 'training/Adam/gradients/gradients/loss/activation_2_loss/Mean_grad/Const_1', 'training/Adam/gradients/gradients/loss/activation_2_loss/Mean_grad/Prod_1', 'training/Adam/gradients/gradients/loss/activation_2_loss/Mean_grad/Maximum_1/y', 'training/Adam/gradients/gradients/loss/activation_2_loss/Mean_grad/Maximum_1', 'training/Adam/gradients/gradients/loss/activation_2_loss/Mean_grad/floordiv_1', 'training/Adam/gradients/gradients/loss/activation_2_loss/Mean_grad/Cast', 'training/Adam/gradients/gradients/loss/activation_2_loss/Mean_grad/truediv', 
'training/Adam/gradients/gradients/loss/activation_2_loss/logistic_loss_grad/Shape', 'training/Adam/gradients/gradients/loss/activation_2_loss/logistic_loss_grad/Shape_1', 'training/Adam/gradients/gradients/loss/activation_2_loss/logistic_loss_grad/BroadcastGradientArgs', 'training/Adam/gradients/gradients/loss/activation_2_loss/logistic_loss_grad/Sum', 'training/Adam/gradients/gradients/loss/activation_2_loss/logistic_loss_grad/Reshape', 'training/Adam/gradients/gradients/loss/activation_2_loss/logistic_loss_grad/Sum_1', 'training/Adam/gradients/gradients/loss/activation_2_loss/logistic_loss_grad/Reshape_1', 'training/Adam/gradients/gradients/loss/activation_2_loss/logistic_loss/sub_grad/Shape', 'training/Adam/gradients/gradients/loss/activation_2_loss/logistic_loss/sub_grad/Shape_1', 'training/Adam/gradients/gradients/loss/activation_2_loss/logistic_loss/sub_grad/BroadcastGradientArgs', 'training/Adam/gradients/gradients/loss/activation_2_loss/logistic_loss/sub_grad/Sum', 'training/Adam/gradients/gradients/loss/activation_2_loss/logistic_loss/sub_grad/Reshape', 'training/Adam/gradients/gradients/loss/activation_2_loss/logistic_loss/sub_grad/Neg', 'training/Adam/gradients/gradients/loss/activation_2_loss/logistic_loss/sub_grad/Sum_1', 'training/Adam/gradients/gradients/loss/activation_2_loss/logistic_loss/sub_grad/Reshape_1', 'training/Adam/gradients/gradients/loss/activation_2_loss/logistic_loss/Log1p_grad/add/x', 'training/Adam/gradients/gradients/loss/activation_2_loss/logistic_loss/Log1p_grad/add', 'training/Adam/gradients/gradients/loss/activation_2_loss/logistic_loss/Log1p_grad/Reciprocal', 'training/Adam/gradients/gradients/loss/activation_2_loss/logistic_loss/Log1p_grad/mul', 'training/Adam/gradients/gradients/loss/activation_2_loss/logistic_loss/Select_grad/zeros_like', 'training/Adam/gradients/gradients/loss/activation_2_loss/logistic_loss/Select_grad/Select', 'training/Adam/gradients/gradients/loss/activation_2_loss/logistic_loss/Select_grad/Select_1', 'training/Adam/gradients/gradients/loss/activation_2_loss/logistic_loss/mul_grad/Shape', 'training/Adam/gradients/gradients/loss/activation_2_loss/logistic_loss/mul_grad/Shape_1', 'training/Adam/gradients/gradients/loss/activation_2_loss/logistic_loss/mul_grad/BroadcastGradientArgs', 'training/Adam/gradients/gradients/loss/activation_2_loss/logistic_loss/mul_grad/Mul', 'training/Adam/gradients/gradients/loss/activation_2_loss/logistic_loss/mul_grad/Sum', 'training/Adam/gradients/gradients/loss/activation_2_loss/logistic_loss/mul_grad/Reshape', 'training/Adam/gradients/gradients/loss/activation_2_loss/logistic_loss/mul_grad/Mul_1', 'training/Adam/gradients/gradients/loss/activation_2_loss/logistic_loss/mul_grad/Sum_1', 'training/Adam/gradients/gradients/loss/activation_2_loss/logistic_loss/mul_grad/Reshape_1', 'training/Adam/gradients/gradients/loss/activation_2_loss/logistic_loss/Exp_grad/mul', 'training/Adam/gradients/gradients/loss/activation_2_loss/logistic_loss/Select_1_grad/zeros_like', 'training/Adam/gradients/gradients/loss/activation_2_loss/logistic_loss/Select_1_grad/Select', 'training/Adam/gradients/gradients/loss/activation_2_loss/logistic_loss/Select_1_grad/Select_1', 'training/Adam/gradients/gradients/loss/activation_2_loss/logistic_loss/Neg_grad/Neg', 'training/Adam/gradients/gradients/AddN', 'training/Adam/gradients/gradients/dense_1/BiasAdd_grad/BiasAddGrad', 'training/Adam/gradients/gradients/dense_1/MatMul_grad/MatMul', 'training/Adam/gradients/gradients/dense_1/MatMul_grad/MatMul_1', 
'training/Adam/gradients/gradients/dense/BiasAdd_grad/BiasAddGrad', 'training/Adam/gradients/gradients/dense/MatMul_grad/MatMul', 'training/Adam/gradients/gradients/dense/MatMul_grad/MatMul_1', 'training/Adam/gradients/gradients/flatten/Reshape_grad/Shape', 'training/Adam/gradients/gradients/flatten/Reshape_grad/Reshape', 'training/Adam/gradients/gradients/max_pooling2d_1/MaxPool_grad/MaxPoolGrad', 'training/Adam/gradients/gradients/activation_1/Relu_grad/ReluGrad', 'training/Adam/gradients/gradients/conv2d_1/BiasAdd_grad/BiasAddGrad', 'training/Adam/gradients/gradients/conv2d_1/Conv2D_grad/ShapeN', 'training/Adam/gradients/gradients/conv2d_1/Conv2D_grad/Conv2DBackpropInput', 'training/Adam/gradients/gradients/conv2d_1/Conv2D_grad/Conv2DBackpropFilter', 'training/Adam/gradients/gradients/max_pooling2d/MaxPool_grad/MaxPoolGrad', 'training/Adam/gradients/gradients/activation/Relu_grad/ReluGrad', 'training/Adam/gradients/gradients/conv2d/BiasAdd_grad/BiasAddGrad', 'training/Adam/gradients/gradients/conv2d/Conv2D_grad/ShapeN', 'training/Adam/gradients/gradients/conv2d/Conv2D_grad/Conv2DBackpropInput', 'training/Adam/gradients/gradients/conv2d/Conv2D_grad/Conv2DBackpropFilter', 'training/Adam/iter/Initializer/zeros', 'training/Adam/iter', 'training/Adam/iter/IsInitialized/VarIsInitializedOp', 'training/Adam/iter/Assign', 'training/Adam/iter/Read/ReadVariableOp', 'training/Adam/beta_1/Initializer/initial_value', 'training/Adam/beta_1', 'training/Adam/beta_1/IsInitialized/VarIsInitializedOp', 'training/Adam/beta_1/Assign', 'training/Adam/beta_1/Read/ReadVariableOp', 'training/Adam/beta_2/Initializer/initial_value', 'training/Adam/beta_2', 'training/Adam/beta_2/IsInitialized/VarIsInitializedOp', 'training/Adam/beta_2/Assign', 'training/Adam/beta_2/Read/ReadVariableOp', 'training/Adam/decay/Initializer/initial_value', 'training/Adam/decay', 'training/Adam/decay/IsInitialized/VarIsInitializedOp', 'training/Adam/decay/Assign', 'training/Adam/decay/Read/ReadVariableOp', 'training/Adam/learning_rate/Initializer/initial_value', 'training/Adam/learning_rate', 'training/Adam/learning_rate/IsInitialized/VarIsInitializedOp', 'training/Adam/learning_rate/Assign', 'training/Adam/learning_rate/Read/ReadVariableOp', 'training/Adam/conv2d/kernel/m/Initializer/zeros/shape_as_tensor', 'training/Adam/conv2d/kernel/m/Initializer/zeros/Const', 'training/Adam/conv2d/kernel/m/Initializer/zeros', 'training/Adam/conv2d/kernel/m', 'training/Adam/conv2d/kernel/m/IsInitialized/VarIsInitializedOp', 'training/Adam/conv2d/kernel/m/Assign', 'training/Adam/conv2d/kernel/m/Read/ReadVariableOp', 'training/Adam/conv2d/bias/m/Initializer/zeros', 'training/Adam/conv2d/bias/m', 'training/Adam/conv2d/bias/m/IsInitialized/VarIsInitializedOp', 'training/Adam/conv2d/bias/m/Assign', 'training/Adam/conv2d/bias/m/Read/ReadVariableOp', 'training/Adam/conv2d_1/kernel/m/Initializer/zeros/shape_as_tensor', 'training/Adam/conv2d_1/kernel/m/Initializer/zeros/Const', 'training/Adam/conv2d_1/kernel/m/Initializer/zeros', 'training/Adam/conv2d_1/kernel/m', 'training/Adam/conv2d_1/kernel/m/IsInitialized/VarIsInitializedOp', 'training/Adam/conv2d_1/kernel/m/Assign', 'training/Adam/conv2d_1/kernel/m/Read/ReadVariableOp', 'training/Adam/conv2d_1/bias/m/Initializer/zeros', 'training/Adam/conv2d_1/bias/m', 'training/Adam/conv2d_1/bias/m/IsInitialized/VarIsInitializedOp', 'training/Adam/conv2d_1/bias/m/Assign', 'training/Adam/conv2d_1/bias/m/Read/ReadVariableOp', 'training/Adam/dense/kernel/m/Initializer/zeros/shape_as_tensor', 
'training/Adam/dense/kernel/m/Initializer/zeros/Const', 'training/Adam/dense/kernel/m/Initializer/zeros', 'training/Adam/dense/kernel/m', 'training/Adam/dense/kernel/m/IsInitialized/VarIsInitializedOp', 'training/Adam/dense/kernel/m/Assign', 'training/Adam/dense/kernel/m/Read/ReadVariableOp', 'training/Adam/dense/bias/m/Initializer/zeros', 'training/Adam/dense/bias/m', 'training/Adam/dense/bias/m/IsInitialized/VarIsInitializedOp', 'training/Adam/dense/bias/m/Assign', 'training/Adam/dense/bias/m/Read/ReadVariableOp', 'training/Adam/dense_1/kernel/m/Initializer/zeros', 'training/Adam/dense_1/kernel/m', 'training/Adam/dense_1/kernel/m/IsInitialized/VarIsInitializedOp', 'training/Adam/dense_1/kernel/m/Assign', 'training/Adam/dense_1/kernel/m/Read/ReadVariableOp', 'training/Adam/dense_1/bias/m/Initializer/zeros', 'training/Adam/dense_1/bias/m', 'training/Adam/dense_1/bias/m/IsInitialized/VarIsInitializedOp', 'training/Adam/dense_1/bias/m/Assign', 'training/Adam/dense_1/bias/m/Read/ReadVariableOp', 'training/Adam/conv2d/kernel/v/Initializer/zeros/shape_as_tensor', 'training/Adam/conv2d/kernel/v/Initializer/zeros/Const', 'training/Adam/conv2d/kernel/v/Initializer/zeros', 'training/Adam/conv2d/kernel/v', 'training/Adam/conv2d/kernel/v/IsInitialized/VarIsInitializedOp', 'training/Adam/conv2d/kernel/v/Assign', 'training/Adam/conv2d/kernel/v/Read/ReadVariableOp', 'training/Adam/conv2d/bias/v/Initializer/zeros', 'training/Adam/conv2d/bias/v', 'training/Adam/conv2d/bias/v/IsInitialized/VarIsInitializedOp', 'training/Adam/conv2d/bias/v/Assign', 'training/Adam/conv2d/bias/v/Read/ReadVariableOp', 'training/Adam/conv2d_1/kernel/v/Initializer/zeros/shape_as_tensor', 'training/Adam/conv2d_1/kernel/v/Initializer/zeros/Const', 'training/Adam/conv2d_1/kernel/v/Initializer/zeros', 'training/Adam/conv2d_1/kernel/v', 'training/Adam/conv2d_1/kernel/v/IsInitialized/VarIsInitializedOp', 'training/Adam/conv2d_1/kernel/v/Assign', 'training/Adam/conv2d_1/kernel/v/Read/ReadVariableOp', 'training/Adam/conv2d_1/bias/v/Initializer/zeros', 'training/Adam/conv2d_1/bias/v', 'training/Adam/conv2d_1/bias/v/IsInitialized/VarIsInitializedOp', 'training/Adam/conv2d_1/bias/v/Assign', 'training/Adam/conv2d_1/bias/v/Read/ReadVariableOp', 'training/Adam/dense/kernel/v/Initializer/zeros/shape_as_tensor', 'training/Adam/dense/kernel/v/Initializer/zeros/Const', 'training/Adam/dense/kernel/v/Initializer/zeros', 'training/Adam/dense/kernel/v', 'training/Adam/dense/kernel/v/IsInitialized/VarIsInitializedOp', 'training/Adam/dense/kernel/v/Assign', 'training/Adam/dense/kernel/v/Read/ReadVariableOp', 'training/Adam/dense/bias/v/Initializer/zeros', 'training/Adam/dense/bias/v', 'training/Adam/dense/bias/v/IsInitialized/VarIsInitializedOp', 'training/Adam/dense/bias/v/Assign', 'training/Adam/dense/bias/v/Read/ReadVariableOp', 'training/Adam/dense_1/kernel/v/Initializer/zeros', 'training/Adam/dense_1/kernel/v', 'training/Adam/dense_1/kernel/v/IsInitialized/VarIsInitializedOp', 'training/Adam/dense_1/kernel/v/Assign', 'training/Adam/dense_1/kernel/v/Read/ReadVariableOp', 'training/Adam/dense_1/bias/v/Initializer/zeros', 'training/Adam/dense_1/bias/v', 'training/Adam/dense_1/bias/v/IsInitialized/VarIsInitializedOp', 'training/Adam/dense_1/bias/v/Assign', 'training/Adam/dense_1/bias/v/Read/ReadVariableOp', 'training/Adam/Identity/ReadVariableOp', 'training/Adam/Identity', 'training/Adam/ReadVariableOp', 'training/Adam/add/y', 'training/Adam/add', 'training/Adam/Cast', 'training/Adam/Identity_1/ReadVariableOp', 'training/Adam/Identity_1', 
'training/Adam/Identity_2/ReadVariableOp', 'training/Adam/Identity_2', 'training/Adam/Pow', 'training/Adam/Pow_1', 'training/Adam/sub/x', 'training/Adam/sub', 'training/Adam/Sqrt', 'training/Adam/sub_1/x', 'training/Adam/sub_1', 'training/Adam/truediv', 'training/Adam/mul', 'training/Adam/Const', 'training/Adam/sub_2/x', 'training/Adam/sub_2', 'training/Adam/sub_3/x', 'training/Adam/sub_3', 'training/Adam/Adam/update_conv2d/kernel/ResourceApplyAdam', 'training/Adam/Adam/update_conv2d/bias/ResourceApplyAdam', 'training/Adam/Adam/update_conv2d_1/kernel/ResourceApplyAdam', 'training/Adam/Adam/update_conv2d_1/bias/ResourceApplyAdam', 'training/Adam/Adam/update_dense/kernel/ResourceApplyAdam', 'training/Adam/Adam/update_dense/bias/ResourceApplyAdam', 'training/Adam/Adam/update_dense_1/kernel/ResourceApplyAdam', 'training/Adam/Adam/update_dense_1/bias/ResourceApplyAdam', 'training/Adam/Adam/Const', 'training/Adam/Adam/AssignAddVariableOp', 'training/Adam/Adam/ReadVariableOp', 'training_1/group_deps', 'conv2d/kernel_0/tag', 'conv2d/kernel_0/ReadVariableOp', 'conv2d/kernel_0', 'conv2d/bias_0/tag', 'conv2d/bias_0/ReadVariableOp', 'conv2d/bias_0', 'conv2d_out/tag', 'conv2d_out', 'activation_out/tag', 'activation_out', 'max_pooling2d_out/tag', 'max_pooling2d_out', 'conv2d_1/kernel_0/tag', 'conv2d_1/kernel_0/ReadVariableOp', 'conv2d_1/kernel_0', 'conv2d_1/bias_0/tag', 'conv2d_1/bias_0/ReadVariableOp', 'conv2d_1/bias_0', 'conv2d_1_out/tag', 'conv2d_1_out', 'activation_1_out/tag', 'activation_1_out', 'max_pooling2d_1_out/tag', 'max_pooling2d_1_out', 'flatten_out/tag', 'flatten_out', 'dense/kernel_0/tag', 'dense/kernel_0/ReadVariableOp', 'dense/kernel_0', 'dense/bias_0/tag', 'dense/bias_0/ReadVariableOp', 'dense/bias_0', 'dense_out/tag', 'dense_out', 'dense_1/kernel_0/tag', 'dense_1/kernel_0/ReadVariableOp', 'dense_1/kernel_0', 'dense_1/bias_0/tag', 'dense_1/bias_0/ReadVariableOp', 'dense_1/bias_0', 'dense_1_out/tag', 'dense_1_out', 'activation_2_out/tag', 'activation_2_out', 'Merge/MergeSummary', 'Placeholder', 'AssignVariableOp', 'ReadVariableOp', 'Placeholder_1', 'AssignVariableOp_1', 'ReadVariableOp_1', 'VarIsInitializedOp', 'VarIsInitializedOp_1', 'VarIsInitializedOp_2', 'VarIsInitializedOp_3', 'VarIsInitializedOp_4', 'VarIsInitializedOp_5', 'VarIsInitializedOp_6', 'VarIsInitializedOp_7', 'VarIsInitializedOp_8', 'VarIsInitializedOp_9', 'VarIsInitializedOp_10', 'VarIsInitializedOp_11', 'VarIsInitializedOp_12', 'VarIsInitializedOp_13', 'VarIsInitializedOp_14', 'VarIsInitializedOp_15', 'VarIsInitializedOp_16', 'VarIsInitializedOp_17', 'VarIsInitializedOp_18', 'VarIsInitializedOp_19', 'VarIsInitializedOp_20', 'VarIsInitializedOp_21', 'VarIsInitializedOp_22', 'VarIsInitializedOp_23', 'VarIsInitializedOp_24', 'VarIsInitializedOp_25', 'VarIsInitializedOp_26', 'VarIsInitializedOp_27', 'VarIsInitializedOp_28', 'VarIsInitializedOp_29', 'VarIsInitializedOp_30', 'init', 'evaluation/group_deps']
with tf.gfile.FastGFile('TF1Model_Frozen_Graph.pb', 'rb') as f:
    graph_def = tf.GraphDef()
    graph_def.ParseFromString(f.read())
    graph_def = optimize_for_inference_lib.optimize_for_inference(graph_def, ['Placeholder'], output_names, tf.float32.as_datatype_enum)
    graph_def = TransformGraph(graph_def, ['Placeholder'], output_names, ['remove_nodes(op=PlaceholderWithDefault)', 'sort_by_execution_order'])
    with tf.gfile.FastGFile('TF1Model_Consts_Fused.pb', 'wb') as f:
        f.write(graph_def.SerializeToString())

Example #10
def convert_graph_all2fp16(model_path,
                           target_type='fp16',
                           first_modify=True,
                           input_names=None,
                           output_names=None):
    if target_type == 'fp16':
        dtype = types_pb2.DT_HALF
    elif target_type == 'fp64':
        dtype = types_pb2.DT_DOUBLE
    else:
        dtype = types_pb2.DT_FLOAT
    source_sess = load_graph(model_path)
    source_graph_def = source_sess.graph.as_graph_def()
    target_graph_def = graph_pb2.GraphDef()
    target_graph_def.versions.CopyFrom(source_graph_def.versions)

    for node in source_graph_def.node:
        # fused batch norm node
        if node.op == "FusedBatchNorm":
            rewrite_batch_norm_node_v2(node,
                                       target_graph_def,
                                       target_type=target_type)
            continue
        # replicate node
        new_node = target_graph_def.node.add()
        new_node.op = node.op
        new_node.name = node.name
        new_node.input.extend(node.input)
        attrs = list(node.attr.keys())
        # keep batch-norm parameter nodes in float: FusedBatchNorm expects its
        # scale/offset/mean/variance inputs to stay float
        if ("BatchNorm" in node.name) or ('batch_normalization' in node.name) \
                or ('bn/gamma' in node.name) or ('bn/beta' in node.name) \
                or ('bn/mean' in node.name) or ('bn/variance' in node.name):
            for attr in attrs:
                new_node.attr[attr].CopyFrom(node.attr[attr])
            continue

        # replace dtype in node attr with target dtype
        for attr in attrs:
            # keep special nodes in fp32 (keep_fp32_node_name is assumed to be
            # a module-level list of node names)
            if node.name in keep_fp32_node_name:
                new_node.attr[attr].CopyFrom(node.attr[attr])
                continue
            if node.attr[attr].type == types_pb2.DT_FLOAT:
                # modify node dtype
                node.attr[attr].type = dtype
            if attr == "value":
                tensor = node.attr[attr].tensor
                if tensor.dtype == types_pb2.DT_FLOAT:
                    # if float_val exists
                    if tensor.float_val:
                        float_val = tf.make_ndarray(node.attr[attr].tensor)
                        new_node.attr[attr].tensor.CopyFrom(
                            tf.make_tensor_proto(float_val, dtype=dtype))
                        continue
                    # if tensor content exists
                    if tensor.tensor_content:
                        tensor_shape = [
                            x.size for x in tensor.tensor_shape.dim
                        ]
                        tensor_weights = tf.make_ndarray(tensor)
                        # reshape tensor
                        tensor_weights = np.reshape(tensor_weights,
                                                    tensor_shape)
                        tensor_proto = tf.make_tensor_proto(tensor_weights,
                                                            dtype=dtype)
                        new_node.attr[attr].tensor.CopyFrom(tensor_proto)
                        continue
            new_node.attr[attr].CopyFrom(node.attr[attr])

    if not first_modify:
        return target_graph_def
    else:
        # transform graph
        if output_names:
            if not input_names:
                input_names = []
            transforms = ["strip_unused_nodes"]
            target_graph_def = TransformGraph(target_graph_def, input_names,
                                              output_names, transforms)
        # write graph_def to model
        tf.io.write_graph(target_graph_def,
                          logdir=args.savepath,
                          name=args.outpbname + '.pb',
                          as_text=False)
        print("Converting done ...")
Example #11
    def __init__(self, option, src_model_file):
        # Keep in lexicographical order
        self._op_converters = {
            TFOpType.Abs.name: self.convert_elementwise,
            TFOpType.Add.name: self.convert_add,
            TFOpType.AddV2.name: self.convert_add,
            TFOpType.ArgMax.name: self.convert_argmax,
            TFOpType.AvgPool.name: self.convert_pooling,
            TFOpType.BatchMatMul.name: self.convert_matmul,
            TFOpType.BatchMatMulV2.name: self.convert_matmul,
            TFOpType.BatchToSpaceND.name: self.convert_space_batch,
            TFOpType.BiasAdd.name: self.convert_biasadd,
            TFOpType.Cast.name: self.convert_cast,
            TFOpType.ConcatV2.name: self.convert_concat,
            TFOpType.Const.name: self.convert_nop,
            TFOpType.Conv2D.name: self.convert_conv2d,
            TFOpType.Conv2DBackpropInput.name: self.convert_conv2d,
            TFOpType.Cumsum.name: self.convert_cumsum,
            TFOpType.DepthwiseConv2dNative.name: self.convert_conv2d,
            TFOpType.DepthToSpace.name: self.convert_space_depth,
            TFOpType.Div.name: self.convert_elementwise,
            TFOpType.Elu.name: self.convert_activation,
            TFOpType.Equal.name: self.convert_elementwise,
            TFOpType.ExpandDims.name: self.convert_expand_dims,
            TFOpType.ExtractImagePatches.name:
            self.convert_extract_image_patches,
            TFOpType.FakeQuantWithMinMaxVars.name: self.convert_fake_quantize,
            TFOpType.FakeQuantWithMinMaxArgs.name: self.convert_fake_quantize,
            TFOpType.Fill.name: self.convert_fill,
            TFOpType.FloorDiv.name: self.convert_elementwise,
            TFOpType.FusedBatchNorm.name: self.convert_fused_batchnorm,
            TFOpType.FusedBatchNormV2.name: self.convert_fused_batchnorm,
            TFOpType.FusedBatchNormV3.name: self.convert_fused_batchnorm,
            TFOpType.Gather.name: self.convert_gather,
            TFOpType.GatherV2.name: self.convert_gather,
            TFOpType.Identity.name: self.convert_identity,
            TFOpType.LeakyRelu.name: self.convert_activation,
            TFOpType.MatMul.name: self.convert_matmul,
            TFOpType.Max.name: self.convert_reduce,
            TFOpType.Maximum.name: self.convert_elementwise,
            TFOpType.MaxPool.name: self.convert_pooling,
            TFOpType.Mean.name: self.convert_reduce,
            TFOpType.Min.name: self.convert_reduce,
            TFOpType.Minimum.name: self.convert_elementwise,
            TFOpType.MirrorPad.name: self.convert_pad,
            TFOpType.Mul.name: self.convert_elementwise,
            TFOpType.Neg.name: self.convert_elementwise,
            TFOpType.NotEqual.name: self.convert_elementwise,
            TFOpType.OneHot.name: self.convert_one_hot,
            TFOpType.Pack.name: self.convert_stack,
            TFOpType.Pad.name: self.convert_pad,
            TFOpType.PadV2.name: self.convert_pad,
            TFOpType.Placeholder.name: self.convert_nop,
            TFOpType.Pow.name: self.convert_elementwise,
            TFOpType.Prod.name: self.convert_reduce,
            TFOpType.Sub.name: self.convert_elementwise,
            TFOpType.RealDiv.name: self.convert_elementwise,
            TFOpType.SquaredDifference.name: self.convert_elementwise,
            TFOpType.Square.name: self.convert_elementwise,
            TFOpType.Rsqrt.name: self.convert_elementwise,
            TFOpType.Relu.name: self.convert_activation,
            TFOpType.Relu6.name: self.convert_activation,
            TFOpType.Tanh.name: self.convert_activation,
            TFOpType.Reshape.name: self.convert_reshape,
            TFOpType.ResizeBicubic.name: self.convert_resize_bicubic,
            TFOpType.ResizeBilinear.name: self.convert_resize_bilinear,
            TFOpType.ResizeNearestNeighbor.name:
            self.convert_resize_nearest_neighbor,
            TFOpType.ReverseV2.name: self.convert_reverse,
            TFOpType.Select.name: self.convert_select,
            TFOpType.Shape.name: self.convert_shape,
            TFOpType.Sigmoid.name: self.convert_activation,
            TFOpType.Sign.name: self.convert_elementwise,
            TFOpType.Slice.name: self.convert_slice,
            TFOpType.Softmax.name: self.convert_softmax,
            TFOpType.SpaceToBatchND.name: self.convert_space_batch,
            TFOpType.SpaceToDepth.name: self.convert_space_depth,
            TFOpType.Split.name: self.convert_split,
            TFOpType.SplitV.name: self.convert_splitv,
            TFOpType.Sqrt.name: self.convert_elementwise,
            TFOpType.Squeeze.name: self.convert_squeeze,
            TFOpType.Stack.name: self.convert_stack,
            TFOpType.StridedSlice.name: self.convert_stridedslice,
            TFOpType.Sum.name: self.convert_reduce,
            TFOpType.Tile.name: self.convert_tile,
            TFOpType.Transpose.name: self.convert_transpose,
            TFOpType.Unpack.name: self.convert_unstack,
            TFOpType.Unstack.name: self.convert_unstack,
        }
        self._option = option
        self._mace_net_def = mace_pb2.NetDef()
        ConverterUtil.set_filter_format(self._mace_net_def, DataFormat.HWIO)
        ConverterUtil.add_data_format_arg(self._mace_net_def, DataFormat.NHWC)
        ConverterUtil.set_framework_type(self._mace_net_def,
                                         FrameworkType.TENSORFLOW.value)

        # import tensorflow graph
        tf_graph_def = tf.GraphDef()
        with tf.gfile.Open(src_model_file, 'rb') as f:
            tf_graph_def.ParseFromString(f.read())

        self._placeholders = {}
        self._skip_tensor = set()
        self._output_shape = {}

        print("Run transform_graph: %s" % TFTransformGraphOptions)
        try:
            print("output keys: ", option.output_nodes.keys())
            transformed_graph_def = TransformGraph(tf_graph_def,
                                                   option.input_nodes.keys(),
                                                   option.output_nodes.keys(),
                                                   TFTransformGraphOptions)
        except Exception as ex:
            print("Failed to transform graph using tf tool: %s" % ex)
            transformed_graph_def = tf_graph_def

        # To check optimized model, uncomment following code.
        # tf.io.write_graph(
        #     transformed_graph_def,
        #     ".",
        #     os.path.basename(src_model_file)[:-3] + "_opt.pb",
        #     as_text=False
        # )

        self.add_shape_info(transformed_graph_def)

        # reset default graph to clear earlier import
        tf.reset_default_graph()
        with tf.Session() as session:
            with session.graph.as_default() as graph:
                tf.import_graph_def(transformed_graph_def, name='')
                self._tf_graph = graph
                self.update_output_shapes(session)

        # we have polluted graph with 'shape' ops, so reset it and reload it
        # again
        tf.reset_default_graph()
        with tf.Session() as session:
            with session.graph.as_default() as graph:
                tf.import_graph_def(transformed_graph_def, name='')
                self._tf_graph = graph
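
The converter references a module-level TFTransformGraphOptions list that is not shown. A plausible definition, hedged: it is a list of graph_transforms strings along these lines:

TFTransformGraphOptions = [
    'strip_unused_nodes',
    'remove_nodes(op=Identity, op=CheckNumerics)',
    'fold_constants(ignore_errors=true)',
    'fold_batch_norms',
    'fold_old_batch_norms',
]
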
Example #12
def main(config):
    print(config)
    # Build Networks
    tf.reset_default_graph()

    photo_ph = tf.placeholder(tf.float32, [1, None, None, 1])  # input grayscale image, normalized to 0~1
    is_training = tf.constant(False) # Always False in testing

    ops = build_networks(config, photo_ph, is_training)
    print(ops)
    tfconfig = tf.ConfigProto()
    tfconfig.gpu_options.allow_growth = True 
    sess = tf.Session(config=tfconfig)
    sess.run(tf.global_variables_initializer())

    # load model
    saver = tf.train.Saver()
    print('Load trained models...')

    if os.path.isdir(config.model):
        checkpoint = tf.train.latest_checkpoint(config.model)
        model_dir = config.model
    else:
        checkpoint = config.model
        model_dir = os.path.dirname(config.model)


    if checkpoint is not None:
        print('Checkpoint', os.path.basename(checkpoint))
        print("[{}] Resuming...".format(time.asctime()))
        saver.restore(sess, checkpoint)
    else:
        raise ValueError('Cannot load model from {}'.format(model_dir))    
    print('Done.')

    output_graph = "export/normal.pb"
    print(tf.all_variables())
    # Optionally dump the raw (unfrozen) graph for inspection:
    # with tf.gfile.GFile(output_graph, "wb") as f:
    #     f.write(sess.graph.as_graph_def().SerializeToString())
    # tf.train.write_graph(sess.graph.as_graph_def(), 'export/',
    #                      'normaltxt.pbtxt', as_text=True)
    input_graph_def = sess.graph.as_graph_def()

    output_nodes_names = [ops['kpts'].op.name, ops['feats'].op.name,
                          ops['scale_maps'].op.name, ops['kpts_scale'].op.name,
                          ops['degree_maps'].op.name, ops['kpts_ori'].op.name]
    output_graph_def = graph_util.convert_variables_to_constants(
        sess,             # the session holding the trained variables
        input_graph_def,  # the graph definition with the nodes to freeze
        output_nodes_names
    )
    # output_graph_def.save("export/frozen2.pb")
    output_graph_name = "export/frozen.pb"
    # with tf.gfile.GFile(output_graph_name, "wb") as f:
    #     f.write(output_graph_def.SerializeToString())
    # tf.train.write_graph(output_graph_def, 'export/', 'frozen.pbtxt',
    #                      as_text=True)

    inp_node = ['Placeholder']
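    # Post-freeze cleanup: fold the frozen graph into inference form, re-sort
    # nodes by execution order, then strip attrs/nodes that inference does
    # not need. removeUnusedNodesAndAttrs and to_remove are assumed to be
    # defined elsewhere in the original script (OpenCV-style graph cleanup
    # helpers).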
    optimize_graph_def = optimize_for_inference_lib.optimize_for_inference(
        output_graph_def, inp_node, output_nodes_names,
        tf.float32.as_datatype_enum)
    optimize_graph_def = TransformGraph(optimize_graph_def, inp_node,
                                        output_nodes_names,
                                        ["sort_by_execution_order"])

    removeUnusedNodesAndAttrs(to_remove, optimize_graph_def)

    output_graph_name = "export/optimize.pb"
    with tf.gfile.GFile(output_graph_name, "wb") as f:
        f.write(optimize_graph_def.SerializeToString())
    tf.train.write_graph(optimize_graph_def, "export/", 'optimize.pbtxt', as_text=True)
Example #13
def save_tf_image(keras_model,
                  odir,
                  model_file='',
                  graph_file='',
                  quantize=False,
                  num_output=1,
                  output_node_prefix='output_node',
                  backend='tf'):
    """
    Helper function to save TF model, see
    https://github.com/amir-abdi/keras_to_tensorflow/blob/master/keras_to_tensorflow.py

    quantize: if set to True, use the quantize feature of Tensorflow
    (https://www.tensorflow.org/performance/quantization)

    num_output: this value is unrelated to the number of classes, batch size,
    etc., and is usually 1. If the network is a **multi-stream network**
    (a forked network with multiple outputs), set it to the number of outputs.

    output_node_prefix: the prefix to use for output nodes
    """
    from keras.models import load_model
    from keras import backend as K
    import tensorflow as tf

    if not os.path.isdir(odir):
        os.mkdir(odir)

    # Load keras model and rename output
    K.set_learning_phase(0)
    if backend == 'theano':
        K.set_image_data_format('channels_first')
    else:
        K.set_image_data_format('channels_last')

    # load keras model
    net_model = load_model(keras_model)

    if not model_file:
        model_file = 'model-%s.pb' % (time.strftime('%Y%m%d', time.gmtime()))

    # Attach tf.identity ops so each output tensor gets a predictable,
    # prefix-based name; freezing works on node names, so stable names make
    # the frozen graph easier to consume downstream.
    pred = [None] * num_output
    pred_node_names = [None] * num_output
    for i in range(num_output):
        pred_node_names[i] = output_node_prefix + str(i)
        pred[i] = tf.identity(net_model.outputs[i], name=pred_node_names[i])
    print('output nodes names are: ', pred_node_names)

    # [optional] write graph definition in ascii
    sess = K.get_session()

    if graph_file:
        tf.train.write_graph(sess.graph.as_graph_def(),
                             odir,
                             graph_file,
                             as_text=True)
        print('saved the graph definition in ascii format at: ',
              os.path.join(odir, graph_file))

    # convert variables to constants and save
    from tensorflow.python.framework import graph_util
    from tensorflow.python.framework import graph_io

    if quantize:  # https://www.tensorflow.org/performance/quantization
        from tensorflow.tools.graph_transforms import TransformGraph
        transforms = ["quantize_weights", "quantize_nodes"]
        transformed_graph_def = TransformGraph(sess.graph.as_graph_def(), [],
                                               pred_node_names, transforms)
        constant_graph = graph_util.convert_variables_to_constants(
            sess, transformed_graph_def, pred_node_names)
    else:
        constant_graph = graph_util.convert_variables_to_constants(
            sess, sess.graph.as_graph_def(), pred_node_names)
    graph_io.write_graph(constant_graph, odir, model_file, as_text=False)
    graph_io.write_graph(constant_graph,
                         odir,
                         model_file + 'txt',
                         as_text=True)
    print('saved TF model %s' % os.path.join(odir, model_file))
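
# Hypothetical usage (a sketch; the .h5 path and file names below are
# assumptions, not from the original code):
# save_tf_image('my_model.h5', 'export', model_file='frozen_model.pb',
#               graph_file='graph.pbtxt', quantize=False, num_output=1,
#               output_node_prefix='output_node')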
Example #14
def append_postprocessing_op(frozen_graph_def, max_detections,
                             max_classes_per_detection, nms_score_threshold,
                             nms_iou_threshold, num_classes, scale_values):
  """Appends postprocessing custom op.

  Args:
    frozen_graph_def: Frozen GraphDef for SSD model after freezing the
      checkpoint
    max_detections: Maximum number of detections (boxes) to show
    max_classes_per_detection: Number of classes to display per detection
    nms_score_threshold: Score threshold used in non-maximum suppression in
      post-processing
    nms_iou_threshold: Intersection-over-union threshold used in non-maximum
      suppression in post-processing
    num_classes: number of classes in the SSD detector
    scale_values: a dict with the key-value pairs {y_scale: 10, x_scale: 10,
      h_scale: 5, w_scale: 5} that are used to decode the centersize-encoded
      boxes

  Returns:
    transformed_graph_def: Frozen GraphDef with the postprocessing custom op
      appended. The TFLite_Detection_PostProcess custom op node has four
      outputs:
        detection_boxes: a float32 tensor of shape [1, num_boxes, 4] with box
          locations
        detection_classes: a float32 tensor of shape [1, num_boxes] with class
          indices
        detection_scores: a float32 tensor of shape [1, num_boxes] with class
          scores
        num_boxes: a float32 tensor of size 1 containing the number of
          detected boxes
  """
  new_output = frozen_graph_def.node.add()
  new_output.op = 'TFLite_Detection_PostProcess'
  new_output.name = 'TFLite_Detection_PostProcess'
  new_output.attr['_output_quantized'].CopyFrom(
      attr_value_pb2.AttrValue(b=True))
  new_output.attr['max_detections'].CopyFrom(
      attr_value_pb2.AttrValue(i=max_detections))
  new_output.attr['max_classes_per_detection'].CopyFrom(
      attr_value_pb2.AttrValue(i=max_classes_per_detection))
  new_output.attr['nms_score_threshold'].CopyFrom(
      attr_value_pb2.AttrValue(f=nms_score_threshold.pop()))
  new_output.attr['nms_iou_threshold'].CopyFrom(
      attr_value_pb2.AttrValue(f=nms_iou_threshold.pop()))
  new_output.attr['num_classes'].CopyFrom(
      attr_value_pb2.AttrValue(i=num_classes))

  new_output.attr['y_scale'].CopyFrom(
      attr_value_pb2.AttrValue(f=scale_values['y_scale'].pop()))
  new_output.attr['x_scale'].CopyFrom(
      attr_value_pb2.AttrValue(f=scale_values['x_scale'].pop()))
  new_output.attr['h_scale'].CopyFrom(
      attr_value_pb2.AttrValue(f=scale_values['h_scale'].pop()))
  new_output.attr['w_scale'].CopyFrom(
      attr_value_pb2.AttrValue(f=scale_values['w_scale'].pop()))

  new_output.input.extend(
      ['raw_outputs/box_encodings', 'raw_outputs/class_predictions', 'anchors'])
  # Transform the graph to append new postprocessing op
  input_names = []
  output_names = ['TFLite_Detection_PostProcess']
  transforms = ['strip_unused_nodes']
  transformed_graph_def = TransformGraph(frozen_graph_def, input_names,
                                         output_names, transforms)
  return transformed_graph_def
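
# Hypothetical usage (a sketch with assumed values): the thresholds and the
# scale entries are passed as single-element lists because the function
# consumes them with .pop().
# transformed = append_postprocessing_op(
#     frozen_graph_def, max_detections=10, max_classes_per_detection=1,
#     nms_score_threshold=[0.3], nms_iou_threshold=[0.6], num_classes=90,
#     scale_values={'y_scale': [10.0], 'x_scale': [10.0],
#                   'h_scale': [5.0], 'w_scale': [5.0]})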
Example #15
    # Save a graph
    graph_def = sess.graph.as_graph_def()

    # Freeze graph: replaces variables with constants.
    graph_def = tf.graph_util.convert_variables_to_constants(
        sess, graph_def, out_nodes)
    # Optimize graph: removes training-only ops and unused nodes.
    graph_def = optimize_for_inference_lib.optimize_for_inference(
        graph_def, inp_nodes, out_nodes, dtype.as_datatype_enum)
    # Fuse constant operations.
    transforms = ["fold_constants(ignore_errors=True)"]
    if args.quantize:
        transforms += ["quantize_weights(minimum_size=0)"]
    transforms += ["sort_by_execution_order"]
    graph_def = TransformGraph(graph_def, inp_nodes, out_nodes, transforms)

    # By default, float16 weights are stored in the tensor's repeated
    # `half_val` field. Its element type is int32, so the upper bytes are
    # unused zeros. Protobuf encodes this type as a varint: each byte carries
    # 7 bits of the value and the top bit marks whether the encoding
    # continues, so a float16 may take 1, 2 or 3 bytes depending on its
    # value. To improve compression, we move all `half_val` values into
    # `tensor_content`, which uses exactly 2 bytes per value.
    for node in graph_def.node:
        if 'value' in node.attr:
            halfs = node.attr["value"].tensor.half_val
            if not node.attr["value"].tensor.tensor_content and halfs:
                node.attr["value"].tensor.tensor_content = struct.pack(
                    'H' * len(halfs), *halfs)
                node.attr["value"].tensor.ClearField('half_val')
Example #16
    def __init__(self, option, src_model_file):
        self._op_converters = {
            TFOpType.Conv2D.name: self.convert_conv2d,
            TFOpType.DepthwiseConv2dNative.name: self.convert_conv2d,
            TFOpType.Conv2DBackpropInput.name: self.convert_conv2d,
            TFOpType.BiasAdd.name: self.convert_biasadd,
            TFOpType.Add.name: self.convert_add,
            TFOpType.Sub.name: self.convert_elementwise,
            TFOpType.Mul.name: self.convert_elementwise,
            TFOpType.Div.name: self.convert_elementwise,
            TFOpType.Min.name: self.convert_elementwise,
            TFOpType.Minimum.name: self.convert_elementwise,
            TFOpType.Max.name: self.convert_elementwise,
            TFOpType.Maximum.name: self.convert_elementwise,
            TFOpType.Neg.name: self.convert_elementwise,
            TFOpType.Abs.name: self.convert_elementwise,
            TFOpType.Pow.name: self.convert_elementwise,
            TFOpType.RealDiv.name: self.convert_elementwise,
            TFOpType.SquaredDifference.name: self.convert_elementwise,
            TFOpType.Square.name: self.convert_elementwise,
            TFOpType.Rsqrt.name: self.convert_elementwise,
            TFOpType.Equal.name: self.convert_elementwise,
            TFOpType.Relu.name: self.convert_activation,
            TFOpType.LeakyRelu.name: self.convert_activation,
            TFOpType.Relu6.name: self.convert_activation,
            TFOpType.Tanh.name: self.convert_activation,
            TFOpType.Sigmoid.name: self.convert_activation,
            TFOpType.Fill.name: self.convert_fill,
            TFOpType.FusedBatchNorm.name: self.convert_fused_batchnorm,
            TFOpType.AvgPool.name: self.convert_pooling,
            TFOpType.MaxPool.name: self.convert_pooling,
            TFOpType.MatMul.name: self.convert_matmul,
            TFOpType.BatchMatMul.name: self.convert_matmul,
            TFOpType.Identity.name: self.convert_identity,
            TFOpType.Reshape.name: self.convert_reshape,
            TFOpType.Shape.name: self.convert_shape,
            TFOpType.ExpandDims.name: self.convert_expand_dims,
            TFOpType.Squeeze.name: self.convert_squeeze,
            TFOpType.Transpose.name: self.convert_transpose,
            TFOpType.Softmax.name: self.convert_softmax,
            TFOpType.ResizeBicubic.name: self.convert_resize_bicubic,
            TFOpType.ResizeBilinear.name: self.convert_resize_bilinear,
            TFOpType.ResizeNearestNeighbor.name:
            self.convert_resize_nearest_neighbor,  # noqa
            TFOpType.Placeholder.name: self.convert_nop,
            TFOpType.SpaceToBatchND.name: self.convert_space_batch,
            TFOpType.BatchToSpaceND.name: self.convert_space_batch,
            TFOpType.DepthToSpace.name: self.convert_space_depth,
            TFOpType.SpaceToDepth.name: self.convert_space_depth,
            TFOpType.Pad.name: self.convert_pad,
            TFOpType.ConcatV2.name: self.convert_concat,
            TFOpType.Mean.name: self.convert_mean,
            TFOpType.Const.name: self.convert_nop,
            TFOpType.Gather.name: self.convert_gather,
            TFOpType.StridedSlice.name: self.convert_stridedslice,
            TFOpType.Slice.name: self.convert_slice,
            TFOpType.ReverseV2.name: self.convert_reverse,
            TFOpType.Pack.name: self.convert_stack,
            TFOpType.Stack.name: self.convert_stack,
            TFOpType.Unpack.name: self.convert_unstack,
            TFOpType.Unstack.name: self.convert_unstack,
            TFOpType.Cast.name: self.convert_cast,
            TFOpType.ArgMax.name: self.convert_argmax,
            TFOpType.Split.name: self.convert_split,
            TFOpType.FakeQuantWithMinMaxVars.name: self.convert_fake_quantize,
            TFOpType.FloorDiv.name: self.convert_elementwise,
            TFOpType.Sqrt.name: self.convert_elementwise,
            TFOpType.MirrorPad.name: self.convert_pad,
        }
        self._option = option
        self._mace_net_def = mace_pb2.NetDef()
        ConverterUtil.set_filter_format(self._mace_net_def, FilterFormat.HWIO)

        # import tensorflow graph
        tf_graph_def = tf.GraphDef()
        with tf.gfile.Open(src_model_file, 'rb') as f:
            tf_graph_def.ParseFromString(f.read())

        self._placeholders = {}

        print("Run transform_graph: %s" %
              TFTransformGraphOptions[option.device])
        try:
            print("output keys: ", option.output_nodes.keys())
            transformed_graph_def = TransformGraph(
                tf_graph_def, option.input_nodes.keys(),
                option.output_nodes.keys(),
                TFTransformGraphOptions[option.device])
        except Exception as ex:
            print("Failed to transform graph using tf tool: %s" % ex)
            transformed_graph_def = tf_graph_def

        # To inspect the optimized model, uncomment the following code.
        # tf.io.write_graph(
        #     transformed_graph_def,
        #     ".",
        #     os.path.basename(src_model_file)[:-3] + "_opt.pb",
        #     as_text=False
        # )

        self.add_shape_info(transformed_graph_def)

        with tf.Session() as session:
            with session.graph.as_default() as graph:
                tf.import_graph_def(transformed_graph_def, name='')
                self._tf_graph = graph

        self._skip_tensor = set()
        self._output_shape_list = []
        self._output_shape_op_list = []
Example #17
# Nodes whose names start with these prefixes should be removed
prefixesToRemove = ('MultipleGridAnchorGenerator/', 'Postprocessor/',
                    'Preprocessor/map')

# Read the graph.
with tf.gfile.FastGFile(args.input, 'rb') as f:
    graph_def = tf.GraphDef()
    graph_def.ParseFromString(f.read())

inpNames = ['image_tensor']
outNames = [
    'num_detections', 'detection_scores', 'detection_boxes',
    'detection_classes'
]
graph_def = TransformGraph(graph_def, inpNames, outNames,
                           ['sort_by_execution_order'])


def getUnconnectedNodes():
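    """Return names of nodes whose outputs are not consumed by any node.

    Relies on the graph being sorted by execution order (see the
    TransformGraph call above), so every input reference points to an
    earlier node in the list.
    """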
    unconnected = []
    for node in graph_def.node:
        unconnected.append(node.name)
        for inp in node.input:
            if inp in unconnected:
                unconnected.remove(inp)
    return unconnected


# Detect unfused batch normalization nodes and fuse them.
def fuse_batch_normalization():
    # Add_0 <-- moving_variance, add_y