def main(_):
    args = parse_args()

    saved_models = sorted(glob(os.path.join(args.saved_model_dir, '*')))

    assert len(saved_models) > 0

    with tf.Session() as sess:
        tf.saved_model.loader.load(sess, [tf.saved_model.tag_constants.SERVING], saved_models[-1])

        graph = tf.get_default_graph()

        convert_variables_to_constants = tf.graph_util.convert_variables_to_constants

        output_graph_def = convert_variables_to_constants(
            sess,
            graph.as_graph_def(),
            ["transferred"])

        input_names = ["images"]
        output_names = ["transferred"]

        transforms = ["strip_unused_nodes", "fold_batch_norms", "fold_constants", "quantize_weights"]

        transformed_graph_def = TransformGraph(
            output_graph_def,
            input_names,
            output_names,
            transforms)

        with tf.gfile.GFile(args.output_path, "wb") as f:
            f.write(transformed_graph_def.SerializeToString())
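
# Added for reference: a minimal sketch (not part of the original snippet) of
# loading the frozen, transformed graph written above and running it. The
# tensor names follow this example's node names; the input shape is an
# assumption, not taken from the original code.
def run_transformed_graph(pb_path):
    import numpy as np
    graph_def = tf.GraphDef()
    with tf.gfile.GFile(pb_path, "rb") as f:
        graph_def.ParseFromString(f.read())
    with tf.Graph().as_default() as g:
        tf.import_graph_def(graph_def, name="")
        with tf.Session(graph=g) as sess:
            dummy = np.zeros((1, 256, 256, 3), np.float32)  # assumed input shape
            return sess.run("transferred:0", feed_dict={"images:0": dummy})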
Example 2
def save_as_pb(file_path,
               sess,
               input_node_names,
               output_node_names,
               as_text=False):
    from google.protobuf import text_format
    from tensorflow.python.framework import graph_util
    from tensorflow.tools.graph_transforms import TransformGraph

    graph_def = tf.get_default_graph().as_graph_def()
    output_graph_def = graph_util.convert_variables_to_constants(
        sess, graph_def, output_node_names)

    transforms = [
        "fold_constants(ignore_errors=true)", "fold_batch_norms",
        "fold_old_batch_norms"
    ]

    output_graph_def = TransformGraph(output_graph_def, input_node_names,
                                      output_node_names, transforms)

    with tf.gfile.GFile(file_path, "wb") as f:
        if as_text:
            f.write(text_format.MessageToString(output_graph_def))
        else:
            f.write(output_graph_def.SerializeToString())
    return output_graph_def
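
# Added sketch (not from the original): exercising save_as_pb on a trivial
# graph. The node names "input"/"output" and the file name are assumptions.
def _demo_save_as_pb():
    with tf.Graph().as_default(), tf.Session() as sess:
        x = tf.placeholder(tf.float32, [None, 4], name="input")
        w = tf.Variable(tf.ones([4, 2]))
        tf.identity(tf.matmul(x, w), name="output")  # named output node
        sess.run(tf.global_variables_initializer())
        save_as_pb("demo.pb", sess, ["input"], ["output"])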
Example 3
def quantization(opt_graph, model_path, tf_records, eval_min_max_every_epoch):
    # first_quantize
    rewriter = quantize_graph.GraphRewriter(opt_graph, 'eightbit', None, None,
                                            True)
    first_quantize_graph = rewriter.rewrite(["policy_output", "value_output"])

    if eval_min_max_every_epoch:
        # insert_min_max_log
        transform = 'insert_logging(op=RequantizationRange, show_name=true, message="__requant_min_max:")'
        log_graph = TransformGraph(first_quantize_graph, ["pos_tensor"],
                                   ["policy_output", "value_output"],
                                   [transform])
        with tf.gfile.FastGFile(model_path + '_for_min_max.pb', 'wb') as f:
            f.write(log_graph.SerializeToString())

        # generate_min_max_log
        with logged_timer('minmax time'):
            generate_min_max_log(model_path + '_for_min_max.pb', tf_records,
                                 model_path + 'log.txt')

    # apply_calibration
    transform = 'freeze_requantization_ranges(min_max_log_file="{0}")'.format(
        model_path + 'log.txt')
    calibration_graph = TransformGraph(first_quantize_graph, ["pos_tensor"],
                                       ["policy_output", "value_output"],
                                       [transform])

    # fuse_requantize
    transform = 'fuse_quantized_conv_and_requantize strip_unused_nodes'
    output_graph = TransformGraph(calibration_graph, ["pos_tensor"],
                                  ["policy_output", "value_output"],
                                  [transform])
    return output_graph
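
# Hypothetical invocation of the pipeline above, assuming opt_graph is a frozen
# GraphDef exposing the "pos_tensor" input and the "policy_output"/"value_output"
# heads referenced in this example (paths are illustrative):
#
#     quantized = quantization(opt_graph, './model', 'data.tfrecord',
#                              eval_min_max_every_epoch=True)
#     with tf.gfile.GFile('./model_quantized.pb', 'wb') as f:
#         f.write(quantized.SerializeToString())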
Example 4
def prepare_for_dnn(sess,
                    graph_def,
                    in_node,
                    out_node,
                    out_graph,
                    dtype,
                    optimize=True,
                    quantize=False):
    # Freeze graph. Replaces variables with constants.
    graph_def = tf.graph_util.convert_variables_to_constants(
        sess, graph_def, [out_node])
    if optimize:
        # Optimize graph. Removes training-only ops, unused nodes.
        graph_def = optimize_for_inference_lib.optimize_for_inference(
            graph_def, [in_node], [out_node], dtype.as_datatype_enum)
        # Fuse constant operations.
        transforms = ["fold_constants(ignore_errors=True)"]
        if quantize:
            transforms += ["quantize_weights(minimum_size=0)"]
        transforms += ["sort_by_execution_order"]
        graph_def = TransformGraph(graph_def, [in_node], [out_node],
                                   transforms)
    # Serialize
    with tf.gfile.FastGFile(out_graph, 'wb') as f:
        f.write(graph_def.SerializeToString())
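
# Added sketch (not from the original): calling prepare_for_dnn on a minimal
# graph. The node names "in"/"out" and the output path are assumptions.
def _demo_prepare_for_dnn():
    with tf.Graph().as_default(), tf.Session() as sess:
        x = tf.placeholder(tf.float32, [1, 3], name="in")
        w = tf.Variable(tf.ones([3, 3]))
        tf.identity(tf.matmul(x, w), name="out")  # named output node
        sess.run(tf.global_variables_initializer())
        prepare_for_dnn(sess, sess.graph.as_graph_def(), "in", "out",
                        "demo.pb", tf.float32, optimize=True, quantize=False)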
Example 5
def export_model(input_node_names, output_node_name):
    freeze_graph.freeze_graph(OUTPUT_DIR + '/' + MODEL_NAME + '.pbtxt', None,
                              False, OUTPUT_DIR + '/' + MODEL_NAME + '.chkp',
                              output_node_name, "save/restore_all",
                              "save/Const:0",
                              OUTPUT_DIR + '/frozen_' + MODEL_NAME + '.pb',
                              True, "")

    input_graph_def = tf.GraphDef()

    with tf.gfile.Open(OUTPUT_DIR + '/frozen_' + MODEL_NAME + '.pb',
                       "rb") as f2:
        input_graph_def.ParseFromString(f2.read())
    transforms = ['quantize_weights']
    to_out = TransformGraph(input_graph_def, ["input"], [output_node_name],
                            transforms)

    with tf.gfile.FastGFile(OUTPUT_DIR + '/trans_' + MODEL_NAME + '.pb',
                            "wb") as f3:
        f3.write(to_out.SerializeToString())
    print("transform succeed")

    with tf.gfile.Open(OUTPUT_DIR + '/trans_' + MODEL_NAME + '.pb', "rb") as f:
        input_graph_def.ParseFromString(f.read())

    output_graph_def = optimize_for_inference_lib.optimize_for_inference(
        input_graph_def, input_node_names, [output_node_name],
        tf.float32.as_datatype_enum)

    with tf.gfile.FastGFile(OUTPUT_DIR + '/opt_' + MODEL_NAME + '.pb',
                            "wb") as f:
        f.write(output_graph_def.SerializeToString())

    print("graph saved!")
def convert_to_pb(model,
                  path,
                  input_layer_name,
                  output_layer_name,
                  pbfilename,
                  verbose=False):

    model.load(path, weights_only=True)
    print("[INFO] Loaded CNN network weights from " + path + " ...")

    print("[INFO] Re-export model ...")
    del tf.get_collection_ref(tf.GraphKeys.TRAIN_OPS)[:]
    model.save("model-tmp.tfl")

    # taken from: https://stackoverflow.com/questions/34343259/is-there-an-example-on-how-to-generate-protobuf-files-holding-trained-tensorflow

    print("[INFO] Re-import model ...")

    input_checkpoint = "model-tmp.tfl"
    saver = tf.train.import_meta_graph(input_checkpoint + '.meta', True)
    sess = tf.Session()
    saver.restore(sess, input_checkpoint)

    # print out all layers to find name of output

    if (verbose):
        for m in sess.graph.get_operations():
            print(m.values())

    print("[INFO] Freeze model to " + pbfilename + " ...")

    # freeze and removes nodes which are not related to feedforward prediction

    minimal_graph = convert_variables_to_constants(sess,
                                                   sess.graph.as_graph_def(),
                                                   [output_layer_name])

    graph_def = optimize_for_inference_lib.optimize_for_inference(
        minimal_graph, [input_layer_name], [output_layer_name],
        tf.float32.as_datatype_enum)
    graph_def = TransformGraph(graph_def, [input_layer_name],
                               [output_layer_name],
                               ["sort_by_execution_order"])

    with tf.gfile.GFile(pbfilename, 'wb') as f:
        f.write(graph_def.SerializeToString())

    # write model to logs dir so we can visualize it as:
    # tensorboard --logdir="logs"

    if (verbose):
        writer = tf.summary.FileWriter('logs', graph_def)
        writer.close()

    # tidy up tmp files

    for f in glob.glob("model-tmp.tfl*"):
        os.remove(f)

    os.remove('checkpoint')
Example 7
def freeze_graph(input_checkpoint, output_graph):
    output_node_names = "input_dataset/input_dataset_x,fc2/add"  # output nodes
    input_node_names = 'input_dataset/input_dataset_x'  # input node
    saver = tf.train.import_meta_graph(input_checkpoint + '.meta',
                                       clear_devices=True)
    graph = tf.get_default_graph()  # get the default graph
    input_graph_def = graph.as_graph_def()  # a serialized GraphDef representing the current graph

    with tf.Session() as sess:
        saver.restore(sess, input_checkpoint)  # restore the graph and its weights

        # print the tensors of every layer
        # op = sess.graph.get_operations()
        # [print(m.values()) for m in op][1]

        output_graph_def = tf.graph_util.convert_variables_to_constants(  # freeze variable values into constants
            sess=sess,
            input_graph_def=input_graph_def,  # equal to: sess.graph_def
            output_node_names=output_node_names.split(","))  # multiple output nodes are comma-separated

        # Shrink the graph, keeping only the input-to-output path
        output_graph_def = optimize_for_inference_lib.optimize_for_inference(
            output_graph_def, [input_node_names], output_node_names.split(","),
            tf.float32.as_datatype_enum)
        output_graph_def = TransformGraph(output_graph_def, [input_node_names],
                                          output_node_names.split(","),
                                          ["sort_by_execution_order"])

        with tf.gfile.GFile(output_graph, "wb") as f:  # save the model
            f.write(output_graph_def.SerializeToString())  # serialized output
Example 8
def save(sess):
    print(tf.all_variables())
    input_graph_def = sess.graph.as_graph_def()

    #for op in input_graph_def.node:
    #     print(op.name)

    output_nodes_names=["init_26"]
    output_graph_def = graph_util.convert_variables_to_constants(
            sess, # The session
            input_graph_def, # input_graph_def is useful for retrieving the nodes 
            output_nodes_names  
    )

    output_graph_name="freeze.pb"
    with tf.gfile.GFile(output_graph_name, "wb") as f:
        f.write(output_graph_def.SerializeToString())

    inp_node = ['Placeholder']
    optimize_graph_def = optimize_for_inference_lib.optimize_for_inference(output_graph_def, inp_node, output_nodes_names,
                                                                           tf.float32.as_datatype_enum)
    print("!")
    optimize_graph_def = TransformGraph(optimize_graph_def, inp_node, output_nodes_names, ["sort_by_execution_order"])

    output_graph_name="optimize.pb"
    with tf.gfile.GFile(output_graph_name, "wb") as f:
        f.write(optimize_graph_def.SerializeToString())
Example 9
def optimizePb(pbFilePath):
    from tensorflow.python.tools import optimize_for_inference_lib
    from tensorflow.tools.graph_transforms import TransformGraph

    with tf.gfile.FastGFile(pbFilePath, 'rb') as f:
        graph_def = tf.GraphDef()
        graph_def.ParseFromString(f.read())
        graph_def = optimize_for_inference_lib.optimize_for_inference(graph_def, ['Placeholder'], ['final_result'], tf.float32.as_datatype_enum)
        graph_def = TransformGraph(graph_def, ['module_apply_default/hub_input/Sub'], ['final_result'], ['remove_nodes(op=PlaceholderWithDefault)', 'strip_unused_nodes(type=float, shape=\"1,224,224,3\")', 'sort_by_execution_order'])
        with tf.gfile.FastGFile('./inference_graph.pb', 'wb') as f:
            f.write(graph_def.SerializeToString())
Example 10
def export(ARGS):
    config = tf.ConfigProto()
    config.gpu_options.allow_growth = True

    SCALE = ARGS['SCALE']

    print("\nStart exporting the model...\n")
    with tf.Session(config=config) as sess:
        ckpt_name = ARGS["CKPT"] + ".meta"
        saver = tf.train.import_meta_graph(ckpt_name)
        saver.restore(sess, tf.train.latest_checkpoint(ARGS["CKPT_dir"]))

        graph_def = sess.graph.as_graph_def()
        graph_def = tf.graph_util.convert_variables_to_constants(sess, graph_def, ['NHWC_output'])
        graph_def = optimize_for_inference_lib.optimize_for_inference(graph_def, ["IteratorGetNext"],
                                                                      ["NHWC_output"],
                                                                      tf.float32.as_datatype_enum)
        graph_def = TransformGraph(graph_def, ["IteratorGetNext"], ["NHWC_output"], ["sort_by_execution_order"])

        filename = './frozen-pb/frozen_ESPCN_graph_x' + str(SCALE) + '.pb'
        with tf.gfile.FastGFile(filename, 'wb') as f:
            f.write(graph_def.SerializeToString())

        tf.train.write_graph(graph_def, ".", './frozen-pb/train.pbtxt')

        #SAVE NCHW
        graph_def = sess.graph.as_graph_def()
        graph_def = tf.graph_util.convert_variables_to_constants(sess, graph_def, ['NCHW_output'])
        graph_def = optimize_for_inference_lib.optimize_for_inference(graph_def, ["IteratorGetNext"],
                                                                      ["NCHW_output"],
                                                                      tf.float32.as_datatype_enum)
        graph_def = TransformGraph(graph_def, ["IteratorGetNext"], ["NCHW_output"], ["sort_by_execution_order"])
        filename = './frozen-pb/nchw_frozen_ESPCN_graph_x' + str(SCALE) + '.pb'
        with tf.gfile.FastGFile(filename, 'wb') as f:
            f.write(graph_def.SerializeToString())

        tf.train.write_graph(graph_def, ".", './frozen-pb/nchw_train.pbtxt')

    print("\nExporting done!\n")
Example 11
def optimize_model(model_path):
    inputGraph = tf.GraphDef()
    with tf.gfile.Open(model_path, 'rb') as model:
        data2read = model.read()
        inputGraph.ParseFromString(data2read)

    with tf.variable_scope('lanenet'):
        input_tensor = tf.placeholder(dtype=tf.float32, shape=[1, 256, 512, 3], name='input_tensor')

    net = lanenet.LaneNet(phase='test', net_flag='vgg')
    binary_seg_ret, instance_seg_ret = net.inference(input_tensor=input_tensor, name='lanenet_model')

    with tf.variable_scope('lanenet/'):
        binary_seg_ret = tf.cast(binary_seg_ret, dtype=tf.float32)
        binary_seg_ret = tf.identity(binary_seg_ret, name= 'final_binary_output')
        instance_seg_ret = tf.identity(instance_seg_ret, name='final_pixel_embedding_output')
        # binary_seg_ret = tf.squeeze(binary_seg_ret, axis=0, name='final_binary_output')  # remove all dimensions of size 1
        # instance_seg_ret = tf.squeeze(instance_seg_ret, axis=0, name='final_pixel_embedding_output')


    """outputGraph = optimize_for_inference_lib.optimize_for_inference(
        inputGraph,
        input_node_names=['lanenet/input_tensor'],
        output_node_names=[
                'lanenet/final_binary_output',
                'lanenet/final_pixel_embedding_output'],
        placeholder_type_enum=tf.int32.as_datatype_enum
    )"""

    outputGraph = TransformGraph(
        inputGraph,
        ['lanenet/input_tensor'],
        ['lanenet/final_binary_output',
         'lanenet/final_pixel_embedding_output'],
        ['remove_nodes(op=Identity, op=CheckNumerics)',
         'merge_duplicate_nodes',
         'strip_unused_nodes',
         'fold_constants(ignore_errors=true)',
         'fold_batch_norms',
         'fold_old_batch_norms',
         'quantize_weights',
         'quantize_nodes',
         'sort_by_execution_order']
    )

    new_name = model_path.split('/')[-2] + '/OptimizedGraph.pb'
    with tf.gfile.FastGFile(new_name, 'wb') as model:
        model.write(outputGraph.SerializeToString())
Example 12
    def _transform_graph(self, in_graph, out_graph, transforms):
        """Transforms input graph.

        :param in_graph: input graph file or graphDef.
        :param out_graph: output graph file or graphDef.
        :param transforms: list of transforms.

        :return: the transformed GraphDef.
        """
        in_graph_def = in_graph if isinstance(
            in_graph, tf.compat.v1.GraphDef) else self._read_graph(in_graph)
        out_graph_def = TransformGraph(in_graph_def, self.inputs, self.outputs,
                                       transforms)
        if out_graph and not isinstance(out_graph, tf.compat.v1.GraphDef):
            f = gfile.GFile(out_graph, 'wb')
            f.write(out_graph_def.SerializeToString())
        return out_graph_def
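
    # Hypothetical usage, assuming self.inputs / self.outputs are set on the
    # owning object (transform names follow the Graph Transform Tool reference):
    #
    #     out_def = self._transform_graph('frozen.pb', 'optimized.pb',
    #                                     ['strip_unused_nodes',
    #                                      'fold_constants(ignore_errors=true)'])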
Example 13
def optimizing(graph_def):
    #graph = 'trying/trying_model.pb'
    # with tf.gfile.FastGFile(graph, 'rb') as f:
    # 	graph_def = tf.GraphDef()
    # 	graph_def.ParseFromString(f.read())
    # 	tf.summary.FileWriter('logs', graph_def)

    inp_node = 'input_1'
    out_node = 'dense_3/Softmax'
    graph_def = optimize_for_inference_lib.optimize_for_inference(
        graph_def, [inp_node], [out_node], tf.float32.as_datatype_enum)
    graph_def = TransformGraph(graph_def, [inp_node], [out_node],
                               ["sort_by_execution_order"])

    with tf.gfile.FastGFile(
            os.path.join(save_dir, 'frozen_inference_graph_opt.pb'),
            'wb') as f:
        f.write(graph_def.SerializeToString())
Example 14
def optimizeGraph(pathTemp, input_nodes, output_nodes):
    # freeze graph
    input_graph_path = "{}/".format(pathTemp) + 'myFinalGraph.pbtxt'
    checkpoint_path = "{}/".format(pathTemp) + 'myFinalModel.ckpt'
    input_saver_def_path = ""
    input_binary = False
    restore_op_name = "save/restore_all"
    filename_tensor_name = "save/Const:0"
    output_frozen_graph_name = "{}/".format(pathTemp) + 'frozenModel.pb'
    # output_optimized_graph_name = 'optimized_'+MODEL_NAME+'.pb'
    clear_devices = True
    output_optimized_graph_name = "{}/".format(pathTemp) + 'optimizedModel.pb'
    output_transformed_graph_name = "{}/".format(
        pathTemp) + 'transformedModel.pb'

    freeze_graph.freeze_graph(input_graph_path, input_saver_def_path,
                              input_binary, checkpoint_path, output_nodes[0],
                              restore_op_name, filename_tensor_name,
                              output_frozen_graph_name, clear_devices, "")
    # Optimize for inference
    with tf.gfile.Open(output_frozen_graph_name, "rb") as f:
        input_graph_def = tf.GraphDef()
        input_graph_def.ParseFromString(f.read())

    output_graph_def = optimize_for_inference_lib.optimize_for_inference(
        input_graph_def,
        input_nodes,  # an array of the input node(s)
        output_nodes,  # an array of output nodes
        tf.float32.as_datatype_enum)
    # Save the optimized graph
    f = tf.gfile.FastGFile(output_optimized_graph_name, "wb")
    f.write(output_graph_def.SerializeToString())

    #graph transform
    with tf.gfile.Open(output_optimized_graph_name, "rb") as f:
        input_graph_def = tf.GraphDef()
        input_graph_def.ParseFromString(f.read())
    transforms = ["strip_unused_nodes"]
    transformed_graph_def = TransformGraph(input_graph_def, input_nodes,
                                           output_nodes, transforms)
    # Save the transformed graph
    f = tf.gfile.FastGFile(output_transformed_graph_name, "wb")
    f.write(transformed_graph_def.SerializeToString())
    return
Example 15
    def export(self):

        if not os.path.exists("./models"):
            os.makedirs("./models")

        print("Exporting model...")

        graph = tf.get_default_graph()
        with graph.as_default():
            with tf.Session(config=self.config) as sess:

                ### Restore checkpoint
                ckpt_name = self.ckpt_path + "fsrcnn_ckpt" + ".meta"
                saver = tf.train.import_meta_graph(ckpt_name)
                saver.restore(sess, tf.train.latest_checkpoint(self.ckpt_path))

                # Return a serialized GraphDef representation of this graph
                graph_def = sess.graph.as_graph_def()

                # All variables to constants
                graph_def = tf.graph_util.convert_variables_to_constants(
                    sess, graph_def, ['NCHW_output'])

                # Optimize for inference
                graph_def = optimize_for_inference_lib.optimize_for_inference(
                    graph_def,
                    ["IteratorGetNext"],
                    ["NCHW_output"],  # ["NHWC_output"],
                    tf.float32.as_datatype_enum)

                graph_def = TransformGraph(graph_def, ["IteratorGetNext"],
                                           ["NCHW_output"],
                                           ["sort_by_execution_order"])

                pb_filename = "./models/FSRCNN_x{}.pb".format(self.scale)
                if self.smallFlag:
                    pb_filename = "./models/FSRCNN-small_x{}.pb".format(
                        self.scale)

                with tf.gfile.FastGFile(pb_filename, 'wb') as f:
                    f.write(graph_def.SerializeToString())

                tf.train.write_graph(graph_def, ".", 'train.pbtxt')
Example 16
def freeze_and_quantize(sess, model, fpath):
    out_name = model.output.name.split(':')[0]
    frozen_graph_def = convert_variables_to_constants(sess, sess.graph_def,
                                                      [out_name])

    frozen_graph_def = freeze_models(sess, out_name, fpath + '.pb')

    inputs = [model.input.name.split(':')[0]]
    outputs = [out_name]
    # quantize_weights quantize_nodes
    # don't quantize mobilenet https://stackoverflow.com/questions/44832492/tensorflow-ssd-mobilenet-model-accuracy-drop-after-quantization-using-transform
    transforms = 'add_default_attributes strip_unused_nodes(type=float, shape="1,224,224,3") ' \
                 'remove_nodes(op=Identity, op=CheckNumerics) fold_constants(ignore_errors=true) ' \
                 'fold_batch_norms fold_old_batch_norms  ' \
                 'strip_unused_nodes sort_by_execution_order'.split()
    results = TransformGraph(frozen_graph_def, inputs, outputs, transforms)

    with tf.gfile.GFile('./tmp/' + fpath + '-opt.pb', 'wb') as f:
        f.write(results.SerializeToString())
    return results
Example 17
def export_pb(session, output, inputs, outputs):
    with tf.gfile.GFile(output, "wb") as f:
        graph_def = session.graph.as_graph_def(add_shapes=True)
        graph_def = _remove_assert(graph_def.node)
        graph_def = tf.graph_util.convert_variables_to_constants(
            session, graph_def, outputs)
        graph_def = TransformGraph(
            graph_def,
            inputs,
            outputs,
            [
                "remove_nodes(op=Identity, op=CheckNumerics, op=StopGradient)",
                "sort_by_execution_order",  # sort by execution order after each transform to ensure correct node ordering
                "remove_device",
                "sort_by_execution_order",
                "fold_batch_norms",
                "sort_by_execution_order",
                "fold_old_batch_norms",
                "sort_by_execution_order"
            ])
        f.write(graph_def.SerializeToString())
Example 18
def transform_graph(input_graph, output_graph=None):
    """ Use to run different transform function on the input graph and generate a output graph. """
    if isinstance(input_graph, graph_pb2.GraphDef):
        graph_def = input_graph
    else:
        graph_def = load_graph(input_graph)

    new_graph_def = TransformGraph(
        graph_def, ['input_placeholder/input'], ['outputs_'], [
            'strip_unused_nodes(type=float, shape="1,28,28,1")',
            'remove_nodes(op=Identity, op=CheckNumerics, op=Switch)',
            'fold_constants(ignore_errors=true)', 'fold_batch_norms',
            'fold_old_batch_norms', 'sort_by_execution_order'
        ])

    if output_graph is None:
        return new_graph_def

    # save new graph
    with tf.gfile.GFile(output_graph, "wb") as f:
        f.write(new_graph_def.SerializeToString())
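
# Hypothetical usage (file names are illustrative; load_graph is assumed, as in
# the original, to return a GraphDef from a path):
#
#     gd = transform_graph('frozen_mnist.pb')       # returns the transformed GraphDef
#     transform_graph(gd, 'optimized_mnist.pb')     # or write it straight to disk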
Example 19
def transform_graph(input_graph, output_graph=None):
    """ Use to run different transform function on the input graph and generate a output graph. """
    if isinstance(input_graph, graph_pb2.GraphDef):
        graph_def = input_graph
    else:
        graph_def = load_graph(input_graph)

    # remove Switch and Merge nodes from the graph, rewiring their consumers to each node's first input
    for i in reversed(range(len(graph_def.node))):
        op = graph_def.node[i].op
        name = graph_def.node[i].name

        if op == 'Switch' or op == 'Merge':
            inp = graph_def.node[i].input[0]
            for node in graph_def.node:
                for j in range(len(node.input)):
                    if name == node.input[j]:
                        node.input[j] = inp
            del graph_def.node[i]

    new_graph_def = TransformGraph(graph_def, [
        'Webcam/fifo_queue_DequeueMany:0', 'Webcam/fifo_queue_DequeueMany:1',
        'Webcam/fifo_queue_DequeueMany:2'
    ], [
        'hourglass/hg_2/after/hmap/conv/BiasAdd:0', 'upscale/Mean',
        'radius/out/fc/BiasAdd:0'
    ], [
        'strip_unused_nodes(type=float, shape="1,36,60,1")',
        'remove_nodes(op=Identity, op=CheckNumerics, op=Switch)',
        'fold_constants(ignore_errors=true)', 'fold_batch_norms',
        'fold_old_batch_norms', 'sort_by_execution_order'
    ])

    if output_graph is None:
        return new_graph_def

    # save new graph
    with tf.gfile.GFile(output_graph, "wb") as f:
        f.write(new_graph_def.SerializeToString())
Example 20
def export(ARGS):
    config = tf.ConfigProto()
    config.gpu_options.allow_growth = True

    print("\nStart exporting the model...\n")
    with tf.Session(config=config) as sess:
        ckpt_name = ARGS["CKPT"] + ".meta"
        saver = tf.train.import_meta_graph(ckpt_name)
        saver.restore(sess, tf.train.latest_checkpoint(ARGS["CKPT_dir"]))

        # SAVE NCHW
        graph_def = sess.graph.as_graph_def()

        inputs = ['IteratorGetNext']

        if ARGS['SCALE'] == 4:
            outputs = ['NCHW_output', 'NCHW_output_2x', 'NCHW_output_4x']
        elif ARGS['SCALE'] == 8:
            outputs = [
                'NCHW_output', 'NCHW_output_2x', 'NCHW_output_4x',
                'NCHW_output_8x'
            ]
        else:
            outputs = ['NCHW_output']
        type = ["DT_FLOAT"]
        graph_def = tf.graph_util.convert_variables_to_constants(
            sess, graph_def, outputs)
        graph_def = optimize_for_inference_lib.optimize_for_inference(
            graph_def, inputs, outputs, type)
        graph_def = TransformGraph(graph_def, inputs, outputs,
                                   ['sort_by_execution_order'])

        filename = "export/LapSRN_x" + str(ARGS['SCALE']) + ".pb"
        with tf.gfile.FastGFile(filename, 'wb') as f:
            f.write(graph_def.SerializeToString())

    print("\nExporting done!\n")
Example 21
# Transform it
input_names = ['Placeholder']
# output_names = ['u_net/conv2d/Reshape_1']
output_names = ['dilated_cnn/conv2d_1/Reshape_1']
transforms = [
    'strip_unused_nodes',
    'remove_nodes(op=Identity, op=CheckNumerics)',
    'fold_constants(ignore_errors=true)',
    'fold_batch_norms',
    'fold_old_batch_norms',
    # 'quantize_weights',
    # 'quantize_nodes'
]

G_opt = TransformGraph(graph, input_names, output_names, transforms)

# Write it to disk
with tf.gfile.GFile('./models/keras_opt_model.pb', "wb") as f:
    f.write(G_opt.SerializeToString())

# Compare the number of operations before and after
graph = load_graph('./models/keras_frozen_model.pb')
print(len(graph.get_operations()))
# for op in graph.get_operations():
#    print(op.name)

graph = load_graph('./models/keras_opt_model.pb')
print(len(graph.get_operations()))
# for op in graph.get_operations():
#    print(op.name)
Example 22
            if node.name == inp_node:
                del node.attr['shape']

    tf.import_graph_def(graph_def, name='')

    inputData = gen_data(tf.placeholder(tf.float32, [1, 4, 5, 3], inp_node))
    outputData = localSession.run(
        localSession.graph.get_tensor_by_name(out_node + ':0'),
        feed_dict={inp_node + ':0': inputData})
    writeBlob(inputData, 'slim_batch_norm_in')
    writeBlob(outputData, 'slim_batch_norm_out')

    graph_def = TransformGraph(graph_def, [inp_node], [out_node],
                               ['fold_constants', 'strip_unused_nodes'])
    with tf.gfile.FastGFile('slim_batch_norm_net.pb', 'wb') as f:
        f.write(graph_def.SerializeToString())

################################################################################
# issue https://github.com/opencv/opencv/issues/13839
inp_node = 'PNet/conv3/add'
out_node = 'PNet/cls_prob'
with tf.Session(graph=tf.Graph()) as localSession:
    localSession.graph.as_default()

    with tf.gfile.FastGFile('PNet_pnet.pb', 'rb') as f:
        graph_def = tf.GraphDef()
        graph_def.ParseFromString(f.read())
        graph_def = TransformGraph(graph_def, [inp_node], [out_node],
                                   ['strip_unused_nodes'])

    tf.import_graph_def(graph_def, name='')
Example 23
        ckpt_path = os.path.join(args.ckpt_dir,
                                 "ckpt-{iter}".format(iter=args.ckpt_iter))

    with tf.Session() as sess:
        inputs = tf.placeholder(name="inputs",
                                dtype=tf.float32,
                                shape=[1, 256, 256, 3])
        outputs = tf.identity(generator(inputs), name="outputs")

        generator_vars = tf.get_collection(tf.GraphKeys.TRAINABLE_VARIABLES,
                                           "generator/")
        saver = tf.train.Saver(var_list=generator_vars)
        saver.restore(sess, ckpt_path)

        convert_variables_to_constants = tf.graph_util.convert_variables_to_constants
        output_graph_def = convert_variables_to_constants(
            sess,
            tf.get_default_graph().as_graph_def(), ["outputs"])

        input_names = ["inputs"]
        output_names = ["outputs"]
        transforms = [
            "strip_unused_nodes", "fold_batch_norms", "fold_constants",
            "quantize_weights"
        ]
        transformed_graph_def = TransformGraph(output_graph_def, input_names,
                                               output_names, transforms)

        with tf.gfile.GFile(args.output_graph, "wb") as f:
            f.write(transformed_graph_def.SerializeToString())
Example 24
    def export(self, quant):
        print("Exporting model...")

        export_dir = "./models/"
        if not os.path.exists(export_dir):
            os.makedirs(export_dir)

        export_file = "EDSRorig_x{}.pb".format(self.scale)

        graph = tf.get_default_graph()
        with graph.as_default():
            with tf.Session(config=self.config) as sess:

                ### Restore checkpoint
                ckpt_name = self.ckpt_path + "edsr_ckpt" + ".meta"
                saver = tf.train.import_meta_graph(ckpt_name)
                saver.restore(sess, tf.train.latest_checkpoint(self.ckpt_path))

                # Return a serialized GraphDef representation of this graph
                graph_def = sess.graph.as_graph_def()

                # All variables to constants
                graph_def = tf.graph_util.convert_variables_to_constants(
                    sess, graph_def, ['NCHW_output'])

                # Optimize for inference
                graph_def = optimize_for_inference_lib.optimize_for_inference(
                    graph_def,
                    ["IteratorGetNext"],
                    ["NCHW_output"],  # ["NHWC_output"],
                    tf.float32.as_datatype_enum)

                # Apply optional file-shrinking transforms; quant level 2 is recommended.
                transforms = ["sort_by_execution_order"]
                if quant == 1:
                    print("Rounding weights for export.")
                    transforms = ["sort_by_execution_order", "round_weights"]
                    export_file = "EDSR_x{}_q1.pb".format(self.scale)
                if quant == 2:
                    print("Quantizing for export.")
                    transforms = [
                        "sort_by_execution_order", "quantize_weights"
                    ]
                    export_file = "EDSR_x{}.pb".format(self.scale)
                if quant == 3:
                    print("Round weights and quantizing for export.")
                    transforms = [
                        "sort_by_execution_order", "round_weights",
                        "quantize_weights"
                    ]
                    export_file = "EDSR_x{}_q3.pb".format(self.scale)

                graph_def = TransformGraph(graph_def, ["IteratorGetNext"],
                                           ["NCHW_output"], transforms)

                print("Exported file = {}".format(export_dir + export_file))
                with tf.gfile.GFile(export_dir + export_file, 'wb') as f:
                    f.write(graph_def.SerializeToString())

                tf.train.write_graph(graph_def, ".", 'train.pbtxt')

        sess.close()
Example 25
def main(config):

    # Build Networks
    tf.reset_default_graph()

    photo_ph = tf.placeholder(
        tf.float32,
        [1, None, None, 1])  # input grayscale image, normalized by 0~1
    is_training = tf.constant(False)  # Always False in testing

    ops = build_networks(config, photo_ph, is_training)

    tfconfig = tf.ConfigProto()
    tfconfig.gpu_options.allow_growth = True
    sess = tf.Session(config=tfconfig)
    sess.run(tf.global_variables_initializer())

    # load model
    saver = tf.train.Saver()
    print('Load trained models...')

    if os.path.isdir(config.model):
        checkpoint = tf.train.latest_checkpoint(config.model)
        model_dir = config.model
    else:
        checkpoint = config.model
        model_dir = os.path.dirname(config.model)

    if checkpoint is not None:
        print('Checkpoint', os.path.basename(checkpoint))
        print("[{}] Resuming...".format(time.asctime()))
        saver.restore(sess, checkpoint)
    else:
        raise ValueError('Cannot load model from {}'.format(model_dir))
    print('Done.')

    output_graph = "export/normal.pb"
    print(tf.all_variables())
    with tf.gfile.GFile(output_graph, "wb") as f:
        f.write(sess.graph.as_graph_def().SerializeToString())
    tf.train.write_graph(sess.graph.as_graph_def(),
                         'export/',
                         'normaltxt.pbtxt',
                         as_text=True)

    input_graph_def = sess.graph.as_graph_def()

    output_nodes_names = [
        ops['kpts'].op.name, ops['feats'].op.name, ops['scale_maps'].op.name,
        ops['kpts_scale'].op.name, ops['degree_maps'].op.name,
        ops['kpts_ori'].op.name
    ]
    output_graph_def = graph_util.convert_variables_to_constants(
        sess,  # The session
        input_graph_def,  # input_graph_def is useful for retrieving the nodes 
        output_nodes_names)
    output_graph_name = "export/frozen.pb"
    with tf.gfile.GFile(output_graph_name, "wb") as f:
        f.write(output_graph_def.SerializeToString())
    tf.train.write_graph(output_graph_def,
                         'export/',
                         'frozen.pbtxt',
                         as_text=True)

    inp_node = ['Placeholder']
    optimize_graph_def = optimize_for_inference_lib.optimize_for_inference(
        output_graph_def, inp_node, output_nodes_names,
        tf.float32.as_datatype_enum)
    optimize_graph_def = TransformGraph(optimize_graph_def, inp_node,
                                        output_nodes_names,
                                        ["sort_by_execution_order"])
    output_graph_name = "export/optimize.pb"
    with tf.gfile.GFile(output_graph_name, "wb") as f:
        f.write(optimize_graph_def.SerializeToString())
    tf.train.write_graph(optimize_graph_def,
                         "export/",
                         'optimize.pbtxt',
                         as_text=True)
Example 26
    def export(self, ckptname):
        shape = (self.input_size, self.input_size)
        with tf.Graph().as_default():
            in_node_name = "img_path"
            img_path = tf.placeholder(tf.string, name=in_node_name)
            # NOTE: decode_jpeg supports png
            x = tf.cast(
                tf.image.resize_images(
                    tf.expand_dims(
                        tf.image.decode_jpeg(tf.read_file(img_path),
                                             channels=3), 0), shape),
                tf.float32)
            x = (x - tf.constant([[[MEAN]]])) / tf.constant(
                [[[STD]]])  # [[[]]] for [n, c, h, w]
            # Hopefully the graph optimization tool can fuse these ops.
            # NOTE: tf.image.resize_images does expand_dims on ndims==3 images and
            #       squeezes back; thus expand_dims first so resize_images does less work.
            net = Net(x,
                      alpha=self.alpha,
                      input_size=(self.input_size, self.input_size),
                      optim_graph=True
                      )  # optim_graph==True makes inference_only==True
            with tf.Session() as sess:
                net.load(sess, self.savedir, ckptname)
                npypath = os.path.join(self.savedir, "%s.pkl" % ckptname)
                net.save_to_numpy(sess, npypath)
                if self.debug:
                    test_img = os.path.join("datasets", "train", "cat.0.jpg")
                    test_y = sess.run(net.out, {img_path: test_img})
                    out_var_name = net.out.name
                self.logger.info(
                    "Params saved as a list of numpy arrays to %s" %
                    npypath)
                in_graph_def = tf.get_default_graph().as_graph_def()
                out_graph_def = tf.graph_util.convert_variables_to_constants(
                    sess, in_graph_def, [net.out.op.name])
            out_graph_def = TransformGraph(
                out_graph_def,
                [in_node_name],
                [net.out.op.name],
                [
                    "strip_unused_nodes",
                    # "fuse_convolutions",
                    "fold_constants(ignore_errors=true)",
                    "fold_batch_norms",
                    "fold_old_batch_norms"
                ])
            ckptpath = os.path.join(self.savedir, "optimized_%s.pb" % ckptname)
            with tf.gfile.GFile(ckptpath, 'wb') as f:
                f.write(out_graph_def.SerializeToString())
            self.logger.info("Optimized frozen pb saved to %s" % ckptpath)
            node_name_path = os.path.join(self.savedir, "node_names.txt")
            if not os.path.exists(node_name_path):
                with open(node_name_path, "w") as f:
                    f.write("%s\n%s" % (in_node_name, net.out.op.name))
        if self.debug:
            with tf.Graph().as_default():
                gd = tf.GraphDef()
                with tf.gfile.GFile(ckptpath, "rb") as f:
                    gd.ParseFromString(f.read())
                tf.import_graph_def(gd, name="")
                tf.get_default_graph().finalize()
                with tf.Session() as sess:
                    img_path = tf.get_default_graph().get_tensor_by_name(
                        "%s:0" % in_node_name)
                    out = tf.get_default_graph().get_tensor_by_name(
                        out_var_name)
                    new_y = sess.run(out, {img_path: test_img})
                diff = np.abs(new_y - test_y)
                self.logger.debug("Max diff between original and optimized: %f" %
                                  diff.max())
                self.logger.debug("Diff < 5e-7: %s" % (diff < 5e-7))
Example 27
                                   "input_ids": input_ids_p,
                                   "input_mask": input_mask_p,
                                   "label_ids": label_ids_p,
                                   "segment_ids": segment_ids_p
                               },
                               outputs={"probabilities": probabilities})

    # 2
    # only .pb
    input_node_names = ['input_ids', 'input_mask', 'label_ids', 'segment_ids']
    output_node_names = ['loss/probabilities']

    transforms = [
        'remove_nodes(op=Identity)', 'fold_constants(ignore_errors=true)',
        'fold_batch_norms', 'merge_duplicate_nodes', 'strip_unused_nodes',
        'sort_by_execution_order'
    ]

    output_graph_def = tf.graph_util.convert_variables_to_constants(
        sess, graph_def, [
            'loss/probabilities', 'input_ids', 'input_mask', 'label_ids',
            'segment_ids'
        ])
    output_graph_def = TransformGraph(output_graph_def, input_node_names,
                                      output_node_names, transforms)

    save_path = os.path.join(export_model_dir, export_model_name + ".pb")
    with tf.gfile.FastGFile(save_path, mode='wb') as f:
        f.write(output_graph_def.SerializeToString())
        print("froze graph save to path: ", save_path)
Example 28
def optimize_graph(params):

    config = tf.ConfigProto(device_count={'GPU': 0}, allow_soft_placement=True)

    init_checkpoint = params.ckpt_dir

    tf.logging.info('build graph...')
    # input placeholders, not sure if they are friendly to XLA
    input_ids = tf.placeholder(tf.int32, (None, params.max_seq_len),
                               'input_ids')
    input_mask = tf.placeholder(tf.int32, (None, params.max_seq_len),
                                'input_mask')
    input_type_ids = tf.placeholder(tf.int32, (None, params.max_seq_len),
                                    'segment_ids')

    jit_scope = tf.contrib.compiler.jit.experimental_jit_scope

    with jit_scope():
        features = {}
        features['input_ids'] = input_ids
        features['input_mask'] = input_mask
        features['segment_ids'] = input_type_ids
        model = BertMultiTask(params)
        hidden_feature = model.body(features, tf.estimator.ModeKeys.PREDICT)
        problem_sep_features, hidden_feature = model.hidden(
            features, hidden_feature, tf.estimator.ModeKeys.PREDICT)
        pred = model.top(problem_sep_features, hidden_feature,
                         tf.estimator.ModeKeys.PREDICT)

        output_tensors = [pred[k] for k in pred]

        tvars = tf.trainable_variables()

        (assignment_map, initialized_variable_names
         ) = modeling.get_assignment_map_from_checkpoint(
             tvars, init_checkpoint)

        tf.train.init_from_checkpoint(init_checkpoint, assignment_map)

        tmp_g = tf.get_default_graph().as_graph_def()

    input_node_names = ['input_ids', 'input_mask', 'segment_ids']
    output_node_names = [
        '%s_top/%s_predict' %
        (params.share_top[problem], params.share_top[problem])
        for problem in params.problem_list
    ]

    transforms = [
        'remove_nodes(op=Identity)',
        'fold_constants(ignore_errors=true)',
        'fold_batch_norms',
        # 'quantize_weights',
        # 'quantize_nodes',
        'merge_duplicate_nodes',
        'strip_unused_nodes',
        'sort_by_execution_order'
    ]

    with tf.Session(config=config) as sess:
        tf.logging.info('load parameters from checkpoint...')
        sess.run(tf.global_variables_initializer())
        tf.logging.info('freeze...')
        tmp_g = tf.graph_util.convert_variables_to_constants(
            sess, tmp_g, [n.name[:-2] for n in output_tensors])
        tmp_g = TransformGraph(tmp_g, input_node_names, output_node_names,
                               transforms)
    tmp_file = os.path.join(params.ckpt_dir, 'export_model')
    tf.logging.info('write graph to: %s' % tmp_file)
    with tf.gfile.GFile(tmp_file, 'wb') as f:
        f.write(tmp_g.SerializeToString())
    return tmp_file
Example 29
def save_model(model, model_dir, model_name, metadata=None, verbose=True):
    """
    Saves the model in 2 formats
    - A compiled model (<model_name>.h5)
    - A serialized model (<model_name>_optimised_frozen.pb)

    A compiled model is a .h5 file that contains the model weights, model
    architecture and the optimizer state. It's mainly used for continued
    training. When used for inference, no other files are required, as the .h5
    file already contains the weights, architecture, and parameters for
    preprocessing a trace into model input (which are stored in the metadata).

    A serialised model is a .pb file that only contains the model weights and
    a frozen architecture optimised for fast inference. Since it does not
    contain the parameters for preprocessing a trace into model input, it must
    be used along with a metadata json file `<model_name>_metadata.json`.

    Parameters
    ----------
    model: A Keras model.
    model_dir: String.
               The directory to save this model.
    model_name: String.
                Model name which is also used as the basename for files.
    metadata: Dictionary (optional).
              Other metadata to save with the model.
    verbose: Boolean.
             Whether to print file locations.

    Returns
    -------
    None
    """

    # Create directory if not already exist
    os.makedirs(model_dir, exist_ok=True)

    byproducts_dir = os.path.join(model_dir, 'byproducts')
    os.makedirs(byproducts_dir, exist_ok=True)

    ######
    # Save a compiled model for continued training
    ######

    compiled_model_path = os.path.join(model_dir, model_name + '.h5')
    model.save(compiled_model_path)

    if verbose:
        print('Saved compiled model to ' + compiled_model_path)

    ######
    # Save metadata
    ######

    if metadata:
        # Append metadata to compiled model
        with h5py.File(compiled_model_path, mode='a') as fp:
            fp.attrs['metadata'] = json.dumps(metadata)
            if verbose:
                print('Appended metadata to ' + compiled_model_path)

        # Save metadata to a json file for serialised model
        metadata_file = os.path.join(model_dir, model_name + '_metadata.json')
        with open(metadata_file, 'w') as f:
            json.dump(metadata, f)
            if verbose:
                print('Saved metadata to ' + metadata_file)

    ######
    # Save a serialized model for inferencing in Spark
    ######

    # Saves a checkpoint for freezing graph
    checkpt_path = os.path.join(byproducts_dir, model_name + '.ckpt')
    saver = tf.train.Saver()
    saver.save(K.get_session(), checkpt_path)
    if verbose:
        print('Saved checkpoint to ' + checkpt_path)

    # Saves a copy of the graph
    graph_path = os.path.join(byproducts_dir, model_name + '.pbtxt')
    tf.train.write_graph(K.get_session().graph, './', graph_path)
    if verbose:
        print('Saved graph to ' + graph_path)

    # Get node names
    input_node_names = model.input.op.name
    output_node_names = model.output.op.name
    restore_op_name = 'save/restore_all'

    # Freeze and save a frozen graph
    # (Modified from https://github.com/tensorflow/tensorflow/issues/8181#issuecomment-309375713)
    input_saver_def_path = ""
    input_binary = False
    filename_tensor_name = 'whatever'  # Unused by updated loading code – see https://github.com/tensorflow/tensorflow/blob/master/tensorflow/python/tools/freeze_graph.py#L76
    output_frozen_graph_name = os.path.join(byproducts_dir,
                                            model_name + '_frozen.pb')
    clear_devices = True

    freeze_graph.freeze_graph(input_graph=graph_path,
                              input_saver=input_saver_def_path,
                              input_binary=input_binary,
                              input_checkpoint=checkpt_path,
                              output_node_names=output_node_names,
                              restore_op_name=restore_op_name,
                              filename_tensor_name=filename_tensor_name,
                              output_graph=output_frozen_graph_name,
                              clear_devices=clear_devices,
                              initializer_nodes="")

    if verbose:
        print('Saved frozen graph to ' + output_frozen_graph_name)

    # Optimize the graph for inference
    # (See https://github.com/tensorflow/tensorflow/blob/master/tensorflow/tools/graph_transforms/python/transform_graph_test.py)
    input_graph_def = tf.GraphDef()
    with tf.gfile.Open(output_frozen_graph_name, "rb") as f:
        # load graph
        data = f.read()
        input_graph_def.ParseFromString(data)

        # list of transforms
        # (See https://github.com/tensorflow/tensorflow/tree/master/tensorflow/tools/graph_transforms#transform-reference)
        transforms = [
            "fold_batch_norms", "strip_unused_nodes", "remove_device",
            "remove_nodes(op=Identity, op=CheckNumerics)",
            "add_default_attributes"
        ]

        output_graph_def = TransformGraph(
            input_graph_def,
            input_node_names.split(","),  # an array of the input node(s)
            output_node_names.split(","),  # an array of the output nodes
            transforms)

    # Save the optimized graph
    output_optimized_graph_name = os.path.join(
        model_dir, model_name + '_optimised_frozen.pb')
    with tf.gfile.FastGFile(output_optimized_graph_name, "wb") as f:
        f.write(output_graph_def.SerializeToString())
    if verbose:
        print('Saved optimized frozen graph to ' + output_optimized_graph_name)

    # Clean up
    shutil.rmtree(byproducts_dir, ignore_errors=True)
    if verbose:
        print('Removed byproducts in ' + byproducts_dir)
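
# Hypothetical usage of save_model above (the Keras model and the metadata
# contents are assumptions):
#
#     save_model(keras_model, './models', 'trace_cnn',
#                metadata={'input_length': 200}, verbose=True)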
Example 30
with gfile.FastGFile(FREEZED_PATH, 'wb') as f:
    f.write(optimized_graph_def.SerializeToString())

print("Starting graph optimization ... ")
transforms = [
    'strip_unused_nodes(type=float, shape="1,160,576,3")',
    'remove_nodes(op=Identity, op=CheckNumerics)',
    'fold_constants(ignore_errors=false)', 'fold_batch_norms',
    'fuse_resize_pad_and_conv', 'fuse_resize_and_conv', 'fuse_pad_and_conv',
    'fold_old_batch_norms', 'remove_device', 'round_weights(num_steps=256)',
    'strip_unused_nodes'
]

for transform in transforms:
    try:
        print("Starting transform: `%s` ... " % transform)
        optimized_graph_def = TransformGraph(optimized_graph_def,
                                             [INPUT_TENSOR_NAME],
                                             [FINAL_TENSOR_NAME], [transform])
    except Exception as e:
        print('Transform failed: `%s` (%s)' % (transform, e))

tf.summary.FileWriter('opt_log', graph_def=optimized_graph_def)
print("Wrote optimized graph to `%s` ... " % 'opt_log')

with gfile.FastGFile(OPTIMIZED_PATH, 'wb') as f:
    f.write(optimized_graph_def.SerializeToString())

print("Done! Wrote results to `%s`." % 'tf_files')