Code Example #1
import tensorflow as tf

# get_network is assumed to come from the tf-pose-estimation project
from tf_pose.networks import get_network


def main():

    graph = tf.Graph()
    with graph.as_default():

        input_node = tf.placeholder(tf.float32,
                                    shape=(1, 368, 432, 3),
                                    name='image')
        net, pretrain_path, last_layer = get_network("mobilenet_v2_1.4",
                                                     input_node, None, False)
        #net, pretrain_path, last_layer = get_network("mobilenet_v2_small", input_node, None, False)

        saver = tf.train.Saver(tf.global_variables())
        sess = tf.Session()
        sess.run(tf.global_variables_initializer())
        sess.run(tf.local_variables_initializer())

        saver.restore(sess, 'models/train/test/model-19021')
        saver.save(sess, 'models/train/test/model-final-19021')

        graphdef = graph.as_graph_def()
        tf.train.write_graph(graphdef,
                             'models/train/test',
                             'model-final.pbtxt',
                             as_text=True)
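Once the checkpoint and the model-final.pbtxt graph definition have been written, they can be folded into a single frozen .pb. Below is a minimal sketch using the TF 1.x freeze_graph tool; the output node name Openpose/concat_stage7 is taken from the other examples in this section, and the output path frozen_graph.pb is an assumption.

from tensorflow.python.tools import freeze_graph

# Minimal sketch (TF 1.x): fold the restored variables into the graph written above.
# The output node name is taken from the other examples here; adjust it to your model.
freeze_graph.freeze_graph(
    input_graph='models/train/test/model-final.pbtxt',
    input_saver='',
    input_binary=False,
    input_checkpoint='models/train/test/model-final-19021',
    output_node_names='Openpose/concat_stage7',
    restore_op_name='save/restore_all',
    filename_tensor_name='save/Const:0',
    output_graph='models/train/test/frozen_graph.pb',  # assumed output path
    clear_devices=True,
    initializer_nodes='')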
Code Example #2
import os

import tensorflow as tf

# get_network is assumed to come from the tf-pose-estimation project;
# exporter and graph_rewriter_builder are assumed to come from the
# TensorFlow Object Detection API (or a local copy of it).
from tf_pose.networks import get_network


def main(quantize):

    graph = tf.Graph()
    with graph.as_default():

        input_node = tf.placeholder(tf.float32, shape=(1, 368, 432, 3), name='image')
        net, pretrain_path, last_layer = get_network("mobilenet_v2_1.4", input_node, None, False)

        if quantize == "True" or quantize == "true":
            graph_rewriter_fn = graph_rewriter_builder.build()
            graph_rewriter_fn()
            #exporter.rewrite_nn_resize_op(True)

            saver_kwargs = {}
            saver = tf.train.Saver(**saver_kwargs)
            input_saver_def = saver.as_saver_def()
            frozen_graph_def = exporter.freeze_graph_with_def_protos(
                input_graph_def=tf.get_default_graph().as_graph_def(),
                input_saver_def=input_saver_def,
                input_checkpoint='models/train/test/model_latest-2000',
                output_node_names='Openpose/concat_stage7',
                restore_op_name='save/restore_all',
                filename_tensor_name='save/Const:0',
                clear_devices=True,
                output_graph='',
                initializer_nodes='')

            transformed_graph_def = frozen_graph_def
            binary_graph = os.path.join("models/train/test", "tflite_graph.pb")
            with tf.gfile.GFile(binary_graph, 'wb') as f:
                f.write(transformed_graph_def.SerializeToString())

            txt_graph = os.path.join("models/train/test", "tflite_graph.pbtxt")
            with tf.gfile.GFile(txt_graph, 'w') as f:
                f.write(str(transformed_graph_def))

        else:
            saver = tf.train.Saver(tf.global_variables())
            sess  = tf.Session()
            sess.run(tf.global_variables_initializer())
            sess.run(tf.local_variables_initializer())

            saver.restore(sess, 'models/train/test/model_latest-114000')    # the "-114000" suffix changes with the number of training steps
            saver.save(sess, 'models/train/test/model_latest-final-114000') # the "-114000" suffix changes with the number of training steps

            graphdef = graph.as_graph_def()
            tf.train.write_graph(graphdef, 'models/train/test', 'model_latest-final.pbtxt', as_text=True)
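The tflite_graph.pb frozen in the quantized branch above can then be converted to a .tflite model. The following is a minimal sketch using the TF 1.x TFLiteConverter; the input name, input shape, and output node are taken from the examples above, while the output file name is an assumption.

import tensorflow as tf

# Minimal sketch (TF 1.x): convert the frozen graph written above to TFLite.
converter = tf.lite.TFLiteConverter.from_frozen_graph(
    graph_def_file='models/train/test/tflite_graph.pb',
    input_arrays=['image'],
    output_arrays=['Openpose/concat_stage7'],
    input_shapes={'image': [1, 368, 432, 3]})
tflite_model = converter.convert()
with open('models/train/test/model.tflite', 'wb') as f:  # assumed output file name
    f.write(tflite_model)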
Code Example #3
        'cmu / mobilenet / mobilenet_thin / mobilenet_v2_large / mobilenet_v2_small'
    )
    parser.add_argument('--resize', type=str, default='0x0')
    parser.add_argument('--quantize', action='store_true')
    args = parser.parse_args()

    w, h = model_wh(args.resize)
    if w <= 0 or h <= 0:
        w = h = None
    print(w, h)
    input_node = tf.placeholder(tf.float32,
                                shape=(None, h, w, 3),
                                name='image')

    net, pretrain_path, last_layer = get_network(args.model,
                                                 input_node,
                                                 None,
                                                 trainable=False)
    if args.quantize:
        g = tf.get_default_graph()
        tf.contrib.quantize.create_eval_graph(input_graph=g)

    with tf.Session(config=config) as sess:
        loader = tf.train.Saver(net.restorable_variables())
        loader.restore(sess, pretrain_path)

        tf.train.write_graph(sess.graph_def, './tmp', 'graph.pb', as_text=True)

        flops = tf.profiler.profile(
            None,
            cmd='graph',
            options=tf.profiler.ProfileOptionBuilder.float_operation())
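        # Follow-up sketch: the proto returned by tf.profiler.profile exposes the
        # counted floating-point operations as total_float_ops.
        print('FLOPs: {:,}'.format(flops.total_float_ops))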
Code Example #4
    print('split_inp/out', q_inp_split, q_heat_split, q_vect_split,
          q_obj_split)

    output_vectmap = []
    output_heatmap = []
    output_object = []
    losses = []
    last_losses_l1 = []
    last_losses_l2 = []
    last_losses_l3 = []
    outputs = []
    for gpu_id in range(args.gpus):
        with tf.device(tf.DeviceSpec(device_type="GPU", device_index=gpu_id)):
            with tf.variable_scope(tf.get_variable_scope(),
                                   reuse=(gpu_id > 0)):
                net, pretrain_path, last_layer = get_network(
                    args.model, q_inp_split[gpu_id])
                vect, heat, obj = net.loss_last()
                output_vectmap.append(vect)
                output_heatmap.append(heat)
                output_object.append(obj)
                outputs.append(
                    net.get_output())  # output of heat_paf_upsample, concatenated

                l1s, l2s, l3s = net.loss_l1_l2_l3()
                for idx, (l1, l2, l3) in enumerate(zip(l1s, l2s, l3s)):
                    loss_l1 = tf.nn.l2_loss(
                        tf.concat(l1, axis=0) - q_vect_split[gpu_id],
                        name='loss_l1_stage%d_tower%d' % (idx, gpu_id))
                    loss_l2 = tf.nn.l2_loss(
                        tf.concat(l2, axis=0) - q_heat_split[gpu_id],
                        name='loss_l2_stage%d_tower%d' % (idx, gpu_id))
Code Example #5
    # define model for multi-gpu
    q_inp_split = tf.split(q_inp, args.gpus)
    q_pose_split = tf.split(q_pose, args.gpus)
    q_heat_split = tf.split(q_heat, args.gpus)
    q_vect_split = tf.split(q_vect, args.gpus)

    output_vectmap = []
    output_heatmap = []
    losses = []
    last_losses_l1 = []
    last_losses_l2 = []
    outputs = []
    for gpu_id in range(args.gpus):
        with tf.device(tf.DeviceSpec(device_type="GPU", device_index=gpu_id)):
            with tf.variable_scope(tf.get_variable_scope(),
                                   reuse=(gpu_id > 0)):
                net, pretrain_path, last_layer = get_network(
                    'rnn', [q_inp_split[gpu_id], q_pose_split[gpu_id]])
                if args.checkpoint:
                    pretrain_path = args.checkpoint
                vect, heat = net.loss_last()
                output_vectmap.append(vect)
                output_heatmap.append(heat)
                outputs.append(net.get_output())

                l1s, l2s = net.loss_l1_l2()
                for idx, (l1, l2) in enumerate(zip(l1s, l2s)):
                    loss_l1 = tf.nn.l2_loss(
                        tf.concat(l1, axis=0) - q_vect_split[gpu_id],
                        name='loss_l1_stage%d_tower%d' % (idx, gpu_id))
                    loss_l2 = tf.nn.l2_loss(
                        tf.concat(l2, axis=0) - q_heat_split[gpu_id],
                        name='loss_l2_stage%d_tower%d' % (idx, gpu_id))
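                    # Sketch (not the original code): accumulate the per-stage losses
                    # so they can be combined into a single objective later.
                    losses.append(tf.reduce_mean([loss_l1, loss_l2]))

                # After the stage loop, loss_l1 / loss_l2 hold this tower's last-stage
                # losses; keeping them separately for reporting is an assumption.
                last_losses_l1.append(loss_l1)
                last_losses_l2.append(loss_l2)

    # Sketch (assumption): sum all per-stage, per-tower losses into one training
    # objective; the original snippet is cut off before this point.
    total_loss = tf.reduce_sum(losses)
    total_loss_last_paf = tf.reduce_sum(last_losses_l1)
    total_loss_last_heat = tf.reduce_sum(last_losses_l2)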
Code Example #6
    While training, checkpoints are saved. You can test them with this Python code.
    """
    parser = argparse.ArgumentParser(
        description='Tensorflow Pose Estimation Graph Extractor')
    parser.add_argument('--model',
                        type=str,
                        default='cmu',
                        help='cmu / mobilenet / mobilenet_thin')
    args = parser.parse_args()

    input_node = tf.placeholder(tf.float32,
                                shape=(None, None, None, 3),
                                name='image')

    with tf.Session(config=config) as sess:
        net, _, last_layer = get_network(args.model,
                                         input_node,
                                         sess,
                                         trainable=False)

        tf.train.write_graph(sess.graph_def, './tmp', 'graph.pb', as_text=True)

        # graph = tf.get_default_graph()
        # for n in tf.get_default_graph().as_graph_def().node:
        #     if 'concat_stage' not in n.name:
        #         continue
        #     print(n.name)

        saver = tf.train.Saver(max_to_keep=100)
        saver.save(sess, './tmp/chk', global_step=1)
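The checkpoint saved above (./tmp/chk-1) can be reloaded for a quick smoke test. The following is a minimal sketch: the input placeholder name image comes from the snippet above, while the output tensor name Openpose/concat_stage7:0 and the dummy input shape are taken from the other examples in this section and may differ for other models.

import numpy as np
import tensorflow as tf

# Minimal sketch: reload the checkpoint written above and run a single forward pass.
with tf.Session() as sess:
    saver = tf.train.import_meta_graph('./tmp/chk-1.meta')
    saver.restore(sess, './tmp/chk-1')

    graph = tf.get_default_graph()
    image_t = graph.get_tensor_by_name('image:0')
    # Output tensor name taken from the other examples; may differ per model.
    output_t = graph.get_tensor_by_name('Openpose/concat_stage7:0')

    dummy = np.random.rand(1, 368, 432, 3).astype(np.float32)
    heat_paf = sess.run(output_t, feed_dict={image_t: dummy})
    print(heat_paf.shape)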