Example #1
def main(_):
    if FLAGS.op == 'e':
        export_graph(FLAGS.netname,
                     FLAGS.net_arg_scopename,
                     FLAGS.input_checkpoint,
                     outfile=FLAGS.node_def_file,
                     label_offset=FLAGS.label_offset,
                     batch_size=FLAGS.batch_size,
                     image_size=FLAGS.image_size,
                     is_training=FLAGS.is_training,
                     num_classes=FLAGS.num_classes,
                     global_pool_exists=FLAGS.global_pool_exists)
    elif FLAGS.op == 'f':
        freeze_graph(FLAGS.node_def_file,
                     input_saver=FLAGS.input_saver,
                     input_binary=FLAGS.input_binary,
                     input_checkpoint=FLAGS.input_checkpoint,
                     output_node_names=FLAGS.output_node_names,
                     restore_op_name=FLAGS.restore_op_name,
                     filename_tensor_name=FLAGS.filename_tensor_name,
                     output_graph=FLAGS.output_graph,
                     clear_devices=FLAGS.clear_devices,
                     initializer_nodes=FLAGS.initializer_nodes)
    else:
        print('The "op" flag must be either "e" or "f"')
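The dispatcher above reads many FLAGS values whose definitions are not shown. A minimal sketch of how such flags are usually declared with tf.app.flags in TensorFlow 1.x follows; the defaults and help strings are placeholders, not taken from the original script, and the remaining flags follow the same pattern.

import tensorflow as tf

flags = tf.app.flags
flags.DEFINE_string('op', 'e', 'Operation to run: "e" exports the graph, "f" freezes it.')
flags.DEFINE_string('netname', '', 'Name of the network architecture to export.')
flags.DEFINE_string('input_checkpoint', '', 'Checkpoint file holding the trained weights.')
flags.DEFINE_string('node_def_file', '', 'Path for the exported GraphDef.')
flags.DEFINE_string('output_graph', '', 'Path for the frozen output graph.')
FLAGS = flags.FLAGS

if __name__ == '__main__':
    tf.app.run()  # parses the flags and calls main(_)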
Example #2
def _freeze_my_graph(sess, output_node_names):

    input_graph_name = "input_graph.pb"
    output_graph_name = "output_graph.pb"
    checkpoint_prefix = os.path.join(FLAGS.checkpoint_dir,
                                     "checkpoint_new.txt")

    # export graph definition
    tf.train.write_graph(sess.graph.as_graph_def(), FLAGS.model_dir,
                         input_graph_name)
    print('graph definition saved in dir: ', FLAGS.model_dir)

    # We save out the graph to disk, and then call the const conversion routine.
    input_graph_path = os.path.join(FLAGS.model_dir, input_graph_name)
    input_saver_def_path = ""
    input_binary = False
    input_checkpoint_path = checkpoint_prefix + "-0"
    restore_op_name = "save/restore_all"
    filename_tensor_name = "save/Const:0"
    output_graph_path = os.path.join(FLAGS.model_dir, output_graph_name)
    clear_devices = False
    initializer_nodes = ""

    # freeze_graph is in TensorFlow codebase (https://github.com/tensorflow/tensorflow/blob/HEAD/tensorflow/python/tools/freeze_graph.py)
    freeze_graph.freeze_graph(input_graph_path, input_saver_def_path,
                              input_binary, input_checkpoint_path,
                              output_node_names, restore_op_name,
                              filename_tensor_name, output_graph_path,
                              clear_devices, initializer_nodes, "")
Example #3
def freeze_my_graph(sess):

    input_graph_name = "input_graph.pb"
    output_graph_name = "output_graph.pb"

    tf.train.write_graph(sess.graph.as_graph_def(), FLAGS.model_dir, input_graph_name)

    # We save out the graph to disk, and then call the const conversion
    # routine.

    checkpoint_prefix = os.path.join(FLAGS.checkpoint_dir, "saved_checkpoint")
    input_graph_path = os.path.join(FLAGS.model_dir, input_graph_name)
    input_saver_def_path = ""
    input_binary = False
    input_checkpoint_path = checkpoint_prefix + "-0"
    # input_checkpoint_path = os.path.join(FLAGS.checkpoint_dir, 'model.ckpt') + "-0"
    # input_checkpoint_path = os.path.join(FLAGS.checkpoint_dir, 'model.ckpt-299')
    output_node_names = "Dense2/output_node"
    restore_op_name = "save/restore_all"
    filename_tensor_name = "save/Const:0"
    output_graph_path = os.path.join(FLAGS.model_dir, output_graph_name)
    clear_devices = False

    freeze_graph.freeze_graph(
        input_graph_path,
        input_saver_def_path,
        input_binary,
        input_checkpoint_path,
        output_node_names,
        restore_op_name,
        filename_tensor_name,
        output_graph_path,
        clear_devices,
        initializer_nodes="",  # freeze_graph also requires initializer_nodes
    )
Example #4
def save(graph_file, ckpt_file, top_node, frozen_model_file):
    sess = K.get_session()
    saver = tf.train.Saver(write_version=saver_pb2.SaverDef.V1)
    save_path = saver.save(sess, ckpt_file)

    gd = sess.graph.as_graph_def()
    tf.train.write_graph(gd, ".", graph_file, False)

    input_graph = graph_file
    input_saver = ""
    input_binary = True
    input_checkpoint = ckpt_file
    output_node_names = top_node  # default: "Mixed_5c_Concatenated/concat"
    restore_op_name = "save/restore_all"
    filename_tensor_name = "save/Const:0"
    output_graph = frozen_model_file
    clear_devices = True
    initializer_nodes = ""
    variable_names_whitelist = ""
    variable_names_blacklist = ""
    input_meta_graph = ""
    input_saved_model_dir = ""
    from tensorflow.python.saved_model import tag_constants
    saved_model_tags = tag_constants.SERVING
    checkpoint_version = saver_pb2.SaverDef.V2
    freeze_graph.freeze_graph(
        input_graph, input_saver, input_binary, input_checkpoint,
        output_node_names, restore_op_name, filename_tensor_name, output_graph,
        clear_devices, initializer_nodes, variable_names_whitelist,
        variable_names_blacklist, input_meta_graph, input_saved_model_dir,
        saved_model_tags, checkpoint_version)
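Once freeze_graph has written the frozen GraphDef, it can be loaded for inference without the original checkpoint. Below is a minimal TensorFlow 1.x sketch; the tensor names in the usage comment are placeholders rather than values taken from the example above.

import tensorflow as tf

def load_frozen_graph(pb_path):
    # Read the serialized GraphDef and import it into a fresh graph.
    graph_def = tf.GraphDef()
    with tf.gfile.GFile(pb_path, "rb") as f:
        graph_def.ParseFromString(f.read())
    graph = tf.Graph()
    with graph.as_default():
        tf.import_graph_def(graph_def, name="")
    return graph

# Usage:
# graph = load_frozen_graph("frozen_model.pb")
# with tf.Session(graph=graph) as sess:
#     outputs = sess.run("output_node:0", feed_dict={"input_node:0": batch})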
Example #5
def freeze_my_graph(sess):

    input_graph_name = "input_graph.pb"
    output_graph_name = "output_graph.pb"

    tf.train.write_graph(sess.graph.as_graph_def(), FLAGS.model_dir,
                         input_graph_name)

    # We save out the graph to disk, and then call the const conversion
    # routine.

    checkpoint_prefix = os.path.join(FLAGS.checkpoint_dir, "saved_checkpoint")
    input_graph_path = os.path.join(FLAGS.model_dir, input_graph_name)
    input_saver_def_path = ""
    input_binary = False
    input_checkpoint_path = checkpoint_prefix + "-0"
    # input_checkpoint_path = os.path.join(FLAGS.checkpoint_dir, 'model.ckpt') + "-0"
    # input_checkpoint_path = os.path.join(FLAGS.checkpoint_dir, 'model.ckpt-299')
    output_node_names = "Dense2/output_node"
    restore_op_name = "save/restore_all"
    filename_tensor_name = "save/Const:0"
    output_graph_path = os.path.join(FLAGS.model_dir, output_graph_name)
    clear_devices = False

    freeze_graph.freeze_graph(input_graph_path, input_saver_def_path,
                              input_binary, input_checkpoint_path,
                              output_node_names, restore_op_name,
                              filename_tensor_name, output_graph_path,
                              clear_devices, initializer_nodes="")
Example #6
def export_model(graph, ckpt_filename, placeholder, logits, end_points):
    # Export graph definition
    tf.train.write_graph(graph, FLAGS.data_dir, FLAGS.output + '.pb')
    proto_filename = os.path.join(FLAGS.data_dir, FLAGS.output + '.pb')
    output_filename = os.path.join(FLAGS.data_dir, FLAGS.output + '_frozen.pb')

    predictions = end_points['Predictions']

    # Freeze the model
    print('Freezing model.')
    freeze.freeze_graph(input_graph=proto_filename,
                        input_saver='',
                        input_binary=False,
                        input_checkpoint=ckpt_filename,
                        output_node_names=','.join([logits.op.name,
                                                    predictions.op.name]),
                        restore_op_name='save/restore_all',
                        filename_tensor_name='save/Const:0',
                        output_graph=output_filename,
                        clear_devices=True,
                        initializer_nodes='')
    print('Model frozen.')
    frozen_graph_def = tf.GraphDef()
    with open(output_filename, "rb") as f:
        data = f.read()
        frozen_graph_def.ParseFromString(data)

    print('Optimizing model.')
    optimized_graph_def = optimize.optimize_for_inference(
        frozen_graph_def, [placeholder.op.name],
        [logits.op.name, predictions.op.name], tf.string.as_datatype_enum)
    with open(output_filename, 'wb') as f:
        f.write(optimized_graph_def.SerializeToString())
    print('Model optimized.')
    return output_filename, proto_filename
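The example above refers to the freezing and optimization tools through the aliases freeze and optimize, whose import statements are not shown. A likely import block, stated as an assumption rather than copied from the original file:

from tensorflow.python.tools import freeze_graph as freeze
from tensorflow.python.tools import optimize_for_inference_lib as optimize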
Example #7
    def FreezeGraph(sess):
        checkpoint_prefix = os.path.join(MODEL_FOLDER, "saved_checkpoint")
        checkpoint_state_name = "checkpoint_state"
        input_graph_name = "input_graph.pb"
        output_graph_name = "output_graph.pb"

        # Save a checkpoint and the graph definition for the current session.
        saver = tf.train.Saver()
        saver.save(sess, checkpoint_prefix, global_step=0,
                   latest_filename=checkpoint_state_name)
        tf.train.write_graph(sess.graph.as_graph_def(), MODEL_FOLDER, input_graph_name)

        # We save out the graph to disk, and then call the const conversion
        # routine.
        input_graph_path = os.path.join(MODEL_FOLDER, input_graph_name)
        input_saver_def_path = ""
        input_binary = False
        input_checkpoint_path = checkpoint_prefix + "-0"
        output_node_names = "check_data_node,check_prediction"
        restore_op_name = "save/restore_all"
        filename_tensor_name = "save/Const:0"
        output_graph_path = os.path.join(MODEL_FOLDER, output_graph_name)
        clear_devices = False

        freeze_graph(input_graph_path, input_saver_def_path,
                                input_binary, input_checkpoint_path,
                                output_node_names, restore_op_name,
                                filename_tensor_name, output_graph_path,
                                clear_devices, initializer_nodes="")
Example #8
def create_final_pb_files(graph_file, ckpt_file, top_node, frozen_model_file):
    input_graph = graph_file
    input_saver = ""
    input_binary = True
    input_checkpoint = ckpt_file
    output_node_names = top_node # "Mixed_5c_Concatenated/concat"
    restore_op_name = "save/restore_all"
    filename_tensor_name = "save/Const:0"
    output_graph = frozen_model_file
    clear_devices = True
    initializer_nodes = ""
    variable_names_whitelist = ""
    variable_names_blacklist = ""
    input_meta_graph = ""
    input_saved_model_dir = ""
    from tensorflow.python.saved_model import tag_constants
    saved_model_tags = tag_constants.SERVING
    checkpoint_version = saver_pb2.SaverDef.V2
    freeze_graph.freeze_graph(input_graph,
                 input_saver,
                 input_binary,
                 input_checkpoint,
                 output_node_names,
                 restore_op_name,
                 filename_tensor_name,
                 output_graph,
                 clear_devices,
                 initializer_nodes,
                 variable_names_whitelist,
                 variable_names_blacklist,
                 input_meta_graph,
                 input_saved_model_dir,
                 saved_model_tags,
                 checkpoint_version)
Example #9
def freeze_graph(args):
    checkpoint_version = saver_pb2.SaverDef.V2
    input_graph = args.train_dir + '/inference_graph.pb'
    input_checkpoint = tf.train.latest_checkpoint(args.train_dir)
    input_binary = True
    output_graph = args.train_dir + '/inference_graph_frozen.pb'
    output_node_names = 'MobilenetV1/Predictions/Reshape_1'
    input_saved_model_dir = ""
    saved_model_tags = "serve"
    input_meta_graph = ""
    variable_names_blacklist = ""
    variable_names_whitelist = ""
    initializer_nodes = ""
    clear_devices = True
    filename_tensor_name = "save/Const:0"
    restore_op_name = "save/restore_all"
    input_saver = ""
    print("freeze_graph input_checkpoint : {}".format(input_checkpoint))

    fg.freeze_graph(input_graph, input_saver, input_binary, input_checkpoint,
                    output_node_names, restore_op_name, filename_tensor_name,
                    output_graph, clear_devices, initializer_nodes,
                    variable_names_whitelist, variable_names_blacklist,
                    input_meta_graph, input_saved_model_dir, saved_model_tags,
                    checkpoint_version)
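The wrapper above only reads args.train_dir, so a minimal command-line driver could look like the following sketch; the flag name and help text are assumptions, not taken from the original script.

import argparse

parser = argparse.ArgumentParser()
parser.add_argument('--train_dir', required=True,
                    help='directory containing inference_graph.pb and the training checkpoints')
args = parser.parse_args()
freeze_graph(args)  # writes <train_dir>/inference_graph_frozen.pb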
Example #10
  def testFreezeGraph(self):

    checkpoint_prefix = os.path.join(self.get_temp_dir(), "saved_checkpoint")
    checkpoint_state_name = "checkpoint_state"
    input_graph_name = "input_graph.pb"
    output_graph_name = "output_graph.pb"

    # We'll create an input graph that has a single variable containing 1.0,
    # and that then multiplies it by 2.
    with tf.Graph().as_default():
      variable_node = tf.Variable(1.0, name="variable_node")
      output_node = tf.mul(variable_node, 2.0, name="output_node")
      sess = tf.Session()
      init = tf.initialize_all_variables()
      sess.run(init)
      output = sess.run(output_node)
      self.assertNear(2.0, output, 0.00001)
      saver = tf.train.Saver()
      saver.save(sess, checkpoint_prefix, global_step=0,
                 latest_filename=checkpoint_state_name)
      tf.train.write_graph(sess.graph.as_graph_def(), self.get_temp_dir(),
                           input_graph_name)

    # We save out the graph to disk, and then call the const conversion
    # routine.
    input_graph_path = os.path.join(self.get_temp_dir(), input_graph_name)
    input_saver_def_path = ""
    input_binary = False
    input_checkpoint_path = checkpoint_prefix + "-0"
    output_node_names = "output_node"
    restore_op_name = "save/restore_all"
    filename_tensor_name = "save/Const:0"
    output_graph_path = os.path.join(self.get_temp_dir(), output_graph_name)
    clear_devices = False

    freeze_graph.freeze_graph(input_graph_path, input_saver_def_path,
                              input_binary, input_checkpoint_path,
                              output_node_names, restore_op_name,
                              filename_tensor_name, output_graph_path,
                              clear_devices, initializer_nodes="")

    # Now we make sure the variable is now a constant, and that the graph still
    # produces the expected result.
    with tf.Graph().as_default():
      output_graph_def = tf.GraphDef()
      with open(output_graph_path, "rb") as f:
        output_graph_def.ParseFromString(f.read())
        _ = tf.import_graph_def(output_graph_def, name="")

      self.assertEqual(4, len(output_graph_def.node))
      for node in output_graph_def.node:
        self.assertNotEqual("Variable", node.op)

      with tf.Session() as sess:
        output_node = sess.graph.get_tensor_by_name("output_node:0")
        output = sess.run(output_node)
        self.assertNear(2.0, output, 0.00001)
Example #11
def freeze_model():
    """ freeze graph. """
    input_node_names = "input_node"
    output_node_names = "output_node"

    content_images = reader.get_image(FLAGS.content_image, FLAGS.image_size)
    images = tf.pack([content_images])

    input_images = tf.placeholder(dtype=tf.float32, name=input_node_names)
    generated_images = model.net(input_images / 255., if_train=False)

    output_format = tf.saturate_cast(generated_images + reader.mean_pixel,
                                     tf.uint8,
                                     name=output_node_names)

    with tf.Session() as sess:
        checkpoint_file = tf.train.latest_checkpoint(FLAGS.model)
        if not checkpoint_file:
            print('Could not find trained model in {}'.format(FLAGS.model))
            return
        print('Using model from {}'.format(checkpoint_file))

        saver = tf.train.Saver()
        saver.restore(sess, checkpoint_file)

        in_images = sess.run(images)
        images_t = sess.run(output_format, feed_dict={input_images: in_images})

        # Save graph
        tf.train.write_graph(sess.graph.as_graph_def(), FLAGS.model,
                             FLAGS.in_graph_name)

    checkpoint_prefix = os.path.join(FLAGS.model, "saved_checkpoint")
    checkpoint_state_name = "checkpoint_state"

    # We save out the graph to disk, and then call the const conversion
    # routine.
    input_graph_path = os.path.join(FLAGS.model, FLAGS.in_graph_name)
    input_saver_def_path = ""
    input_binary = False

    restore_op_name = "save/restore_all"
    filename_tensor_name = "save/Const:0"
    output_graph_path = os.path.join(FLAGS.model, FLAGS.out_graph_name)
    clear_devices = False

    freeze_graph.freeze_graph(input_graph_path, input_saver_def_path,
                              input_binary, checkpoint_file, output_node_names,
                              restore_op_name, filename_tensor_name,
                              output_graph_path, clear_devices, "")

    print('------------------------------------')
    print('Finished!')
Example #12
def freeze_model(input_graph_path, output_graph_path, output_node_names, checkpoint_path):
    # Convert the variables in the checkpoint into constants in the output graph.
    input_saver_def_path = ""
    input_binary = False
    restore_op_name = "save/restore_all"
    filename_tensor_name = "save/Const:0"
    clear_devices = False

    freeze_graph.freeze_graph(input_graph_path, input_saver_def_path,
                              input_binary, checkpoint_path,
                              output_node_names, restore_op_name,
                              filename_tensor_name, output_graph_path,
                              clear_devices, "")
Example #13
def main():
    parser = argparse.ArgumentParser()
    parser.add_argument('--model_dir', required=True, help='Models with *.h5')
    parser.add_argument('--output_dir',
                        required=True,
                        help='Where to output the files (as *.pb)')
    args = parser.parse_args()

    fnames = glob.glob(os.path.join(args.model_dir, '*.h5'))
    first = np.zeros((1, 50, 50, 3))

    for keras_model_fname in fnames:
        sess = tf.Session()
        frozen_graph_path = os.path.splitext(
            os.path.split(keras_model_fname)[1])[0] + '.pb'
        frozen_graph_path = os.path.join(args.output_dir, frozen_graph_path)
        print 'Producing: ' + frozen_graph_path

        model = get_keras_model(keras_model_fname, sess)
        img1 = tf.placeholder(tf.float32,
                              shape=(None, 50, 50, 3),
                              name='input_img')
        tf_model = model(img1)
        output = tf.identity(tf_model, name='output_prob')

        # Run to set weights
        sess.run(output, feed_dict={img1: first})
        tf.train.write_graph(sess.graph_def, '/tmp', 'graph-structure.pb')
        saver = saver_lib.Saver()
        checkpoint_path = saver.save(sess, '/tmp/vars', global_step=0)

        input_graph_path = '/tmp/graph-structure.pb'
        input_saver_def_path = ''
        input_binary = False
        input_checkpoint_path = '/tmp/vars-0'
        output_node_names = 'output_prob'
        restore_op_name = 'save/restore_all'
        filename_tensor_name = 'save/Const:0'
        clear_devices = False
        initializer_nodes = ""
        freeze_graph(input_graph_path, input_saver_def_path, input_binary,
                     input_checkpoint_path, output_node_names, restore_op_name,
                     filename_tensor_name, frozen_graph_path, clear_devices,
                     initializer_nodes)

        # Clean up
        sess.close()
        tf.reset_default_graph()
Example #14
def main():

    # if not FLAGS.output_file:
    #     raise ValueError('You must supply the path to save to with --output_file')
    if FLAGS.is_video_model and not FLAGS.num_frames:
        raise ValueError(
            'Number of frames must be specified for video models with --num_frames'
        )
    if not FLAGS.checkpoint_path:
        # checkpoint_path = experiment_dir
        checkpoint_path = os.path.join(experiment_dir, 'train')
        checkpoint_path = tf.train.latest_checkpoint(checkpoint_path)
    else:
        checkpoint_path = tf.train.latest_checkpoint(FLAGS.checkpoint_path)
        print('#####2', checkpoint_path)

    if FLAGS.checkpoint_version == 1:
        checkpoint_version = saver_pb2.SaverDef.V1
    elif FLAGS.checkpoint_version == 2:
        checkpoint_version = saver_pb2.SaverDef.V2
    else:
        print("Invalid checkpoint version (must be '1' or '2'): %d" %
              FLAGS.checkpoint_version)
        return -1

    export_inference_graph(FLAGS.dataset_name, dataset_dir, FLAGS.model_name,
                           FLAGS.labels_offset, FLAGS.is_training,
                           FLAGS.final_endpoint, FLAGS.image_size,
                           FLAGS.use_grayscale, FLAGS.is_video_model,
                           FLAGS.batch_size, FLAGS.num_frames, FLAGS.quantize,
                           FLAGS.write_text_graphdef, output_file)

    if not os.path.isfile(output_file):
        raise ValueError('graph not found')
    freeze_graph(output_file, FLAGS.input_saver, FLAGS.input_binary,
                 checkpoint_path, FLAGS.output_node_names,
                 FLAGS.restore_op_name, FLAGS.filename_tensor_name,
                 FLAGS.output_graph, FLAGS.clear_devices,
                 FLAGS.initializer_nodes, FLAGS.variable_names_whitelist,
                 FLAGS.variable_names_blacklist, FLAGS.input_meta_graph,
                 FLAGS.input_saved_model_dir, FLAGS.saved_model_tags,
                 checkpoint_version)
Example #15
    def save(self, file_prefix):
        graph_path = '{}.graph'.format(file_prefix)
        checkpoint_path = '{}.ckpt'.format(file_prefix)
        frozen_graph_path = '{}.pb'.format(file_prefix)

        tf.train.write_graph(self._session.graph_def,
                             '',
                             graph_path,
                             as_text=False)
        self._saver.save(self._session, checkpoint_path)

        freeze_graph(input_graph=graph_path,
                     input_saver="",
                     input_binary=True,
                     input_checkpoint=os.path.join(os.getcwd(),
                                                   checkpoint_path),
                     output_node_names='output',
                     restore_op_name="",
                     filename_tensor_name="",
                     output_graph=os.path.join(os.getcwd(), frozen_graph_path),
                     clear_devices=True,
                     initializer_nodes="")
Example #16
def graph_freez(model_folder, output_names):
    print("Model folder", model_folder)
    checkpoint = tf.train.get_checkpoint_state(model_folder)
    print(checkpoint)
    checkpoint_path = checkpoint.model_checkpoint_path
    output_graph_filename = checkpoint_path + fr_name

    input_saver_def_path = ""
    input_binary = True
    output_node_names = output_names
    restore_op_name = "save/restore_all"
    filename_tensor_name = "save/Const:0"
    clear_devices = False
    input_meta_graph = checkpoint_path + ".meta"

    freeze_graph.freeze_graph("", input_saver_def_path, input_binary,
                              checkpoint_path, output_node_names,
                              restore_op_name, filename_tensor_name,
                              output_graph_filename, clear_devices, "", "",
                              input_meta_graph)

    return output_graph_filename
Example #17
def train_model(config):
    logs_path = "tensorboard/" + strftime("train_%Y_%m_%d_%H_%M_%S", gmtime())

    train_dataset = load_dataset(config.train_path)

    train_dataset, val_dataset = split_train_and_val(train_dataset)

    train_feature_minibatches, train_labels_minibatches, train_seqlens_minibatches = make_batches(
        train_dataset, batch_size=Config.batch_size)
    val_feature_minibatches, val_labels_minibatches, val_seqlens_minibatches = make_batches(
        val_dataset, batch_size=len(val_dataset[0]))

    train_feature_minibatches = pad_all_batches(train_feature_minibatches)
    val_feature_minibatches = pad_all_batches(val_feature_minibatches)

    num_examples = np.sum(
        [batch.shape[0] for batch in train_feature_minibatches])
    num_batches_per_epoch = int(math.ceil(num_examples / Config.batch_size))

    with tf.Graph().as_default():
        model = config.get_model()
        init = tf.global_variables_initializer()

        saver = tf.train.Saver(tf.global_variables())

        with tf.Session() as session:
            # Initializate the weights and biases
            session.run(init)
            if config.load_from_file is not None:
                new_saver = tf.train.import_meta_graph('%s.meta' %
                                                       config.load_from_file,
                                                       clear_devices=True)
                new_saver.restore(session, config.load_from_file)

            train_writer = tf.summary.FileWriter(logs_path + '/train',
                                                 session.graph)

            step_ii = 0

            if config.save_every > 0 and config.save_to_file:
                os.makedirs(os.path.dirname(config.save_to_file),
                            exist_ok=True)
                input_graph_name = os.path.join(
                    os.path.dirname(config.save_to_file), "input_graph.pb")
                tf.train.write_graph(session.graph_def, "", input_graph_name)

            for curr_epoch in range(config.num_epochs):
                total_train_cost = total_train_wer = 0
                start = time.time()

                for batch in random.sample(range(num_batches_per_epoch),
                                           num_batches_per_epoch):
                    cur_batch_size = len(train_seqlens_minibatches[batch])

                    try:
                        batch_cost, batch_ler, summary = model.train_on_batch(
                            session,
                            train_feature_minibatches[batch],
                            train_labels_minibatches[batch],
                            train_seqlens_minibatches[batch],
                            train=True)
                    except BatchSkipped:
                        continue

                    total_train_cost += batch_cost * cur_batch_size
                    total_train_wer += batch_ler * cur_batch_size

                    train_writer.add_summary(summary, step_ii)
                    step_ii += 1

                train_cost = total_train_cost / num_examples
                train_wer = total_train_wer / num_examples

                val_batch_cost, val_batch_ler, _ = model.train_on_batch(
                    session,
                    val_feature_minibatches[0],
                    val_labels_minibatches[0],
                    val_seqlens_minibatches[0],
                    train=False)

                log = "Epoch {}/{}, train_cost = {:.3f}, train_ed = {:.3f}, val_cost = {:.3f}, val_ed = {:.3f}, time = {:.3f}"
                print(
                    log.format(curr_epoch + 1, config.num_epochs, train_cost,
                               train_wer, val_batch_cost, val_batch_ler,
                               time.time() - start))

                # Write out status to JSON for CodaLab table display
                with open('status.json', 'w') as fp:
                    json.dump(
                        {
                            'epoch': curr_epoch + 1,
                            'train_cost': float(train_cost),
                            'train_wer': float(train_wer),
                            'val_batch_cost': float(val_batch_cost),
                            'val_batch_ler': float(val_batch_ler),
                        }, fp)

                if config.print_every > 0 and (curr_epoch +
                                               1) % config.print_every == 0:
                    batch_ii = 0
                    model.print_results(session,
                                        train_feature_minibatches[batch_ii],
                                        train_labels_minibatches[batch_ii],
                                        train_seqlens_minibatches[batch_ii])

                if config.save_every > 0 and config.save_to_file and (
                        curr_epoch + 1) % config.save_every == 0:
                    os.makedirs(os.path.dirname(config.save_to_file),
                                exist_ok=True)
                    saver.save(session,
                               config.save_to_file,
                               global_step=curr_epoch + 1)

                    output_graph_name = os.path.join(
                        os.path.dirname(config.save_to_file),
                        "output_graph-" + str(curr_epoch + 1) + ".pb")

                    input_saver_def_path = ""
                    input_binary = False
                    input_checkpoint_path = config.save_to_file + "-" + str(
                        curr_epoch + 1)

                    output_node_names = "DecodedSequence"
                    restore_op_name = "save/restore_all"
                    filename_tensor_name = "save/Const:0"
                    clear_devices = False

                    freeze_graph.freeze_graph(
                        input_graph_name, input_saver_def_path, input_binary,
                        input_checkpoint_path, output_node_names,
                        restore_op_name, filename_tensor_name,
                        output_graph_name, clear_devices, "")
Example #18
import os
import freeze_graph
import tensorflow as tf

checkpoint_state_name = "model.ckpt"
input_graph_name = "starnet.pb"
output_graph_name = "starne_weights.pb"

input_graph_path = os.path.join("./", input_graph_name)
input_saver_def_path = ""
input_binary = False
input_checkpoint_path = os.path.join("./", 'saved_checkpoint')

print(tf.version.VERSION)

output_node_names = "generator/g_deconv7/Sub"
restore_op_name = "save/restore_all" # not used
filename_tensor_name = "save/Const:0" # not used
output_graph_path = os.path.join("./", output_graph_name)
clear_devices = False

freeze_graph.freeze_graph("./starnet_generator.pb",
                          "",
                          True,
                          "./model.ckpt",
                          output_node_names,
                          restore_op_name,
                          filename_tensor_name,
                          "./starnet_generator_weights.pb",
                          clear_devices,
                          "")
Example #19
def main():
    parser = argparse.ArgumentParser()

    parser.add_argument('--input_dir',
                        type=str,
                        default="output",
                        help='directory of checkpoint files')
    parser.add_argument('--output',
                        type=str,
                        default=DEFAULT_MODEL,
                        help='exported file')
    parser.add_argument('--image_h',
                        type=int,
                        default=-1,
                        help='output image height')
    parser.add_argument('--image_w',
                        type=int,
                        default=-1,
                        help='output image width')

    parser.add_argument('--noise',
                        type=float,
                        default=0.,
                        help='noise magnitude')

    logging.basicConfig(stream=sys.stdout,
                        format='%(asctime)s %(levelname)s:%(message)s',
                        level=logging.INFO,
                        datefmt='%I:%M:%S')

    args = parser.parse_args()
    tmp_dir = os.path.join(args.input_dir, 'tmp')
    if not os.path.exists(tmp_dir):
        os.mkdir(tmp_dir)
    ckpt_dir = os.path.join(tmp_dir, 'ckpt')
    if not os.path.exists(ckpt_dir):
        os.mkdir(ckpt_dir)

    args.save_model = os.path.join(ckpt_dir, 'model')

    with open(os.path.join(args.input_dir, 'result.json'), 'r') as f:
        result = json.load(f)

    model_name = result['model_name']
    best_model_full = result['best_model']
    best_model_arr = best_model_full.split('/')
    best_model_arr[0] = args.input_dir
    best_model = os.path.join(*best_model_arr)

    if args.image_w < 0:
        if 'image_w' in result:
            args.image_w = result['image_w']
        else:
            args.image_w = vgg.DEFAULT_SIZE
    if args.image_h < 0:
        if 'image_h' in result:
            args.image_h = result['image_h']
        else:
            args.image_h = vgg.DEFAULT_SIZE

    if args.output == DEFAULT_MODEL:
        args.output = model_name + ".pb"

    logging.info("loading best model from %s" % best_model)

    graph = tf.Graph()
    with graph.as_default():
        with tf.name_scope(model_name):
            model = StyleTransfer(is_training=False,
                                  batch_size=1,
                                  image_h=args.image_h,
                                  image_w=args.image_w,
                                  inf_noise=args.noise)
        model_saver = tf.train.Saver(name='saver', sharded=True)
    try:
        with tf.Session(graph=graph) as session:

            logging.info("Loading model")
            model_saver.restore(session, best_model)

            logging.info("Verify model")
            batch_gen_valid = BatchGenerator(1,
                                             args.image_h,
                                             args.image_w,
                                             valid=True)
            _, _, _, test_out, _ = model.run_epoch(session,
                                                   tf.no_op(),
                                                   None,
                                                   batch_gen_valid,
                                                   num_iterations=1)

            utils.write_image(
                os.path.join(args.input_dir, 'export_verify.png'), test_out)

            logging.info("Exporting model")
            best_model = model_saver.save(session, args.save_model)
            # Save graph def
            tf.train.write_graph(session.graph_def, tmp_dir, "temp_model.pb",
                                 False)

            saver_def = model_saver.as_saver_def()
            input_graph_path = os.path.join(tmp_dir, "temp_model.pb")
            input_saver_def_path = ""  # we don't have this
            input_binary = True
            input_checkpoint_path = args.save_model
            output_node_names = model_name + "/output"
            restore_op_name = saver_def.restore_op_name
            filename_tensor_name = saver_def.filename_tensor_name
            output_graph_path = os.path.join(args.input_dir, args.output)
            clear_devices = False

            freeze_graph.freeze_graph(input_graph_path, input_saver_def_path,
                                      input_binary, input_checkpoint_path,
                                      output_node_names, restore_op_name,
                                      filename_tensor_name, output_graph_path,
                                      clear_devices, None)
            shutil.rmtree(tmp_dir)
    except:
        print("Unexpected error:", sys.exc_info()[0])
        raise
Example #20
def main():
    k = 0
    input_node_name = 'input'
    output_node_name = 'output'
    dropout_name = 'dropout'
    phase_name = 'phase_train'
    rates = [0.0000001, 0.00000005, 0.000000025, 0.00000001, 0.000000005]
    dropouts = [0.65, 0.7, 0.75]
    for r in rates:
        for d in dropouts:
            total_accuracy = 0
            # Start the TensorFlow graph and session
            tf.reset_default_graph()
            with tf.Session() as sess:
                # Input data
                x = tf.placeholder(tf.float32, shape=[None, HEIGHT, WIDTH], name=input_node_name)
                # Labels
                y = tf.placeholder(tf.float32, shape=[None, NUM_LABELS], name = 'label')
                # Dropout keep probability
                dropout = tf.placeholder(tf.float32, name=dropout_name)
                # Training/inference phase flag
                phase_train = tf.placeholder(tf.bool, name=phase_name)
                # Neural network model
                logits = get_model(x, dropout, output_node_name, phase_train)
                '''logits = get_model(x, dropout, output_node_name)'''
                # Loss function
                with tf.name_scope('loss'):
                    # DIFFERS FROM THE VIDEO!!!
                    loss = tf.reduce_mean(tf.nn.softmax_cross_entropy_with_logits(logits=logits, labels=y))
                    tf.summary.scalar('loss', loss)
                # Optimizer for the loss function
                with tf.name_scope('train'):
                    train_step = tf.train.AdamOptimizer(r).minimize(loss)
                # Accuracy computation
                with tf.name_scope('accuracy'):
                    predicided = tf.argmax(logits, 1)
                    truth = tf.argmax(y, 1)
                    correct_prediction = tf.equal(predicided, truth)
                    accuracy = tf.reduce_mean(tf.cast(correct_prediction, tf.float32))
                    confusion_matrix = tf.confusion_matrix(truth, predicided, num_classes=NUM_LABELS)
                    tf.summary.scalar('accuracy', accuracy)
                # TensorBoard setup
                summ = tf.summary.merge_all()
                sess.run(tf.global_variables_initializer())
                tf.train.write_graph(sess.graph_def, 'out', MODEL_NAME + str(k) + '.pbtxt', True)
                writer = tf.summary.FileWriter(LOGDIR)
                writer.add_graph(sess.graph)
                test_writer = tf.summary.FileWriter(TEST_LOGDIR)
                saver = tf.train.Saver()
                # Train the model
                print('Starting training\n')
                batch = get_batch(BATCH_SIZE, PATH_TRAIN)
                for i in range(1, ITERATIONS + 1):
                    print ("dreamNet:", k, "| iteration:", i)
                    X, Y = next(batch)
                    '''if i % EVAL_EVERY == 0:
                        # DIFFERS FROM THE VIDEO!!!
                        #[train_accuracy, train_loss, s] = sess.run([accuracy, loss, summ], feed_dict={x: X, y: Y, dropout: 1.0, phase_train: False})
                        [train_accuracy, train_loss, s] = sess.run([accuracy, loss, summ], feed_dict={x: X, y: Y, dropout: 1.0})
                        acc_and_loss = [i, train_loss, train_accuracy * 100]
                        epoch_acc = train_accuracy
                        writer.add_summary(s, i)
                        #print(acc_and_loss)
                    if i % (EVAL_EVERY * 20) == 0:
                        #train_confusion_matrix = sess.run([confusion_matrix], feed_dict={x: X, y: Y, dropout: 1.0, phase_train: False})
                        train_confusion_matrix = sess.run([confusion_matrix], feed_dict={x: X, y: Y, dropout: 1.0})
                        header = LABEL_TO_INDEX_MAP.keys()'''
                    '''sess.run(train_step, feed_dict={x: X, y: Y, dropout: d, phase_train: n})'''
                    sess.run(train_step, feed_dict={x: X, y: Y, dropout: d, phase_train: True})
                    '''[train_accuracy, train_loss] = sess.run([accuracy, loss], feed_dict={x: X, y: Y, dropout: 1.0, phase_train: False})
                    print("train:", train_accuracy, "loss:", train_loss)'''
                    if (i % 5 == 0):                    
                        # Evaluate the model on new sounds
                        batch_test = get_batch(BATCH_SIZE, PATH_TEST)
                        total_accuracy_test = 0
                        for j in range(ITERATIONS_TEST):  # separate index so the outer training counter i is not clobbered
                            X, Y = next(batch_test, PATH_TEST)
                            '''test_accuracy, s = sess.run([accuracy, summ], feed_dict={x: X, y: Y, dropout: 1.0, phase_train: False})'''
                            test_accuracy, s = sess.run([accuracy, summ], feed_dict={x: X, y: Y, dropout: 1.0, phase_train: False})
                            total_accuracy_test += (test_accuracy/ITERATIONS_TEST)
                            #test_writer.add_summary(s, i)
                        print("test:", total_accuracy_test)
                        # Evaluate the model on old sounds
                        batch_train = get_batch(BATCH_SIZE, PATH2_TEST)
                        total_accuracy_train = 0
                        for j in range(ITERATIONS_TEST):
                            X, Y = next(batch_train, PATH2_TEST)
                            '''test_accuracy, s = sess.run([accuracy, summ], feed_dict={x: X, y: Y, dropout: 1.0, phase_train: False})'''
                            test_accuracy, s = sess.run([accuracy, summ], feed_dict={x: X, y: Y, dropout: 1.0, phase_train: False})
                            total_accuracy_train += (test_accuracy/ITERATIONS_TEST)
                            #test_writer.add_summary(s, i)
                        print("test:", total_accuracy_train)
                        # Save the model
                        if total_accuracy_test > 0.9 and total_accuracy_train > 0.9:
                            saver.save(sess, 'out/' + MODEL_NAME + str(k) + '.chkp')
                            s = ''
                            for n in tf.get_default_graph().as_graph_def().node:
                                s += (n.name + ',')
                            s = s[:-1]
                            freeze_graph.freeze_graph('E:/Temp/AD_001/model/out', 'FC/' + output_node_name, MODEL_NAME + str(k))
                            input_graph_def = tf.GraphDef()
                            with tf.gfile.Open('E:/Temp/AD_001/model/' + MODEL_NAME + str(k) + '.pb', "rb") as f:
                                input_graph_def.ParseFromString(f.read())
                            output_graph_def = optimize_for_inference_lib.optimize_for_inference(input_graph_def, [input_node_name, dropout_name, phase_name], ['FC/' + output_node_name], tf.float32.as_datatype_enum)
                            with tf.gfile.FastGFile('E:/Temp/AD_001/model/opt_' + MODEL_NAME + str(k) + '.pb', "wb") as f:
                                f.write(output_graph_def.SerializeToString())
                            k += 1
Example #21
    from tensorflow.core.protobuf import saver_pb2
    input_saver = ""
    input_binary = True
    checkpoint_path = tf.train.latest_checkpoint(train_dir)
    print('#######', checkpoint_path)
    if FLAGS.model_name.startswith('inception_v1'):
        output_node_names = "InceptionV1/Predictions/Reshape_1"
    elif FLAGS.model_name.startswith('mobilenet_v1'):
        output_node_names = "MobilenetV1/Predictions/Reshape_1"

    restore_op_name = "save/restore_all"
    filename_tensor_name = "save/Const:0"
    output_graph_name = "{}_{}_{}_frozen.pb".format(FLAGS.project_name,
                                                    FLAGS.dataset_name,
                                                    FLAGS.model_name)
    output_graph = os.path.join(train_dir, output_graph_name)
    clear_devices = True
    initializer_nodes = ""
    variable_names_whitelist = ""
    variable_names_blacklist = ""
    input_meta_graph = ""
    input_saved_model_dir = ""
    saved_model_tags = "serve"
    checkpoint_version = saver_pb2.SaverDef.V2
    freeze_graph(output_file, input_saver, input_binary, checkpoint_path,
                 output_node_names, restore_op_name, filename_tensor_name,
                 output_graph, clear_devices, initializer_nodes,
                 variable_names_whitelist, variable_names_blacklist,
                 input_meta_graph, input_saved_model_dir, saved_model_tags,
                 checkpoint_version)
Example #22
def save_graph(H, ckpt_file, output_graph_file):
    """
    Combines a checkpoint and a graph definition to create a self-contained output graph.
    """
    write_graph_to_tb = False
    tf.reset_default_graph()
    googlenet = googlenet_load.init(H)
    x_in = tf.placeholder(tf.float32, name='x_in')
    if H['arch']['use_lstm']:
        pred_boxes, pred_logits, pred_confidences = build_lstm_forward(H, tf.expand_dims(x_in, 0), googlenet, 'test', reuse=None)
    else:
        pred_boxes, pred_logits, pred_confidences = build_overfeat_forward(H, tf.expand_dims(x_in, 0), googlenet, 'test')
    saver = tf.train.Saver()
    with tf.Session() as sess:
        sess.run(tf.initialize_all_variables())
        saver.restore(sess, ckpt_file)
        # all_vars = tf.all_variables()
        # for v in all_vars:
        #     print 'var: ', v.name

        # write out the graph def to be used by freeze_graph and then removed
        temp_path = "/Users/brucks/Desktop/"
        temp_graph_name = "temp_unneeded.pb"
        temp_graph_pathname = os.path.join(temp_path, temp_graph_name)
        tf.train.write_graph(sess.graph_def, temp_path, temp_graph_name, as_text=False)

        # call freeze_graph with the graph def and the checkpoint to save a combined graph that can be read into c++
        input_saver_def_path = ""
        input_binary = True
        input_checkpoint_path = ckpt_file
        if H['arch']['use_lstm']:
            output_node_names = "x_in,decoder/box_ip0,decoder/conf_ip0,decoder/box_ip,decoder/conf_ip1," \
                                "decoder/box_ip2,decoder/conf_ip2,decoder/box_ip3,decoder/conf_ip3," \
                                "decoder/box_ip4,decoder/conf_ip4"
        else:
            output_node_names = "x_in,pred_conf,pred_boxes"

        restore_op_name = "save/restore_all"
        filename_tensor_name = "save/Const:0"
        clear_devices = True
        initializer_nodes = ""
        success = freeze_graph.freeze_graph(temp_graph_pathname, input_saver_def_path,
                                  input_binary, input_checkpoint_path,
                                  output_node_names, restore_op_name,
                                  filename_tensor_name, output_graph_file,
                                  clear_devices, initializer_nodes)

        if write_graph_to_tb:
            output_path = os.path.dirname(output_graph_file)
            writer = tf.train.SummaryWriter(logdir=output_path) #,flush_secs=10)
            # add the graph def to the summary so it can be visualized
            writer.add_graph(sess.graph)

    # Print out a status message including the variable names saved in the graph
    if success >= 0:
        print 'Output graph saved to: ', output_graph_file
        vars = output_node_names.split(",")
        print '\tInput variable name: ', vars[0]
        print '\tOutput variable names: %s, %s' % (vars[1], vars[2])

        # Now remove the temporary graph that was created
        if os.path.exists(temp_graph_pathname):
            os.remove(temp_graph_pathname)
    else:
        print 'Error: graph not saved'
Example #23
# Train
tf.initialize_all_variables().run()
for i in range(1000):
  batch_xs, batch_ys = mnist.train.next_batch(100)
  train_step.run({x: batch_xs, y_: batch_ys})

# Test trained model
correct_prediction = tf.equal(tf.argmax(y, 1), tf.argmax(y_, 1))
accuracy = tf.reduce_mean(tf.cast(correct_prediction, tf.float32))
print(accuracy.eval({x: mnist.test.images, y_: mnist.test.labels}))

# Save graph
#do_save=tf.assign(saved_result, y)

#softmax_linear = tf.add(tf.matmul(local4, weights), biases, name=scope.name)
print('Saving graph...')
saver = tf.train.Saver(tf.all_variables())
saver.save(sess, "saved_checkpoint")
tf.train.write_graph(sess.graph.as_graph_def(), "", "input_graph.pb")
freeze_graph.freeze_graph("input_graph.pb",
                    "",
                    False,
                    "saved_checkpoint",
                    "b,W",
                    "save/restore_all",
                    "save/Const:0",
                    "output_graph.pb",
                    False, "")
print('Graph saved')
Example #24

  input_graph_path = os.path.join("models/", input_graph_name)
  input_saver_def_path = ""
  input_binary = False
  input_checkpoint_path = os.path.join("models/", 'saved_checkpoint') + "-0"

  # Note that this normally should be only "output_node"!!!
  output_node_names = "Softmax/Sparse_softmax,Softmax/costvalue,Softmax/accu,Softmax_params/softmax_w"
  #output_node_names = "Softmax/Sparse_softmax/Sparse_softmax"
  restore_op_name = "save/restore_all"
  filename_tensor_name = "save/Const:0"
  output_graph_path = os.path.join("models/", output_graph_name)
  clear_devices = False

  freeze_graph.freeze_graph(input_graph_path, input_saver_def_path,
                            input_binary, input_checkpoint_path,
                            output_node_names, restore_op_name,
                            filename_tensor_name, output_graph_path,
                            clear_devices, "")


  tf.train.write_graph(session.graph.as_graph_def(), "models/", "test.pb", as_text=True)
    
"""Additional plots"""
print('The accuracy on the test data is %.3f, before training was %.3f' %(acc_test,acc_test_before))

xnum = np.arange(0,max_iterations,unit)
plt.plot(xnum,perf_collect[0],"-o",label='Train error')
plt.plot(xnum,perf_collect[1],"-o",label = 'Valid')
plt.plot(xnum,perf_collect[2],"-o",label = 'Valid accuracy')
plt.axis([0, max_iterations, 0, 1.3*(np.max(perf_collect))])
plt.xlabel("iteration", fontsize=20)
plt.ylabel("Error / Accuracy ", fontsize=20)
plt.legend()
Example #25
import os
import freeze_graph

# We save out the graph to disk, and then call the const conversion
# routine.
checkpoint_state_name = "checkpoint_state"
input_graph_name = "input_graph.pb"
output_graph_name = "output_graph.pb"

input_graph_path = os.path.join(FLAGS.model_dir, input_graph_name)
input_saver_def_path = ""
input_binary = False
input_checkpoint_path = os.path.join(FLAGS.checkpoint_dir, 'saved_checkpoint') + "-0"

# Note that this normally should be only "output_node"!!!
output_node_names = "Dense2/output_node"
restore_op_name = "save/restore_all"
filename_tensor_name = "save/Const:0"
output_graph_path = os.path.join(FLAGS.model_dir, output_graph_name)
clear_devices = False

freeze_graph.freeze_graph(input_graph_path, input_saver_def_path,
                          input_binary, input_checkpoint_path,
                          output_node_names, restore_op_name,
                          filename_tensor_name, output_graph_path,
                          clear_devices, initializer_nodes="")
Example #26
from freeze_graph import freeze_graph

input_graph = '../data/train.pb'
input_saver = ''
input_binary = True
input_checkpoint = '../data/net.ckpt'
output_node_names = 'h_hidden3,argmax'
restore_op_name = "save/restore_all"
filename_tensor_name = "save/Const:0"
output_graph = "../data/contrail_graph.pb"
clear_devices = True
initializer_nodes = ''

print "freezing..."
freeze_graph(input_graph, input_saver,
        input_binary, input_checkpoint,
        output_node_names, restore_op_name,
        filename_tensor_name, output_graph,
        clear_devices, "") 
Example #27
    if ckpt:
        if ckpt < 0:
            checkpoint = tf.train.get_checkpoint_state(ckpt_directory)
            input_checkpoint = checkpoint.model_checkpoint_path
        else:
            input_checkpoint = ckpt_directory + style_name + '-{}'.format(ckpt)
        saver.restore(sess, input_checkpoint)
        print('Checkpoint {} restored.'.format(ckpt))

    for epoch in range(1, epochs + 1):
        imgs = np.zeros((batchsize, 224, 224, 3), dtype=np.float32)
        for i in range(iterations):
            for j in range(batchsize):
                p = imagepaths[i * batchsize + j]
                imgs[j] = np.asarray(
                    Image.open(p).convert('RGB').resize((224, 224)),
                    np.float32)
            feed_dict = {inputs: imgs, target: styles_np}
            loss_, _ = sess.run([
                loss,
                train_step,
            ], feed_dict=feed_dict)
            print('[epoch {}/{}] batch {}/{}... loss: {}'.format(
                epoch, epochs, i + 1, iterations, loss_[0]))
        saver.save(sess, ckpt_directory + style_name, global_step=epoch)

if save_pb:
    if not os.path.exists('./pbs'):
        os.makedirs('./pbs')
    freeze_graph(ckpt_directory, './pbs/{}.pb'.format(style_name), 'output')
Example #28
    trainfiles = trainfiles[shuffle]
    trainresult = trainresult[shuffle]


'''
Final accuracy test
'''

testAcc = sess.run(accuracy, feed_dict={\
                    x: testfiles, y__: testresult, keep_prob: 1.0})
print("test accuracy %g"%testAcc)


'''
Graph saving code
'''

saver = tf.train.Saver()

checkpoint_dir = "model"
model_dir = "model"

# Save variable checkpoints
checkpoint_prefix = os.path.join(checkpoint_dir, "graph")
saver.save(sess, checkpoint_prefix)

# Do freeze
freeze_graph.freeze_graph(model_dir)