Esempio n. 1
0
 def __init__(self,
              X,
              KEEP,
              view,
              name,
              dir_path,
              node='logits:0',
              softmax=True):
     """Import the single saved meta-graph found in dir_path.

     X        -- input tensor fed to the imported graph's 'images:0'.
     KEEP     -- keep-probability tensor for 'keep:0', or None when the
                 saved graph has no such placeholder.
     view     -- stored on the instance; not used in this constructor.
     name     -- import scope name, also used for the Saver.
     dir_path -- directory expected to contain exactly one '*.meta' file.
     node     -- name of the output tensor to fetch from the import.
     softmax  -- if True, pass the output through logits2prob.
     """
     self.name = name
     self.view = view
     print(dir_path)  # fixed: was a Python 2 print statement
     paths = glob(os.path.join(dir_path, '*.meta'))
     print(paths)  # fixed: was a Python 2 print statement
     # Exactly one checkpoint meta file must be present.
     assert len(paths) == 1
     path = os.path.splitext(paths[0])[0]
     mg = meta_graph.read_meta_graph_file(path + '.meta')
     if KEEP is None:
         fts, = tf.import_graph_def(mg.graph_def,
                                    name=name,
                                    input_map={'images:0': X},
                                    return_elements=[node])
     else:
         fts, = tf.import_graph_def(mg.graph_def,
                                    name=name,
                                    input_map={
                                        'images:0': X,
                                        'keep:0': KEEP
                                    },
                                    return_elements=[node])
     if softmax:
         fts = logits2prob(fts)
     self.fts = fts
     self.saver = tf.train.Saver(saver_def=mg.saver_def, name=name)
     self.loader = lambda sess: self.saver.restore(sess, path)
Esempio n. 2
0
 def initialize_variables(self, save_file=None):
     """Initialize all variables, then best-effort restore from save_file.

     If a full restore fails (e.g. the checkpoint predates newly added
     variables), only the variables actually present in the checkpoint
     are restored; everything else keeps its fresh initialization.
     """
     self.session.run(tf.global_variables_initializer())
     if save_file is None:
         return
     try:
         self.saver.restore(self.session, save_file)
     except Exception:  # fixed: bare `except:` also caught KeyboardInterrupt
         # Only restore variables that are in the save file; the rest
         # keep their normal initialization.
         from tensorflow.python.framework import meta_graph
         meta_graph_def = meta_graph.read_meta_graph_file(save_file +
                                                          '.meta')
         stored_var_names = {
             n.name for n in meta_graph_def.graph_def.node
             if n.op == 'VariableV2'
         }
         print(stored_var_names)
         var_list = [
             v for v in tf.global_variables()
             if v.op.name in stored_var_names
         ]
         # Re-initialize everything, then overwrite the subset found in
         # the save file.  A throwaway saver automatically "upgrades" old
         # saved models to the latest graph definition.
         self.session.run(tf.global_variables_initializer())
         throwaway_saver = tf.train.Saver(var_list=var_list)
         throwaway_saver.restore(self.session, save_file)
Esempio n. 3
0
def convert(load_file, dest_file):
    """Re-save a checkpoint so that global_step ends up with the graph's
    current definition/type, preserving every stored weight."""
    from tensorflow.python.framework import meta_graph
    features, labels = dual_net.get_inference_input()
    dual_net.model_fn(features, labels, tf.estimator.ModeKeys.PREDICT)
    sess = tf.Session()

    # Pull the global step out of the checkpoint as a plain python value.
    reader = tf.train.load_checkpoint(load_file)
    step_value = reader.get_tensor('global_step')

    # Restore every stored variable except global_step itself.
    mg_def = meta_graph.read_meta_graph_file(load_file + '.meta')
    saved_names = {
        n.name for n in mg_def.graph_def.node if n.op == 'VariableV2'
    }
    saved_names.remove('global_step')
    restorable = [
        v for v in tf.global_variables() if v.op.name in saved_names
    ]
    tf.train.Saver(var_list=restorable).restore(sess, load_file)

    # Write the recovered step back through an explicit assign op.
    step_tensor = tf.train.get_or_create_global_step()
    sess.run(tf.assign(step_tensor, step_value))

    # Export the fixed-up model, then tear the graph down.
    tf.train.Saver().save(sess, dest_file)
    sess.close()
    tf.reset_default_graph()
Esempio n. 4
0
  def test_save_variable_devices(self, save_devices, meta_graph_only):
    """Device strings survive export iff SAVE_VARIABLE_DEVICES is set.

    Places two variables on distinct logical CPUs, exports (meta graph or
    SavedModel per `meta_graph_only`), re-reads the file, and checks the
    device fields in both the graph nodes and the object graph.
    """
    context._reset_context()
    cpus = context.context().list_physical_devices("CPU")
    if len(cpus) == 1:
      # Split the single physical CPU into two logical devices so the two
      # variables below can be placed on different devices.
      context.context().set_logical_device_configuration(
          cpus[0], [
              context.LogicalDeviceConfiguration(),
              context.LogicalDeviceConfiguration()
          ])
    context.ensure_initialized()

    root = tracking.AutoTrackable()
    with ops.device("CPU:0"):
      root.v0 = variables.Variable(1., name="v0")
    with ops.device("CPU:1"):
      root.v1 = variables.Variable(1., name="v1")

    options = save_options.SaveOptions(
        experimental_variable_policy=save_devices)
    file_name = os.path.join(self.get_temp_dir(), "saved_model")
    if meta_graph_only:
      save.export_meta_graph(obj=root, filename=file_name, options=options)
    else:
      save.save(obj=root, export_dir=file_name, options=options)

    # Re-read what was just written, through the matching parser.
    meta = None
    if meta_graph_only:
      meta = meta_graph.read_meta_graph_file(file_name)
    else:
      meta = loader_impl.parse_saved_model(file_name).meta_graphs[0]

    # Check devices in meta graph nodes.
    graph_def = meta.graph_def
    v0 = next((n for n in graph_def.node if n.name == "v0"), None)
    v1 = next((n for n in graph_def.node if n.name == "v1"), None)
    self.assertIsNotNone(v0)
    self.assertIsNotNone(v1)
    if save_devices == save_options.VariablePolicy.SAVE_VARIABLE_DEVICES:
      self.assertIn("CPU:0", v0.device)
      self.assertIn("CPU:1", v1.device)
    else:
      self.assertEmpty(v0.device)
      self.assertEmpty(v1.device)

    # Check devices in object graph nodes.
    object_graph_def = meta.object_graph_def
    v0 = next((n.variable
               for n in object_graph_def.nodes
               if n.HasField("variable") and n.variable.name == "v0"), None)
    v1 = next((n.variable
               for n in object_graph_def.nodes
               if n.HasField("variable") and n.variable.name == "v1"), None)
    self.assertIsNotNone(v0)
    self.assertIsNotNone(v1)
    if save_devices == save_options.VariablePolicy.SAVE_VARIABLE_DEVICES:
      self.assertIn("CPU:0", v0.device)
      self.assertIn("CPU:1", v1.device)
    else:
      self.assertEmpty(v0.device)
      self.assertEmpty(v1.device)
Esempio n. 5
0
def main (_):
    """Run the saved 'enhance' model over FLAGS.input and write a
    side-by-side (blurred input | enhanced output) XVID video to
    FLAGS.output."""
    logging.basicConfig()

    X = tf.placeholder(tf.float32, shape=(None, None, None, 3))
    mg = meta_graph.read_meta_graph_file(FLAGS.model + '.meta')
    Y, = tf.import_graph_def(mg.graph_def, name='enhance',
                        input_map={'lo_res:0': X},
                        return_elements=['hi_res:0'])
    saver = tf.train.Saver(saver_def=mg.saver_def, name='enhance')

    init = tf.global_variables_initializer()

    sess_config = tf.ConfigProto()

    with tf.Session(config=sess_config) as sess:
        sess.run(init)
        saver.restore(sess, FLAGS.model)
        cap = cv2.VideoCapture(FLAGS.input)

        fourcc = cv2.cv.CV_FOURCC(*'XVID')
        # NOTE(review): output size (640*2, 360) assumes 640x360 source
        # frames -- confirm against the input video.
        out = cv2.VideoWriter(FLAGS.output, fourcc, 25, (640*2, 360))
        C = 0
        try:
            while cap.isOpened():
                print('%f' % (C/25,))
                ret, frame = cap.read()
                if not ret:
                    # Fixed: end-of-stream (or read failure) previously fell
                    # through and crashed on None.astype below.
                    break
                orig = frame.astype(np.float32)
                ks = FLAGS.blur * 2 + 1
                frame = cv2.GaussianBlur(orig, (ks, ks), FLAGS.blur)
                frame = np.expand_dims(frame, 0)
                frame, = sess.run([Y], feed_dict={X: frame})
                both = np.concatenate((orig, frame[0]), axis=1)
                out.write(both.astype(np.uint8))
                C += 1
        finally:
            # Fixed: release video resources (previously leaked).
            cap.release()
            out.release()
    pass
Esempio n. 6
0
  def test_expand_distributed_variables(self, expand_strategy, policy):
    """Distributed variables expand into per-replica components in the
    exported object graph iff EXPAND_DISTRIBUTED_VARIABLES is set."""
    # 1. Create a context with both CPU:0 and CPU:1.
    context._reset_context()
    cpus = context.context().list_physical_devices("CPU")
    if len(cpus) == 1:
      context.context().set_logical_device_configuration(
          cpus[0], [
              context.LogicalDeviceConfiguration(),
              context.LogicalDeviceConfiguration()
          ])
    context.ensure_initialized()

    # 2. Create and save a model under a mirrored strategy.
    file_name = os.path.join(self.get_temp_dir(), "saved_model.pb")
    strategy = mirrored_strategy.MirroredStrategy(["CPU:0", "CPU:1"])
    strategy.extended._use_var_policy = policy
    with strategy.scope():
      root = tracking.AutoTrackable()
      root.v = variables.Variable([1., 1.], name="v")

      @def_function.function(input_signature=[])
      def f():
        root.v.assign([2., 2.])

      root.f = f

      save.export_meta_graph(
          obj=root,
          filename=file_name,
          options=save_options.SaveOptions(
              experimental_variable_policy=expand_strategy))

    # 3. Read the output file and test behavior.
    meta_graph_def = meta_graph.read_meta_graph_file(file_name)
    object_graph = meta_graph_def.object_graph_def
    graph_def = meta_graph_def.graph_def
    v = next((n.variable
              for n in object_graph.nodes
              if n.HasField("variable") and n.variable.name == "v"), None)
    saved_function = next((f for f in graph_def.library.function
                           if "inference_f_" in f.signature.name), None)
    self.assertIsNotNone(saved_function)
    if (expand_strategy ==
        save_options.VariablePolicy.EXPAND_DISTRIBUTED_VARIABLES):
      # experimental_save_variable_devices should have been automatically set.
      self.assertIn("CPU:0", v.device)
      components = v.experimental_distributed_variable_components
      self.assertLen(components, 2)
      v0 = next((x for x in components if x.name == "v"), None)
      v1 = next((x for x in components if x.name == "v/replica_1"), None)
      self.assertIsNotNone(v0)
      self.assertIsNotNone(v1)
      self.assertIn("CPU:0", v0.device)
      self.assertIn("CPU:1", v1.device)
      # Expanded function takes one argument per replica component.
      self.assertLen(saved_function.signature.input_arg, 2)
    else:
      self.assertEmpty(v.device)
      self.assertEmpty(v.experimental_distributed_variable_components)
      self.assertLen(saved_function.signature.input_arg, 1)
Esempio n. 7
0
def main():
    """Sanity-check that several serialized TF graphs on disk parse cleanly,
    printing what was found."""
    # Parse via the meta_graph helper (result unused; a parse error raises).
    meta_graph.read_meta_graph_file("/tmp/tf_training/ckpt/rjmodel.ckpt.meta")
    g = tf.MetaGraphDef()
    # Fixed: files were opened via open(...).read() and never closed.
    with open("/tmp/tf_training/ckpt/rjmodel.ckpt.meta", "rb") as f:
        g.ParseFromString(f.read())
    #print("GraphDef from meta_graph_file:", g.graph_def)
    g = tf.GraphDef()
    with open("/tmp/stylize_quantized.pb", "rb") as f:
        g.ParseFromString(f.read())
    print("GraphDef: ", g)
    #[n for n in g.node if n.name.find("input") != -1] # same for output or any other node you want to make sure is ok

    saved_model = saved_model_pb2.SavedModel()
    with open("/tmp/SavedModel/saved_model.pb", "rb") as f:
        saved_model.ParseFromString(f.read())
    print("saved_model parsed", saved_model.saved_model_schema_version,
          len(saved_model.meta_graphs))
    print("GraphDef from SavedModel file:",
          saved_model.meta_graphs[0].graph_def)
Esempio n. 8
0
 def __init__(self, X, path, name):
     """Import a frozen graph in inference mode and expose its logits."""
     graph_meta = meta_graph.read_meta_graph_file(path + '.meta')
     inference_flag = tf.constant(False)
     feed_map = {'images:0': X, 'is_training:0': inference_flag}
     imported = tf.import_graph_def(graph_meta.graph_def,
                                    name=name,
                                    input_map=feed_map,
                                    return_elements=['logits:0'])
     self.logits = imported[0]
     self.saver = tf.train.Saver(saver_def=graph_meta.saver_def, name=name)
     self.loader = lambda sess: self.saver.restore(sess, path)
Esempio n. 9
0
def _load_saved_model_from_session_bundle_path(export_dir, target, config):
    """Load legacy TF Exporter/SessionBundle checkpoint.

  Args:
    export_dir: the directory that contains files exported by exporter.
    target: The execution engine to connect to. See target in
      tf.compat.v1.Session()
    config: A ConfigProto proto with configuration options. See config in
      tf.compat.v1.Session()

  Returns:
    session: a tensorflow session created from the variable files.
    metagraph_def: The `MetaGraphDef` protocol buffer loaded in the provided
    session. This can be used to further extract signature-defs,
    collection-defs, etc.
    This model is up-converted to SavedModel format. Specifically, metagraph_def
    SignatureDef field is populated with Signatures converted from legacy
    signatures contained within CollectionDef

  Raises:
    RuntimeError: If metagraph already contains signature_def and cannot be
    up-converted.
  """

    meta_graph_filename = os.path.join(
        export_dir, legacy_constants.META_GRAPH_DEF_FILENAME)

    metagraph_def = meta_graph.read_meta_graph_file(meta_graph_filename)
    # A graph that already carries SignatureDefs is not a legacy export.
    if metagraph_def.signature_def:
        raise RuntimeError("Legacy graph contains signature def, unable to "
                           "up-convert.")

    # Add SignatureDef to metagraph.
    default_signature_def, named_signature_def = (
        _convert_signatures_to_signature_defs(metagraph_def))
    if default_signature_def:
        metagraph_def.signature_def[
            signature_constants.DEFAULT_SERVING_SIGNATURE_DEF_KEY].CopyFrom(
                default_signature_def)
    if named_signature_def:
        # Avoid clobbering the default signature's key if both exist.
        signature_def_key = signature_constants.DEFAULT_SERVING_SIGNATURE_DEF_KEY
        if default_signature_def:
            signature_def_key += "_from_named"
        metagraph_def.signature_def[signature_def_key].CopyFrom(
            named_signature_def)

    # We cannot just output session we loaded with older metagraph_def and
    # up-converted metagraph definition because Session has an internal object of
    # type Graph which is populated from meta_graph_def. If we do not create
    # session with our new meta_graph_def, then Graph will be out of sync with
    # meta_graph_def.
    sess, metagraph_def = session_bundle.load_session_bundle_from_path(
        export_dir, target, config, meta_graph_def=metagraph_def)
    return sess, metagraph_def
  def testConvertSignaturesToSignatureDefs(self):
    """Legacy Signatures convert to a default (regress) plus a named
    (predict) SignatureDef, and each also converts correctly alone."""
    base_path = tf.test.test_src_dir_path(SESSION_BUNDLE_PATH)
    meta_graph_filename = os.path.join(base_path,
                                       constants.META_GRAPH_DEF_FILENAME)
    metagraph_def = meta_graph.read_meta_graph_file(meta_graph_filename)
    default_signature_def, named_signature_def = (
        bundle_shim._convert_signatures_to_signature_defs(metagraph_def))
    self.assertEqual(default_signature_def.method_name,
                      signature_constants.REGRESS_METHOD_NAME)
    self.assertEqual(len(default_signature_def.inputs), 1)
    self.assertEqual(len(default_signature_def.outputs), 1)
    self.assertProtoEquals(
        default_signature_def.inputs[signature_constants.REGRESS_INPUTS],
        meta_graph_pb2.TensorInfo(name="tf_example:0"))
    self.assertProtoEquals(
        default_signature_def.outputs[signature_constants.REGRESS_OUTPUTS],
        meta_graph_pb2.TensorInfo(name="Identity:0"))
    self.assertEqual(named_signature_def.method_name,
                      signature_constants.PREDICT_METHOD_NAME)
    self.assertEqual(len(named_signature_def.inputs), 1)
    self.assertEqual(len(named_signature_def.outputs), 1)
    self.assertProtoEquals(
        named_signature_def.inputs["x"], meta_graph_pb2.TensorInfo(name="x:0"))
    self.assertProtoEquals(
        named_signature_def.outputs["y"], meta_graph_pb2.TensorInfo(name="y:0"))

    # Now try default signature only
    collection_def = metagraph_def.collection_def
    signatures_proto = manifest_pb2.Signatures()
    signatures = collection_def[constants.SIGNATURES_KEY].any_list.value[0]
    signatures.Unpack(signatures_proto)
    named_only_signatures_proto = manifest_pb2.Signatures()
    named_only_signatures_proto.CopyFrom(signatures_proto)

    default_only_signatures_proto = manifest_pb2.Signatures()
    default_only_signatures_proto.CopyFrom(signatures_proto)
    # NOTE(review): ClearField already empties the repeated field; the
    # preceding .clear() call looks redundant.
    default_only_signatures_proto.named_signatures.clear()
    default_only_signatures_proto.ClearField("named_signatures")
    metagraph_def.collection_def[constants.SIGNATURES_KEY].any_list.value[
        0].Pack(default_only_signatures_proto)
    default_signature_def, named_signature_def = (
        bundle_shim._convert_signatures_to_signature_defs(metagraph_def))
    self.assertEqual(default_signature_def.method_name,
                      signature_constants.REGRESS_METHOD_NAME)
    self.assertEqual(named_signature_def, None)

    # Named signature only: the default becomes None.
    named_only_signatures_proto.ClearField("default_signature")
    metagraph_def.collection_def[constants.SIGNATURES_KEY].any_list.value[
        0].Pack(named_only_signatures_proto)
    default_signature_def, named_signature_def = (
        bundle_shim._convert_signatures_to_signature_defs(metagraph_def))
    self.assertEqual(named_signature_def.method_name,
                      signature_constants.PREDICT_METHOD_NAME)
    self.assertEqual(default_signature_def, None)
Esempio n. 11
0
    def testConvertSignaturesToSignatureDefs(self):
        """Legacy Signatures convert to a default (regress) plus a named
        (predict) SignatureDef, and each also converts correctly alone."""
        base_path = tf.test.test_src_dir_path(SESSION_BUNDLE_PATH)
        meta_graph_filename = os.path.join(base_path,
                                           constants.META_GRAPH_DEF_FILENAME)
        metagraph_def = meta_graph.read_meta_graph_file(meta_graph_filename)
        default_signature_def, named_signature_def = (
            bundle_shim._convert_signatures_to_signature_defs(metagraph_def))
        self.assertEqual(default_signature_def.method_name,
                         signature_constants.REGRESS_METHOD_NAME)
        self.assertEqual(len(default_signature_def.inputs), 1)
        self.assertEqual(len(default_signature_def.outputs), 1)
        self.assertProtoEquals(
            default_signature_def.inputs[signature_constants.REGRESS_INPUTS],
            meta_graph_pb2.TensorInfo(name="tf_example:0"))
        self.assertProtoEquals(
            default_signature_def.outputs[signature_constants.REGRESS_OUTPUTS],
            meta_graph_pb2.TensorInfo(name="Identity:0"))
        self.assertEqual(named_signature_def.method_name,
                         signature_constants.PREDICT_METHOD_NAME)
        self.assertEqual(len(named_signature_def.inputs), 1)
        self.assertEqual(len(named_signature_def.outputs), 1)
        self.assertProtoEquals(named_signature_def.inputs["x"],
                               meta_graph_pb2.TensorInfo(name="x:0"))
        self.assertProtoEquals(named_signature_def.outputs["y"],
                               meta_graph_pb2.TensorInfo(name="y:0"))

        # Now try default signature only
        collection_def = metagraph_def.collection_def
        signatures_proto = manifest_pb2.Signatures()
        signatures = collection_def[constants.SIGNATURES_KEY].any_list.value[0]
        signatures.Unpack(signatures_proto)
        named_only_signatures_proto = manifest_pb2.Signatures()
        named_only_signatures_proto.CopyFrom(signatures_proto)

        default_only_signatures_proto = manifest_pb2.Signatures()
        default_only_signatures_proto.CopyFrom(signatures_proto)
        # NOTE(review): ClearField already empties the repeated field; the
        # preceding .clear() call looks redundant.
        default_only_signatures_proto.named_signatures.clear()
        default_only_signatures_proto.ClearField("named_signatures")
        metagraph_def.collection_def[constants.SIGNATURES_KEY].any_list.value[
            0].Pack(default_only_signatures_proto)
        default_signature_def, named_signature_def = (
            bundle_shim._convert_signatures_to_signature_defs(metagraph_def))
        self.assertEqual(default_signature_def.method_name,
                         signature_constants.REGRESS_METHOD_NAME)
        self.assertEqual(named_signature_def, None)

        # Named signature only: the default becomes None.
        named_only_signatures_proto.ClearField("default_signature")
        metagraph_def.collection_def[constants.SIGNATURES_KEY].any_list.value[
            0].Pack(named_only_signatures_proto)
        default_signature_def, named_signature_def = (
            bundle_shim._convert_signatures_to_signature_defs(metagraph_def))
        self.assertEqual(named_signature_def.method_name,
                         signature_constants.PREDICT_METHOD_NAME)
        self.assertEqual(default_signature_def, None)
 def __init__(self, path, image_size, name='xxx'):
     """Load a saved face-embedding graph from a checkpoint.

     path       -- checkpoint path prefix; path + '.meta' must exist.
     image_size -- height/width of the square uint8 input images.
     name       -- import scope for the graph and its Saver.
     """
     # uint8 input placeholder; whitened to roughly [-1, 1] floats below.
     images = tf.placeholder(tf.uint8, shape=(None, image_size, image_size, 3), name="images")
     batch = (tf.cast(images, tf.float32) - 127.5) / 128.0
     self.images = images
     is_training = tf.constant(False)
     mg = meta_graph.read_meta_graph_file(path + '.meta')
     # NOTE(review): 'phase_train' has no ':0' suffix, unlike 'image_batch:0'
     # -- presumably intentional (mapping the op, not a tensor); confirm.
     self.embeddings, = \
         tf.import_graph_def(mg.graph_def, name=name,
                             input_map={'image_batch:0': batch, 'phase_train': is_training},
                             return_elements=['embeddings:0'])
     self.saver = tf.train.Saver(saver_def=mg.saver_def, name=name)
     self.loader = lambda sess: self.saver.restore(sess, path)
Esempio n. 13
0
def _load_saved_model_from_session_bundle_path(export_dir, target, config):
  """Load legacy TF Exporter/SessionBundle checkpoint.

  Args:
    export_dir: the directory that contains files exported by exporter.
    target: The execution engine to connect to. See target in
      tf.compat.v1.Session()
    config: A ConfigProto proto with configuration options. See config in
      tf.compat.v1.Session()

  Returns:
    session: a tensorflow session created from the variable files.
    metagraph_def: The `MetaGraphDef` protocol buffer loaded in the provided
    session. This can be used to further extract signature-defs,
    collection-defs, etc.
    This model is up-converted to SavedModel format. Specifically, metagraph_def
    SignatureDef field is populated with Signatures converted from legacy
    signatures contained within CollectionDef

  Raises:
    RuntimeError: If metagraph already contains signature_def and cannot be
    up-converted.
  """

  meta_graph_filename = os.path.join(export_dir,
                                     legacy_constants.META_GRAPH_DEF_FILENAME)

  metagraph_def = meta_graph.read_meta_graph_file(meta_graph_filename)
  # A graph that already carries SignatureDefs is not a legacy export.
  if metagraph_def.signature_def:
    raise RuntimeError("Legacy graph contains signature def, unable to "
                       "up-convert.")

  # Add SignatureDef to metagraph.
  default_signature_def, named_signature_def = (
      _convert_signatures_to_signature_defs(metagraph_def))
  if default_signature_def:
    metagraph_def.signature_def[
        signature_constants.DEFAULT_SERVING_SIGNATURE_DEF_KEY].CopyFrom(
            default_signature_def)
  if named_signature_def:
    # Avoid clobbering the default signature's key if both exist.
    signature_def_key = signature_constants.DEFAULT_SERVING_SIGNATURE_DEF_KEY
    if default_signature_def:
      signature_def_key += "_from_named"
    metagraph_def.signature_def[signature_def_key].CopyFrom(named_signature_def)

  # We cannot just output session we loaded with older metagraph_def and
  # up-converted metagraph definition because Session has an internal object of
  # type Graph which is populated from meta_graph_def. If we do not create
  # session with our new meta_graph_def, then Graph will be out of sync with
  # meta_graph_def.
  sess, metagraph_def = session_bundle.load_session_bundle_from_path(
      export_dir, target, config, meta_graph_def=metagraph_def)
  return sess, metagraph_def
Esempio n. 14
0
 def __init__ (self, X, anchor_th, nms_max, nms_th, is_training, path, name):
     """Import a saved RPN graph and expose its detection outputs."""
     graph_meta = meta_graph.read_meta_graph_file(path + '.meta')
     feed_map = {
         'images:0': X,
         'anchor_th:0': anchor_th,
         'nms_max:0': nms_max,
         'nms_th:0': nms_th,
         'is_training:0': is_training,
     }
     wanted = ['rpn_probs:0', 'rpn_shapes:0', 'rpn_index:0']
     self.predictions = tf.import_graph_def(graph_meta.graph_def,
                                            name=name,
                                            input_map=feed_map,
                                            return_elements=wanted)
     self.saver = tf.train.Saver(saver_def=graph_meta.saver_def, name=name)
     self.loader = lambda sess: self.saver.restore(sess, path)
Esempio n. 15
0
def _parse_input_graph_proto(input_graph, input_binary):
    """Parser input tensorflow graph into GraphDef proto."""
    if not gfile.Exists(input_graph):
        print("Input graph file '" + input_graph + "' does not exist!")
        # NOTE(review): returning -1 as an error code forces every caller to
        # type-check the result; raising would be clearer.  Kept as-is since
        # callers may depend on it.
        return -1
    # NOTE(review): this reads input_graph as a *MetaGraphDef* and takes its
    # graph_def, but the lines below then re-parse the same file's raw bytes
    # as a plain GraphDef into that message.  For binary input,
    # ParseFromString clears the message first (making the seed pointless);
    # for text input, Merge merges into the already-populated proto.  Confirm
    # which file format input_graph is actually expected to hold.
    input_graph_def = meta_graph.read_meta_graph_file(input_graph).graph_def
    mode = "rb" if input_binary else "r"
    with gfile.FastGFile(input_graph, mode) as f:
        if input_binary:
            input_graph_def.ParseFromString(f.read())
        else:
            text_format.Merge(f.read(), input_graph_def)
    return input_graph_def
Esempio n. 16
0
 def __init__(self, X, is_training, path, name):
     """Import a saved graph and expose its 'prob' and 'offsets' outputs."""
     graph_meta = meta_graph.read_meta_graph_file(path + '.meta')
     feed_map = {'images:0': X, 'is_training:0': is_training}
     wanted = ['prob:0', 'offsets:0']
     outputs = tf.import_graph_def(graph_meta.graph_def,
                                   name=name,
                                   input_map=feed_map,
                                   return_elements=wanted)
     self.prob, self.offsets = outputs
     self.saver = tf.train.Saver(saver_def=graph_meta.saver_def, name=name)
     self.loader = lambda sess: self.saver.restore(sess, path)
Esempio n. 17
0
def main (_):
    """Colorize images from a picpac DB with the saved model and write an
    HTML gallery comparing ground truth with each prediction."""
    assert FLAGS.db and os.path.exists(FLAGS.db)
    assert FLAGS.model and os.path.exists(FLAGS.model + '.meta')

    # Lightness channel input (batch, H, W, 1).
    L = tf.placeholder(tf.float32, shape=(None, None, None, 1))

    mg = meta_graph.read_meta_graph_file(FLAGS.model + '.meta')
    # NOTE(review): the input is wired to 'fifo_queue_Dequeue:0' rather than
    # a named placeholder -- presumably the model was trained reading
    # directly from a queue; confirm against the training graph.
    logits, = tf.import_graph_def(mg.graph_def, name='colorize',
                        #input_map={'L:0':L},
                        input_map={'fifo_queue_Dequeue:0':L},
                        return_elements=['logits:0'])
    prob = tf.nn.softmax(logits)
    saver = tf.train.Saver(saver_def=mg.saver_def, name='colorize')

    picpac_config = dict(seed=2016,
                cache=False,
                max_size=200,
                min_size=192,
                crop_width=192,
                crop_height=192,
                shuffle=True,
                reshuffle=True,
                batch=1,
                round_div=FLAGS.stride,
                channels=3,
                stratify=False,
                channel_first=False # this is tensorflow specific
                                    # Caffe's dimension order is different.
                )

    stream = picpac.ImageStream(FLAGS.db, perturb=False, loop=False, **picpac_config)

    with tf.Session() as sess:
        tf.global_variables_initializer().run()
        saver.restore(sess, FLAGS.model)
        gallery = Gallery(FLAGS.output, cols=2, header=['groundtruth', 'prediction'])
        c = 0
        for images, _, _ in stream:
            if FLAGS.max and (c >= FLAGS.max):
                break
            # Split into L (lightness) and ab channels; w is unused here.
            l, ab, w = _pic2pic.encode_lab(images.copy(), FLAGS.downsize)
            ab_p, = sess.run([prob], feed_dict={L: l})
            y_p = decode_lab(l, ab_p, T=FLAGS.T)
            cv2.imwrite(gallery.next(), images[0])
            cv2.imwrite(gallery.next(), y_p[0])
            c += 1
            print('%d/%d' % (c, FLAGS.max))
            pass
        gallery.flush()
        pass
    pass
Esempio n. 18
0
 def __init__ (self, X, is_training, path, name):
     """Import a saved model; expose prob for either FCN or classifier."""
     graph_meta = meta_graph.read_meta_graph_file(path + '.meta')
     self.logits, = tf.import_graph_def(
         graph_meta.graph_def,
         name=name,
         input_map={'images:0': X, 'is_training:0': is_training},
         return_elements=['logits:0'])
     rank = len(self.logits.get_shape())
     self.is_fcn = rank == 4
     if self.is_fcn:
         # 4-D logits: fully-convolutional net; keep the channel-1 score map.
         fg = tf.slice(tf.nn.softmax(self.logits), [0,0,0,1], [-1,-1,-1,1])
         self.prob = tf.squeeze(fg, 3)
     else:
         # Plain classification head.
         self.prob = tf.nn.softmax(self.logits)
     self.saver = tf.train.Saver(saver_def=graph_meta.saver_def, name=name)
     self.loader = lambda sess: self.saver.restore(sess, path)
Esempio n. 19
0
    def test_export_meta_graph(self):
        """Signatures exported via export_meta_graph can be re-imported and
        run in a v1 Session: initialize sets the variable, update mutates
        it, and multiply_var observes the mutation."""
        root = tracking.AutoTrackable()
        root.variable = resource_variable_ops.UninitializedVariable(
            name="some_variable", dtype=dtypes.float32)

        @def_function.function(input_signature=[tensor_spec.TensorSpec(None)])
        def multiply_var(x):
            return root.variable * x

        @def_function.function(input_signature=[tensor_spec.TensorSpec([])])
        def update(y):
            root.variable.assign_add(y)
            # TODO(b/150393409): All functions exported as signatures must have at
            # least one output.
            return 0

        @def_function.function(input_signature=[])
        def initialize():
            root.variable.assign(1.0)
            # TODO(b/150393409): All functions exported as signatures must have at
            # least one output.
            return 0

        save_path = os.path.join(self.get_temp_dir(), "meta_graph.pb")
        save.export_meta_graph(root,
                               save_path,
                               signatures={
                                   "multiply_var": multiply_var,
                                   "initialize": initialize,
                                   "update": update
                               })

        with ops.Graph().as_default(), session_lib.Session() as session:
            saver.import_meta_graph(save_path)
            meta_graph_def = meta_graph.read_meta_graph_file(save_path)

            # Initialize variable to 1
            _run_signature(session, meta_graph_def, {}, "initialize")
            out = _run_signature(session, meta_graph_def, {"x": 3},
                                 "multiply_var")
            self.assertAllEqual(out, {"output_0": 3})

            # Adds 2 to the variable. Variable is now 3
            _run_signature(session, meta_graph_def, {"y": 2}, "update")
            out = _run_signature(session, meta_graph_def, {"x": 4},
                                 "multiply_var")
            self.assertAllEqual(out, {"output_0": 12})
Esempio n. 20
0
  def test_expand_distributed_variables(self, expand_strategy):
    """EXPAND_DISTRIBUTED_VARIABLES materializes one graph node per replica
    (v and v/replica_1), with device placements recorded."""
    context._reset_context()
    cpus = context.context().list_physical_devices("CPU")
    if len(cpus) == 1:
      # Split the single physical CPU into two logical devices so the
      # mirrored strategy below has two replicas.
      context.context().set_logical_device_configuration(
          cpus[0], [
              context.LogicalDeviceConfiguration(),
              context.LogicalDeviceConfiguration()
          ])
    context.ensure_initialized()

    file_name = os.path.join(self.get_temp_dir(), "saved_model.pb")
    with mirrored_strategy.MirroredStrategy(["CPU:0", "CPU:1"]).scope():
      root = tracking.AutoTrackable()
      root.v = variables.Variable([1., 1.], name="v")

      @def_function.function(input_signature=[])
      def f():
        root.v.assign([2., 2.])

      root.f = f

      save.export_meta_graph(
          obj=root,
          filename=file_name,
          options=save_options.SaveOptions(
              experimental_variable_policy=expand_strategy))
    graph_def = meta_graph.read_meta_graph_file(file_name).graph_def
    v0 = next((n for n in graph_def.node if n.name == "v"), None)
    v1 = next((n for n in graph_def.node if n.name == "v/replica_1"), None)
    self.assertIsNotNone(v0)
    saved_function = next((f for f in graph_def.library.function
                           if "inference_f_" in f.signature.name), None)
    self.assertIsNotNone(saved_function)
    if (expand_strategy ==
        save_options.VariablePolicy.EXPAND_DISTRIBUTED_VARIABLES):
      self.assertIsNotNone(v1)
      # experimental_save_variable_devices should have been automatically set.
      self.assertIn("CPU:0", v0.device)
      self.assertIn("CPU:1", v1.device)
      self.assertLen(saved_function.signature.input_arg, 2)
    else:
      self.assertIsNone(v1)
      self.assertEmpty(v0.device)
      # TODO(b/159752793): There should be only one input here.
      self.assertLen(saved_function.signature.input_arg, 2)
Esempio n. 21
0
    def __init__(self, X, path, name, node='logits:0', softmax=True):
        """applying tensorflow image model.

        X       -- input image tensor mapped to the graph's 'images:0'
        path    -- path to model (path + '.meta' must exist)
        name    -- import scope name, also used for the Saver
        node    -- name of the output tensor to fetch from the import
        softmax -- if True, convert the raw output to probabilities
        """
        mg = meta_graph.read_meta_graph_file(path + '.meta')
        output, = tf.import_graph_def(mg.graph_def,
                                      name=name,
                                      input_map={'images:0': X},
                                      return_elements=[node])
        if softmax:
            output = tf.nn.softmax(output)
        self.output = output
        self.saver = tf.train.Saver(saver_def=mg.saver_def, name=name)
        # Call with a live session to restore the saved weights.
        self.load = lambda sess: self.saver.restore(sess, path)
        pass
Esempio n. 22
0
 def initialize_variables(self, save_file=None):
     """Initialize all variables, then best-effort restore from save_file.

     If a full restore fails (e.g. the checkpoint predates newly added
     variables), only the variables actually present in the checkpoint
     are restored; everything else keeps its fresh initialization.
     """
     self.session.run(tf.global_variables_initializer())
     if save_file is None:
         return
     try:
         self.saver.restore(self.session, save_file)
     except Exception:  # fixed: bare `except:` also caught KeyboardInterrupt
         # Only restore variables that are in the save file; the rest
         # keep their normal initialization.
         from tensorflow.python.framework import meta_graph
         meta_graph_def = meta_graph.read_meta_graph_file(save_file + '.meta')
         stored_var_names = {n.name
                             for n in meta_graph_def.graph_def.node
                             if n.op == 'VariableV2'}
         print(stored_var_names)
         var_list = [v for v in tf.global_variables()
                     if v.op.name in stored_var_names]
         # Re-initialize everything, then overwrite the subset found in
         # the save file.  A throwaway saver automatically "upgrades" old
         # saved models to the latest graph definition.
         self.session.run(tf.global_variables_initializer())
         throwaway_saver = tf.train.Saver(var_list=var_list)
         throwaway_saver.restore(self.session, save_file)
Esempio n. 23
0
def swa():
    """Average the weights of several saved models (stochastic weight
    averaging) and export the result as a new checkpoint.

    Takes the first FLAGS.count entries of the hard-coded model list under
    fsdb.models_dir(), averages every checkpoint variable (keeping the max
    for global_step), and saves the result under FLAGS.data_dir.
    """
    path_base = fsdb.models_dir()
    model_names = [
        "000393-lincoln",
        "000390-indus",
        "000404-hannibal",
        "000447-hawke",
        "000426-grief",
        "000431-lion",
        "000428-invincible",
        "000303-olympus",
        "000291-superb",
        "000454-victorious",
    ]
    model_names = model_names[:FLAGS.count]

    model_paths = [os.path.join(path_base, m) for m in model_names]

    # construct the graph
    features, labels = dual_net.get_inference_input()
    dual_net.model_fn(features, labels, tf.estimator.ModeKeys.PREDICT)

    # Restrict restore/averaging to the variables actually stored in the
    # checkpoints (skips housekeeping graph variables).
    meta_graph_def = meta_graph.read_meta_graph_file(model_paths[0] + '.meta')
    stored_var_names = set([
        n.name for n in meta_graph_def.graph_def.node if n.op == 'VariableV2'
    ])

    var_list = [
        v for v in tf.global_variables() if v.op.name in stored_var_names
    ]
    var_list.sort(key=lambda v: v.op.name)

    print(stored_var_names)
    print(len(stored_var_names), len(var_list))

    # One session per model so each holds its own copy of the weights.
    sessions = [tf.Session() for _ in model_paths]
    saver = tf.train.Saver()
    for sess, model_path in zip(sessions, model_paths):
        saver.restore(sess, model_path)

    # Load all VariableV2s for each model.
    values = [sess.run(var_list) for sess in sessions]

    # Iterate over all variables, averaging each one's values across models.
    all_assign = []
    for var, vals in zip(var_list, zip(*values)):
        print("{}x {}".format(len(vals), var))
        if var.name == "global_step:0":
            # global_step is not averaged; keep the maximum step seen.
            avg = vals[0]
            for val in vals:
                avg = tf.maximum(avg, val)
        else:
            # BUG FIX: a stray `continue` here previously skipped the
            # append below for every regular variable, so no weights were
            # ever actually averaged.
            avg = tf.add_n(vals) / len(vals)

        all_assign.append(tf.assign(var, avg))

    # Run all assign ops on an existing model (which has other ops and graph).
    sess = sessions[0]
    sess.run(all_assign)

    # Export a new saved model.
    ensure_dir_exists(FLAGS.data_dir)
    dest_path = os.path.join(FLAGS.data_dir, "swa-" + str(FLAGS.count))
    saver.save(sess, dest_path)
Esempio n. 24
0
def main (_):
    """Run a saved colorization model over a picpac image DB and write a
    side-by-side (ground truth vs. prediction) HTML gallery."""
    assert FLAGS.db and os.path.exists(FLAGS.db)
    assert FLAGS.model and os.path.exists(FLAGS.model + '.meta')

    # Grayscale input placeholder: (batch, height, width, 1).
    GRAY = tf.placeholder(tf.float32, shape=(None, None, None, 1))

    # Import the trained graph, mapping our placeholder onto its 'gray:0'
    # input and pulling its 'color:0' output tensor.
    mg = meta_graph.read_meta_graph_file(FLAGS.model + '.meta')
    COLOR, = tf.import_graph_def(mg.graph_def, name='colorize',
                        #input_map={'L:0':L},
                        input_map={'gray:0':GRAY},
                        return_elements=['color:0'])
    #prob = tf.nn.softmax(logits)
    saver = tf.train.Saver(saver_def=mg.saver_def, name='colorize')

    picpac_config = dict(seed=2016,
                cache=False,
                max_size=200,
                min_size=192,
                crop_width=192,
                crop_height=192,
                shuffle=True,
                #reshuffle=True,
                batch=1,
                round_div=FLAGS.stride,
                channels=3,
                stratify=False,
                channel_first=False # this is tensorflow specific
                                    # Caffe's dimension order is different.
                )

    stream = picpac.ImageStream(FLAGS.db, perturb=False, loop=False, **picpac_config)

    with tf.Session() as sess:
        tf.global_variables_initializer().run()
        saver.restore(sess, FLAGS.model)
        gallery = Gallery(FLAGS.output, cols=2, header=['groundtruth', 'prediction'])
        c = 0
        for images, _, _ in stream:
            if FLAGS.max and (c >= FLAGS.max):
                break
            # Convert the BGR batch to (downsized) grayscale input.
            # NOTE(review): _pic2pic.encode_bgr's exact output layout is not
            # visible here — presumably (batch, h, w, 1) to match GRAY.
            gray, _, _ = _pic2pic.encode_bgr(images.copy(), FLAGS.downsize)
            #l, ab, w = _pic2pic.encode_lab(images.copy(), FLAGS.downsize)
            #
            color, = sess.run([COLOR], feed_dict={GRAY: gray})

            # Left gallery column: the grayscale input.
            cv2.imwrite(gallery.next(), gray[0])

            # Rebuild a full-resolution color image in LAB space: take the
            # L (lightness) channel from the grayscale input and the a/b
            # (chroma) channels from the model's prediction.
            full = np.zeros(images.shape, dtype=np.float32)
            color /= 255.0
            gray /= 255.0
            _, H, W, _ = images.shape
            for i in range(images.shape[0]):
                lab = cv2.cvtColor(cv2.cvtColor(gray[i], cv2.COLOR_GRAY2BGR), cv2.COLOR_BGR2LAB)
                print(lab.shape)
                full[i, :, :, :1] = lab[:, :, :1]
                # Upscale the predicted color back to the original size.
                one = cv2.resize(color[i], (W, H))

                lab = cv2.cvtColor(one, cv2.COLOR_BGR2LAB)
                full[i, :, :, 1:] = lab[:, :, 1:]
                # In-place conversion back to BGR (dst=full[i]).
                cv2.cvtColor(full[i], cv2.COLOR_LAB2BGR, full[i])
                if FLAGS.s_add and FLAGS.s_mul:
                    # Optional saturation boost in HSV space.
                    hsv = cv2.cvtColor(full[i], cv2.COLOR_BGR2HSV)
                    h, s, v = cv2.split(hsv)
                    s *= FLAGS.s_mul
                    s += FLAGS.s_add
                    hsv = cv2.merge([h, s, v])
                    cv2.cvtColor(hsv, cv2.COLOR_HSV2BGR, full[i])
                pass
            # Back to 0-255 range for imwrite.
            full *= 255
            cv2.imwrite(gallery.next(), full[0])
            #y_p = decode_lab(l, ab_p, T=FLAGS.T)
            c += 1
            print('%d/%d' % (c, FLAGS.max))
            pass
        gallery.flush()
        pass
    pass
Esempio n. 25
0
# Script: re-save a U-Net checkpoint with a manually-set global step.
import tensorflow as tf
# import dual_net
# NOTE(review): the second `path` assignment immediately overrides the first;
# the first line appears to be kept only as a record of an alternative dir.
path = '/srv/2-lkeb-17-dl01/syousefi/TestCode/EsophagusProject/sythesize_code/debug_Log/synth-5-shark/train/'
path = '/srv/2-lkeb-17-dl01/syousefi/TestCode/EsophagusProject/sythesize_code/debug_Log/synth-5-shark/unet_checkpoints/'
save_file = path + 'unet_inter_epoch0_point100.ckpt-100'
dest_file = path + 'saved'
# features, labels = dual_net.get_inference_input()
# dual_net.model_fn(features, labels, tf.estimator.ModeKeys.PREDICT, dual_net.get_default_hyperparams())
sess = tf.Session()

# Retrieve the value to use as the global step as a python value.
# NOTE(review): despite the intent, this reads the 'avegare_perfusion'
# tensor (looks like a typo of 'average_perfusion'), not 'global_step' —
# confirm against the checkpoint's actual contents.
ckpt = tf.train.load_checkpoint(save_file)
global_step_value = ckpt.get_tensor('avegare_perfusion')

# Restore all saved weights, except global_step (removed from the name set
# below so the old-typed value is never loaded).
from tensorflow.python.framework import meta_graph
meta_graph_def = meta_graph.read_meta_graph_file(save_file + '.meta')
stored_var_names = set(
    [n.name for n in meta_graph_def.graph_def.node if n.op == 'VariableV2'])
print(stored_var_names)
stored_var_names.remove('global_step')
var_list = [v for v in tf.global_variables() if v.op.name in stored_var_names]
tf.train.Saver(var_list=var_list).restore(sess, save_file)

# Manually set the global step from the value read above.
global_step_tensor = tf.train.get_or_create_global_step()
assign_op = tf.assign(global_step_tensor, global_step_value)
sess.run(assign_op)

# Export a new checkpoint whose global step has the right type.
tf.train.Saver().save(sess, dest_file)
Esempio n. 26
0
def freeze_graph_with_def_protos(input_graph_def,
                                 input_saver_def,
                                 input_checkpoint,
                                 output_node_names,
                                 restore_op_name,
                                 filename_tensor_name,
                                 output_graph,
                                 clear_devices,
                                 initializer_nodes,
                                 variable_names_blacklist="",
                                 input_meta_graph_def=None,
                                 input_saved_model_dir=None,
                                 saved_model_tags=None):
    """Converts all variables in a graph and checkpoint into constants.

    Exactly one of input_graph_def / input_meta_graph_def /
    input_saved_model_dir supplies the graph; input_checkpoint supplies the
    weights.  Returns the frozen GraphDef (also written to output_graph if
    given), or -1 on argument errors.
    """
    del restore_op_name, filename_tensor_name  # Unused by updated loading code.

    # 'input_checkpoint' may be a prefix if we're using Saver V2 format
    if (not input_saved_model_dir
            and not saver_lib.checkpoint_exists(input_checkpoint)):
        print("Input checkpoint '" + input_checkpoint + "' doesn't exist!")
        return -1

    if not output_node_names:
        print("You need to supply the name of a node to --output_node_names.")
        return -1

    # Remove all the explicit device specifications for this node. This helps
    # to make the graph more portable.
    # BUG FIX: this used to be replaced by
    #   input_graph_def = meta_graph.read_meta_graph_file(input_graph).graph_def
    # which referenced the undefined name `input_graph` and raised a
    # NameError on every call; restored the standard clear-devices handling.
    if clear_devices:
        if input_meta_graph_def:
            for node in input_meta_graph_def.graph_def.node:
                node.device = ""
        elif input_graph_def:
            for node in input_graph_def.node:
                node.device = ""

    if input_graph_def:
        _ = importer.import_graph_def(input_graph_def, name="")
    with session.Session() as sess:
        if input_saver_def:
            # Explicit saver definition: restore directly.
            saver = saver_lib.Saver(saver_def=input_saver_def)
            saver.restore(sess, input_checkpoint)
        elif input_meta_graph_def:
            restorer = saver_lib.import_meta_graph(input_meta_graph_def,
                                                   clear_devices=True)
            restorer.restore(sess, input_checkpoint)
            if initializer_nodes:
                sess.run(initializer_nodes.split(","))
        elif input_saved_model_dir:
            if saved_model_tags is None:
                saved_model_tags = []
            loader.load(sess, saved_model_tags, input_saved_model_dir)
        else:
            # No saver info: build a var_list from the checkpoint itself,
            # keeping only tensors that exist in the current graph.
            var_list = {}
            reader = pywrap_tensorflow.NewCheckpointReader(input_checkpoint)
            var_to_shape_map = reader.get_variable_to_shape_map()
            for key in var_to_shape_map:
                try:
                    tensor = sess.graph.get_tensor_by_name(key + ":0")
                except KeyError:
                    # This tensor doesn't exist in the graph (for example it's
                    # 'global_step' or a similar housekeeping element) so skip it.
                    continue
                var_list[key] = tensor
            saver = saver_lib.Saver(var_list=var_list)
            saver.restore(sess, input_checkpoint)
            if initializer_nodes:
                sess.run(initializer_nodes.split(","))

        variable_names_blacklist = (variable_names_blacklist.split(",")
                                    if variable_names_blacklist else None)

        if input_meta_graph_def:
            output_graph_def = graph_util.convert_variables_to_constants(
                sess,
                input_meta_graph_def.graph_def,
                output_node_names.split(","),
                variable_names_blacklist=variable_names_blacklist)
        else:
            output_graph_def = graph_util.convert_variables_to_constants(
                sess,
                input_graph_def,
                output_node_names.split(","),
                variable_names_blacklist=variable_names_blacklist)

    # Write GraphDef to file if output path has been given.
    if output_graph:
        with gfile.GFile(output_graph, "wb") as f:
            f.write(output_graph_def.SerializeToString())

    return output_graph_def
#!/usr/bin/env python
# Inference script: restore the saved model for a given fold and score the
# entries of 'list_test'.
# NOTE(review): this fragment appears truncated mid-loop in this file.
import tensorflow as tf
from tensorflow.python.framework import meta_graph
import numpy as np
import pkgutil
import cv2
import sys

# Fold name, used to locate 'model/<fold>.meta' and the checkpoint prefix.
fold = sys.argv[1]

size = 224
# Two-channel image input placeholder (channel semantics not shown here —
# presumably a model-specific 2-plane encoding; confirm against training code).
X = tf.placeholder(tf.float32, shape=(None, None, None, 2), name="images")
mg = meta_graph.read_meta_graph_file('model/' + fold + '.meta')
logits = tf.import_graph_def(mg.graph_def,
                             name='xxx',
                             input_map={'images:0': X},
                             return_elements=['logits:0'])
prob = tf.nn.softmax(logits)
saver = tf.train.Saver(saver_def=mg.saver_def, name='xxx')

with tf.Session() as sess:
    saver.restore(sess, 'model/' + fold)
    tmp_input = np.zeros((1, 224, 224, 2))
    TEST = open('list_test', 'r')
    PRED = open('pred.txt', 'w')
    # Each line of list_test is tab-separated; column 0 is an .npy path.
    for line in TEST:

        table = line.split('\t')

        img = np.load(table[0])
        img = cv2.resize(img, (224, 224))
        h.update(**blobs)
elif args.output_path.endswith('.npy') or args.output_path.endswith('.npz'):
    (np.savez if args.output_path[-1] == 'z' else numpy.save)(args.output_path,
                                                              **blobs)
elif args.output_path.endswith('.pt'):
    import torch
    torch.save(
        {
            k: (torch.as_tensor(blob) if not np.isscalar(blob) else
                torch.tensor(blob)) if isinstance(blob, np.ndarray) else blob
            for k, blob in blobs.items()
        }, args.output_path)

if args.onnx or args.tensorboard or args.graph:
    meta_graph_file = glob.glob(os.path.join(checkpoint_dir, '*.meta'))[0]
    graph_def = meta_graph.read_meta_graph_file(meta_graph_file).graph_def

    if args.graph or (not args.input_name) or (args.onnx
                                               and not args.output_name):
        print('\n'.join(
            sorted(f'{v.name} <- {v.op}(' + ', '.join(v.input)
                   for v in graph_def.node)) + ')',
              file=None if not args.graph else open(args.graph, 'w'))
        sys.exit(0)

    for v in graph_def.node:
        if any(name in v.name for name in args.identityop):
            v.op = 'Identity'
            for a in set(v.attr.keys()) - set(['T']):
                del v.attr[a]