Example #1
def _get_signature_def(signature_def_key, export_dir, tags):
    """Construct a `SignatureDef` proto."""
    signature_def_key = (signature_def_key or
                         signature_constants.DEFAULT_SERVING_SIGNATURE_DEF_KEY)

    metagraph_def = saved_model_cli.get_meta_graph_def(export_dir, tags)

    try:
        signature_def = signature_def_utils.get_signature_def_by_key(
            metagraph_def, signature_def_key)
    except ValueError as e:
        try:
            formatted_key = _DEFAULT_INPUT_ALTERNATIVE_FORMAT.format(
                signature_def_key)
            signature_def = signature_def_utils.get_signature_def_by_key(
                metagraph_def, formatted_key)

            logging.warning(
                'Could not find signature def "%s". '
                'Using "%s" instead', signature_def_key, formatted_key)
        except ValueError:
            raise ValueError(
                'Got signature_def_key "{}". Available signatures are {}. '
                'Original error:\n{}'.format(signature_def_key,
                                             list(metagraph_def.signature_def),
                                             e))
    return signature_def
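A minimal usage sketch (not from the original source): the export directory and tag set below are placeholders, and `tags` is the comma-separated tag string that `saved_model_cli.get_meta_graph_def` expects.

# Hypothetical call; '/tmp/saved_model' and 'serve' are illustrative only.
signature_def = _get_signature_def(
    signature_def_key=None,          # None falls back to the default serving key
    export_dir='/tmp/saved_model',
    tags='serve')
print(list(signature_def.inputs), list(signature_def.outputs))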
Example #2
def load_meta_graph(model_path, tags, graph, signature_def_key=None):
    saved_model = reader.read_saved_model(model_path)
    the_meta_graph = None
    for meta_graph_def in saved_model.meta_graphs:
        if sorted(meta_graph_def.meta_info_def.tags) == sorted(tags):
            the_meta_graph = meta_graph_def
            break
    if the_meta_graph is None:
        raise ValueError('No meta graph found for tags {}'.format(tags))
    signature_def_key = (signature_def_key or
                         tf.saved_model.signature_constants.DEFAULT_SERVING_SIGNATURE_DEF_KEY)
    try:
        signature_def = signature_def_utils.get_signature_def_by_key(
            the_meta_graph,
            signature_def_key)
    except ValueError as ex:
        try:
            formatted_key = 'default_input_alternative:{}'.format(
                signature_def_key)
            signature_def = signature_def_utils.get_signature_def_by_key(
                the_meta_graph, formatted_key)
        except ValueError:
            raise ValueError(
                'Got signature_def_key "{}". Available signatures are {}. '
                'Original error:\n{}'.format(
                    signature_def_key, list(the_meta_graph.signature_def), ex)
            )
    input_names = {k: v.name for k, v in signature_def.inputs.items()}
    output_names = {k: v.name for k, v in signature_def.outputs.items()}
    feed_tensors = {k: graph.get_tensor_by_name(v)
                    for k, v in input_names.items()}
    fetch_tensors = {k: graph.get_tensor_by_name(v)
                     for k, v in output_names.items()}
    return feed_tensors, fetch_tensors
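A hedged usage sketch for `load_meta_graph`: it assumes the SavedModel has already been loaded into `graph` (e.g. via `tf.saved_model.loader.load`), since the returned names are resolved with `graph.get_tensor_by_name`. The path and tag below are placeholders.

import tensorflow as tf

graph = tf.Graph()
with tf.Session(graph=graph) as sess:
    # Load the graph first so the signature's tensor names resolve.
    tf.saved_model.loader.load(sess, ['serve'], '/tmp/saved_model')
    feeds, fetches = load_meta_graph('/tmp/saved_model', ['serve'], graph)
    # outputs = sess.run(fetches, feed_dict={feeds['inputs']: batch})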
Example #3
def _get_signature_def(signature_def_key, export_dir, tags):
  """Construct a `SignatureDef` proto."""
  signature_def_key = (
      signature_def_key or
      signature_constants.DEFAULT_SERVING_SIGNATURE_DEF_KEY)

  metagraph_def = saved_model_cli.get_meta_graph_def(export_dir, tags)

  try:
    signature_def = signature_def_utils.get_signature_def_by_key(
        metagraph_def,
        signature_def_key)
  except ValueError as e:
    try:
      formatted_key = _DEFAULT_INPUT_ALTERNATIVE_FORMAT.format(
          signature_def_key)
      signature_def = signature_def_utils.get_signature_def_by_key(
          metagraph_def, formatted_key)

      logging.warning('Could not find signature def "%s". '
                      'Using "%s" instead', signature_def_key, formatted_key)
    except ValueError:
      raise ValueError(
          'Got signature_def_key "{}". Available signatures are {}. '
          'Original error:\n{}'.format(
              signature_def_key, list(metagraph_def.signature_def), e))
  return signature_def
Example #4
    def testGetSignatureDefByKey(self):
        x = array_ops.placeholder(dtypes.float32, 1, name="x")
        x_tensor_info = utils.build_tensor_info(x)

        y = array_ops.placeholder(dtypes.float32, name="y")
        y_tensor_info = utils.build_tensor_info(y)

        foo_signature_def = signature_def_utils.build_signature_def(
            {"foo-input": x_tensor_info}, {"foo-output": y_tensor_info},
            "foo-method-name")
        bar_signature_def = signature_def_utils.build_signature_def(
            {"bar-input": x_tensor_info}, {"bar-output": y_tensor_info},
            "bar-method-name")
        meta_graph_def = meta_graph_pb2.MetaGraphDef()
        self._add_to_signature_def_map(meta_graph_def, {
            "foo": foo_signature_def,
            "bar": bar_signature_def
        })

        # Look up a key that does not exist in the SignatureDefMap.
        missing_key = "missing-key"
        with self.assertRaisesRegexp(
                ValueError,
                "No SignatureDef with key '%s' found in MetaGraphDef" %
                missing_key):
            signature_def_contrib_utils.get_signature_def_by_key(
                meta_graph_def, missing_key)

        # Look up the key, `foo` which exists in the SignatureDefMap.
        foo_signature_def = signature_def_contrib_utils.get_signature_def_by_key(
            meta_graph_def, "foo")
        self.assertTrue("foo-method-name", foo_signature_def.method_name)

        # Check inputs in signature def.
        self.assertEqual(1, len(foo_signature_def.inputs))
        self._check_tensor_info(foo_signature_def.inputs, "foo-input", "x:0")

        # Check outputs in signature def.
        self.assertEqual(1, len(foo_signature_def.outputs))
        self._check_tensor_info(foo_signature_def.outputs, "foo-output", "y:0")

        # Look up the key, `bar` which exists in the SignatureDefMap.
        bar_signature_def = signature_def_contrib_utils.get_signature_def_by_key(
            meta_graph_def, "bar")
        self.assertTrue("bar-method-name", bar_signature_def.method_name)

        # Check inputs in signature def.
        self.assertEqual(1, len(bar_signature_def.inputs))
        self._check_tensor_info(bar_signature_def.inputs, "bar-input", "x:0")

        # Check outputs in signature def.
        self.assertEqual(1, len(bar_signature_def.outputs))
        self._check_tensor_info(bar_signature_def.outputs, "bar-output", "y:0")
Example #5
  def testGetSignatureDefByKey(self):
    x = array_ops.placeholder(dtypes.float32, 1, name="x")
    x_tensor_info = utils.build_tensor_info(x)

    y = array_ops.placeholder(dtypes.float32, name="y")
    y_tensor_info = utils.build_tensor_info(y)

    foo_signature_def = signature_def_utils.build_signature_def({
        "foo-input": x_tensor_info
    }, {"foo-output": y_tensor_info}, "foo-method-name")
    bar_signature_def = signature_def_utils.build_signature_def({
        "bar-input": x_tensor_info
    }, {"bar-output": y_tensor_info}, "bar-method-name")
    meta_graph_def = meta_graph_pb2.MetaGraphDef()
    self._add_to_signature_def_map(
        meta_graph_def, {"foo": foo_signature_def,
                         "bar": bar_signature_def})

    # Look up a key that does not exist in the SignatureDefMap.
    missing_key = "missing-key"
    with self.assertRaisesRegexp(
        ValueError,
        "No SignatureDef with key '%s' found in MetaGraphDef" % missing_key):
      signature_def_contrib_utils.get_signature_def_by_key(
          meta_graph_def, missing_key)

    # Look up the key, `foo` which exists in the SignatureDefMap.
    foo_signature_def = signature_def_contrib_utils.get_signature_def_by_key(
        meta_graph_def, "foo")
    self.assertTrue("foo-method-name", foo_signature_def.method_name)

    # Check inputs in signature def.
    self.assertEqual(1, len(foo_signature_def.inputs))
    self._check_tensor_info(foo_signature_def.inputs, "foo-input", "x:0")

    # Check outputs in signature def.
    self.assertEqual(1, len(foo_signature_def.outputs))
    self._check_tensor_info(foo_signature_def.outputs, "foo-output", "y:0")

    # Look up the key, `bar` which exists in the SignatureDefMap.
    bar_signature_def = signature_def_contrib_utils.get_signature_def_by_key(
        meta_graph_def, "bar")
    self.assertTrue("bar-method-name", bar_signature_def.method_name)

    # Check inputs in signature def.
    self.assertEqual(1, len(bar_signature_def.inputs))
    self._check_tensor_info(bar_signature_def.inputs, "bar-input", "x:0")

    # Check outputs in signature def.
    self.assertEqual(1, len(bar_signature_def.outputs))
    self._check_tensor_info(bar_signature_def.outputs, "bar-output", "y:0")
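The tests above rely on two helpers, `_add_to_signature_def_map` and `_check_tensor_info`, that are not shown. A minimal sketch of what they plausibly do, inferred only from how they are called here (not the actual TensorFlow test code):

def _add_to_signature_def_map(self, meta_graph_def, signature_def_map):
    # Copy each SignatureDef proto into the MetaGraphDef's signature_def map.
    for key, signature_def in signature_def_map.items():
        meta_graph_def.signature_def[key].CopyFrom(signature_def)

def _check_tensor_info(self, tensor_info_map, map_key, expected_tensor_name):
    # The entry should exist and reference the expected graph tensor.
    self.assertIn(map_key, tensor_info_map)
    self.assertEqual(expected_tensor_name, tensor_info_map[map_key].name)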
Example #6
def test(test_input_filename, test_output_filename):
    model_path = '.'
    with open(test_input_filename, 'rb') as fh:
        actual_input = fh.read()
    with open(test_output_filename, 'rb') as fh:
        expected_output = fh.read()

    # Load io_transformers module
    transformers_module_name = 'model_io_transformers'
    spec = importlib.util.spec_from_file_location(transformers_module_name, '%s.py' % transformers_module_name)
    transformers_module = importlib.util.module_from_spec(spec)
    spec.loader.exec_module(transformers_module)
    actual_transformed_input = transformers_module.transform_inputs(actual_input)
    print(actual_transformed_input)

    with tf.Session(graph=tf.Graph()) as sess:
        meta_graph_def = tf.saved_model.loader.load(
            sess, [tf.saved_model.tag_constants.SERVING], model_path)
        print('meta_graph_def %s' % meta_graph_def)
        print('graph %s' % sess.graph)

        # Resolve the graph-internal tensor names from the 'predict' signature.
        internal_input_tensor_name = signature_def_contrib_utils.get_signature_def_by_key(
            meta_graph_def, 'predict').inputs['x_observed'].name
        x_observed_internal = sess.graph.get_tensor_by_name(internal_input_tensor_name)

        internal_output_tensor_name = signature_def_contrib_utils.get_signature_def_by_key(
            meta_graph_def, 'predict').outputs['y_pred'].name
        y_pred_internal = sess.graph.get_tensor_by_name(internal_output_tensor_name)

        actual_output = sess.run(
            y_pred_internal,
            feed_dict={internal_input_tensor_name:
                       actual_transformed_input.inputs['x_observed'].float_val})
        print('actual output: %s' % actual_output)
        print('actual output type: %s' % type(actual_output))
        # TODO: add {"y_pred": ...}
        actual_transformed_output = transformers_module.transform_outputs(actual_output)
        print('actual transformed output: %s' % actual_transformed_output)
        print('expected: %s' % expected_output.decode('utf-8').strip())

    return (json.loads(expected_output.decode('utf-8').strip()) ==
            json.loads(actual_transformed_output.strip()))
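A hypothetical invocation of the harness above; the fixture filenames are placeholders for a serialized request and its expected JSON response:

# Assumes the SavedModel and both fixture files sit in the working directory.
passed = test('test_request.bin', 'expected_response.json')
print('PASS' if passed else 'FAIL')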
Example #7
def _from_saved_model_v1(sess, model_path, input_names, output_names, tag, signatures):
    """Load tensorflow graph from saved_model."""

    if tag is None:
        tag = [tf.saved_model.tag_constants.SERVING]

    if not isinstance(tag, list):
        tag = [tag]

    imported = tf.saved_model.loader.load(sess, tag, model_path)
    for k in imported.signature_def.keys():
        if k.startswith("_"):
            # consider signatures starting with '_' private
            continue
        signatures.append(k)
    try:
        from tensorflow.contrib.saved_model.python.saved_model import signature_def_utils
        # pylint: disable=unnecessary-lambda
        get_signature_def = lambda meta_graph_def, k: \
            signature_def_utils.get_signature_def_by_key(meta_graph_def, k)
    except ImportError:
        # TF1.12 changed the api
        get_signature_def = lambda meta_graph_def, k: meta_graph_def.signature_def[k]

    input_names = []
    output_names = []
    for k in signatures:
        inputs_tensor_info = get_signature_def(imported, k).inputs
        for _, input_tensor in inputs_tensor_info.items():
            input_names.append(input_tensor.name)
        outputs_tensor_info = get_signature_def(imported, k).outputs
        for _, output_tensor in outputs_tensor_info.items():
            output_names.append(output_tensor.name)
    frozen_graph = freeze_session(sess, input_names=input_names, output_names=output_names)
    return frozen_graph, input_names, output_names
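A usage sketch for this loader, with placeholder path; note that the function rebuilds `input_names`/`output_names` internally, ignoring the values passed in, and appends the public signature keys to the `signatures` list as a side effect.

import tensorflow as tf

with tf.Session(graph=tf.Graph()) as sess:
    signatures = []  # filled with non-private signature keys
    frozen_graph, input_names, output_names = _from_saved_model_v1(
        sess, '/tmp/saved_model', input_names=None, output_names=None,
        tag=None, signatures=signatures)  # tag=None defaults to the serving tag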
Example #8
  def testGetSignatureDefByKeyRegression(self):
    input1 = constant_op.constant("a", name="input-1")
    output1 = constant_op.constant(7.2, name="output-1")

    meta_graph_def = meta_graph_pb2.MetaGraphDef()
    self._add_to_signature_def_map(meta_graph_def, {
        "my_regression":
            signature_def_utils.regression_signature_def(input1, output1)
    })

    # Look up the regression signature with the key used while saving.
    signature_def = signature_def_contrib_utils.get_signature_def_by_key(
        meta_graph_def, "my_regression")

    # Check the method name to match the constants regression method name.
    self.assertEqual(signature_constants.REGRESS_METHOD_NAME,
                     signature_def.method_name)

    # Check inputs in signature def.
    self.assertEqual(1, len(signature_def.inputs))
    self._check_tensor_info(signature_def.inputs,
                            signature_constants.REGRESS_INPUTS, "input-1:0")

    # Check outputs in signature def.
    self.assertEqual(1, len(signature_def.outputs))
    self._check_tensor_info(signature_def.outputs,
                            signature_constants.REGRESS_OUTPUTS, "output-1:0")
Example #9
def _get_signature_def(meta_graph, signature_key):
  """Get the signature def from meta_graph with given signature_key.

  Args:
    meta_graph: meta_graph_def.
    signature_key: signature_def in the meta_graph_def.

  Returns:
    The signature_def used for tflite conversion.

  Raises:
    ValueError: Given signature_key is not valid for this meta_graph.
  """
  signature_def_map = meta_graph.signature_def
  signature_def_keys = set(signature_def_map.keys())
  logging.info(
      "The given SavedModel MetaGraphDef contains SignatureDefs with the "
      "following keys: %s", signature_def_keys)
  if signature_key not in signature_def_keys:
    raise ValueError("No '{}' in the SavedModel\'s SignatureDefs. Possible "
                     "values are '{}'.".format(signature_key,
                                               ",".join(signature_def_keys)))
  signature_def = signature_def_utils.get_signature_def_by_key(
      meta_graph, signature_key)
  return signature_def
Example #10
def RunModel(saved_model_dir, signature_def_key, tag, text, ngrams_list=None):
    saved_model = reader.read_saved_model(saved_model_dir)
    meta_graph = None
    for meta_graph_def in saved_model.meta_graphs:
        if tag in meta_graph_def.meta_info_def.tags:
            meta_graph = meta_graph_def
            break
    if meta_graph is None:
        raise ValueError("Cannot find saved_model with tag " + tag)
    signature_def = signature_def_utils.get_signature_def_by_key(
        meta_graph, signature_def_key)
    text = text_utils.TokenizeText(text)
    ngrams = None
    if ngrams_list is not None:
        ngrams_list = text_utils.ParseNgramsOpts(ngrams_list)
        ngrams = text_utils.GenerateNgrams(text, ngrams_list)
    example = inputs.BuildTextExample(text, ngrams=ngrams)
    example = example.SerializeToString()
    inputs_feed_dict = {
        signature_def.inputs["inputs"].name: [example],
    }
    if signature_def_key == "proba":
        output_key = "scores"
    elif signature_def_key == "embedding":
        output_key = "outputs"
    else:
        raise ValueError("Unrecognised signature_def %s" % (signature_def_key))
    output_tensor = signature_def.outputs[output_key].name
    with tf.Session() as sess:
        loader.load(sess, [tag], saved_model_dir)
        outputs = sess.run(output_tensor, feed_dict=inputs_feed_dict)
        return outputs
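A hypothetical call; the directory, key, and tag are placeholders chosen to match the two output keys the function recognises:

scores = RunModel('/tmp/fasttext_model', 'proba', 'serve',
                  'some text to classify', ngrams_list=None)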
Example #11
    def __init__(self, path=DEFAULT_MODEL_PATH, model_dir=DEFAULT_MODEL_DIR):
        logger.info('Loading model from: {}...'.format(path))
        sess = tf.Session(graph=tf.Graph())
        # load the graph
        saved_model_path = '{}/{}'.format(path, model_dir)
        model_graph_def = sm.loader.load(sess, [sm.tag_constants.SERVING],
                                         saved_model_path)
        sig_def = signature_def_utils.get_signature_def_by_key(
            model_graph_def,
            sm.signature_constants.DEFAULT_SERVING_SIGNATURE_DEF_KEY)
        input_name = sig_def.inputs['inputs'].name
        output_name = sig_def.outputs['scores'].name

        # Load labels from file
        label_file = codecs.open('./{}/labels.txt'.format(path),
                                 "r",
                                 encoding="utf-8")
        labels = [label.strip('\n') for label in label_file.readlines()]

        self.labels = labels

        # set up instance variables and required inputs for inference
        self.sess = sess
        self.model_graph_def = model_graph_def
        self.output_tensor = sess.graph.get_tensor_by_name(output_name)
        self.input_name = input_name
        self.output_name = output_name

        self.means = np.load('./{}/crop_mean.npy'.format(path)).reshape(
            [NUM_FRAMES_PER_CLIP, CROP_SIZE, CROP_SIZE, CHANNELS])
        logger.info('Loaded model')
Example #12
def _get_signature_def(meta_graph, signature_key):
    """Get the signature def from meta_graph with given signature_key.

  Args:
    meta_graph: meta_graph_def.
    signature_key: signature_def in the meta_graph_def.

  Returns:
    The signature_def used for tflite conversion.

  Raises:
    ValueError: Given signature_key is not valid for this meta_graph.
  """
    signature_def_map = meta_graph.signature_def
    signature_def_keys = set(signature_def_map.keys())
    logging.info(
        "The given saved_model MetaGraphDef contains SignatureDefs with the "
        "following keys: %s", signature_def_keys)
    if signature_key not in signature_def_keys:
        raise ValueError(
            "No '{}' in the saved_model\'s SignatureDefs. Possible "
            "values are '{}'. ".format(signature_key, signature_def_keys))
    signature_def = signature_def_utils.get_signature_def_by_key(
        meta_graph, signature_key)
    return signature_def
Example #13
    def testGetSignatureDefByKeyRegression(self):
        input1 = constant_op.constant("a", name="input-1")
        output1 = constant_op.constant("b", name="output-1")

        meta_graph_def = meta_graph_pb2.MetaGraphDef()
        self._add_to_signature_def_map(
            meta_graph_def, {
                "my_regression":
                signature_def_utils.regression_signature_def(input1, output1)
            })

        # Look up the regression signature with the key used while saving.
        signature_def = signature_def_contrib_utils.get_signature_def_by_key(
            meta_graph_def, "my_regression")

        # Check the method name to match the constants regression method name.
        self.assertEqual(signature_constants.REGRESS_METHOD_NAME,
                         signature_def.method_name)

        # Check inputs in signature def.
        self.assertEqual(1, len(signature_def.inputs))
        self._check_tensor_info(signature_def.inputs,
                                signature_constants.REGRESS_INPUTS,
                                "input-1:0")

        # Check outputs in signature def.
        self.assertEqual(1, len(signature_def.outputs))
        self._check_tensor_info(signature_def.outputs,
                                signature_constants.REGRESS_OUTPUTS,
                                "output-1:0")
Example #14
def from_saved_model(model_path, input_names, output_names):
    """Load tensorflow graph from saved_model."""
    # make sure we start with clean default graph
    tf.reset_default_graph()
    inputs = {}
    outputs = {}
    try:
        from tensorflow.contrib.saved_model.python.saved_model import signature_def_utils
        # pylint: disable=unnecessary-lambda
        get_signature_def = lambda meta_graph_def, k: \
            signature_def_utils.get_signature_def_by_key(meta_graph_def, k)
    except ImportError:
        # TF1.12 changed the api
        get_signature_def = lambda meta_graph_def, k: meta_graph_def.signature_def[
            k]

    with tf.Session() as sess:
        meta_graph_def = tf.saved_model.loader.load(
            sess, [tf.saved_model.tag_constants.SERVING], model_path)
        for k in meta_graph_def.signature_def.keys():
            inputs_tensor_info = get_signature_def(meta_graph_def, k).inputs
            for _, input_tensor in sorted(inputs_tensor_info.items()):
                inputs[input_tensor.name] = sess.graph.get_tensor_by_name(
                    input_tensor.name)
            outputs_tensor_info = get_signature_def(meta_graph_def, k).outputs
            for _, output_tensor in sorted(outputs_tensor_info.items()):
                outputs[output_tensor.name] = sess.graph.get_tensor_by_name(
                    output_tensor.name)
        frozen_graph = freeze_session(sess, output_names=list(outputs.keys()))
    if input_names is None:
        input_names = inputs.keys()
    input_names = remove_redundant_inputs(frozen_graph, input_names)
    # clean up
    tf.reset_default_graph()
    return frozen_graph, input_names, outputs.keys()
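A usage sketch, with a placeholder model path; passing `input_names=None` lets the function collect inputs from every signature in the model:

frozen_graph, input_names, output_names = from_saved_model(
    '/tmp/saved_model', input_names=None, output_names=None)
print(list(input_names), list(output_names))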
Example #15
    def testGetSignatureDefByKeyClassification(self):
        input1 = constant_op.constant("a", name="input-1")
        output1 = constant_op.constant("b", name="output-1")
        output2 = constant_op.constant("c", name="output-2")

        meta_graph_def = meta_graph_pb2.MetaGraphDef()
        self._add_to_signature_def_map(
            meta_graph_def, {
                "my_classification":
                signature_def_utils.classification_signature_def(
                    input1, output1, output2)
            })

        # Look up the classification signature def with the key used while saving.
        signature_def = signature_def_contrib_utils.get_signature_def_by_key(
            meta_graph_def, "my_classification")

        # Check the method name to match the constants classification method name.
        self.assertEqual(signature_constants.CLASSIFY_METHOD_NAME,
                         signature_def.method_name)

        # Check inputs in signature def.
        self.assertEqual(1, len(signature_def.inputs))
        self._check_tensor_info(signature_def.inputs,
                                signature_constants.CLASSIFY_INPUTS,
                                "input-1:0")

        # Check outputs in signature def.
        self.assertEqual(2, len(signature_def.outputs))
        self._check_tensor_info(signature_def.outputs,
                                signature_constants.CLASSIFY_OUTPUT_CLASSES,
                                "output-1:0")
        self._check_tensor_info(signature_def.outputs,
                                signature_constants.CLASSIFY_OUTPUT_SCORES,
                                "output-2:0")
Example #16
def _from_saved_model_v1(sess, model_path, input_names, output_names, signatures):
    """Load tensorflow graph from saved_model."""
    # make sure we start with clean default graph
    inputs = {}
    outputs = {}

    imported = tf.saved_model.loader.load(sess, [tf.saved_model.tag_constants.SERVING], model_path)
    for k in imported.signature_def.keys():
        if k.startswith("_"):
            # consider signatures starting with '_' private
            continue
        signatures.append(k)
    try:
        from tensorflow.contrib.saved_model.python.saved_model import signature_def_utils
        # pylint: disable=unnecessary-lambda
        get_signature_def = lambda meta_graph_def, k: \
            signature_def_utils.get_signature_def_by_key(meta_graph_def, k)
    except ImportError:
        # TF1.12 changed the api
        get_signature_def = lambda meta_graph_def, k: meta_graph_def.signature_def[k]

    for k in signatures:
        inputs_tensor_info = get_signature_def(imported, k).inputs
        for _, input_tensor in inputs_tensor_info.items():
            inputs[input_tensor.name] = sess.graph.get_tensor_by_name(input_tensor.name)
        outputs_tensor_info = get_signature_def(imported, k).outputs
        for _, output_tensor in outputs_tensor_info.items():
            outputs[output_tensor.name] = sess.graph.get_tensor_by_name(output_tensor.name)

    frozen_graph = freeze_session(sess, input_names=list(inputs.keys()), output_names=list(outputs.keys()))
    return frozen_graph, inputs, outputs
Example #17
  def testGetSignatureDefByKeyClassification(self):
    input1 = constant_op.constant("a", name="input-1")
    output1 = constant_op.constant("b", name="output-1")
    output2 = constant_op.constant(3.0, name="output-2")

    meta_graph_def = meta_graph_pb2.MetaGraphDef()
    self._add_to_signature_def_map(meta_graph_def, {
        "my_classification":
            signature_def_utils.classification_signature_def(
                input1, output1, output2)
    })

    # Look up the classification signature def with the key used while saving.
    signature_def = signature_def_contrib_utils.get_signature_def_by_key(
        meta_graph_def, "my_classification")

    # Check the method name to match the constants classification method name.
    self.assertEqual(signature_constants.CLASSIFY_METHOD_NAME,
                     signature_def.method_name)

    # Check inputs in signature def.
    self.assertEqual(1, len(signature_def.inputs))
    self._check_tensor_info(signature_def.inputs,
                            signature_constants.CLASSIFY_INPUTS, "input-1:0")

    # Check outputs in signature def.
    self.assertEqual(2, len(signature_def.outputs))
    self._check_tensor_info(signature_def.outputs,
                            signature_constants.CLASSIFY_OUTPUT_CLASSES,
                            "output-1:0")
    self._check_tensor_info(signature_def.outputs,
                            signature_constants.CLASSIFY_OUTPUT_SCORES,
                            "output-2:0")
Example #18
  def testPredictionSignatureDef(self):
    input1 = constant_op.constant("a", name="input-1")
    input2 = constant_op.constant("b", name="input-2")
    output1 = constant_op.constant("c", name="output-1")
    output2 = constant_op.constant("d", name="output-2")

    meta_graph_def = meta_graph_pb2.MetaGraphDef()
    self._add_to_signature_def_map(meta_graph_def, {
        "my_prediction":
            signature_def_utils.predict_signature_def({
                "input-1": input1,
                "input-2": input2
            }, {"output-1": output1,
                "output-2": output2})
    })

    # Look up the prediction signature def with the key used while saving.
    signature_def = signature_def_contrib_utils.get_signature_def_by_key(
        meta_graph_def, "my_prediction")
    self.assertEqual(signature_constants.PREDICT_METHOD_NAME,
                     signature_def.method_name)

    # Check inputs in signature def.
    self.assertEqual(2, len(signature_def.inputs))
    self._check_tensor_info(signature_def.inputs, "input-1", "input-1:0")
    self._check_tensor_info(signature_def.inputs, "input-2", "input-2:0")

    # Check outputs in signature def.
    self.assertEqual(2, len(signature_def.outputs))
    self._check_tensor_info(signature_def.outputs, "output-1", "output-1:0")
    self._check_tensor_info(signature_def.outputs, "output-2", "output-2:0")
Example #19
def _from_saved_model_v1(sess, model_path, input_names, output_names, tag, signature_names, use_graph_names):
    """Load tensorflow graph from saved_model."""

    wrn_no_tag = "'--tag' not specified for saved_model. Using --tag serve"
    wrn_empty_tag = "'--tag' value is empty string. Using tags = []"
    wrn_empty_sig = "'--signature_def' not provided. Using all signatures."

    if tag is None:
        tag = [tf.saved_model.tag_constants.SERVING]
        logger.warning(wrn_no_tag)

    if not signature_names:
        logger.warning(wrn_empty_sig)

    if tag == '':
        tag = []
        logger.warning(wrn_empty_tag)

    if not isinstance(tag, list):
        tag = [tag]

    imported = tf.saved_model.loader.load(sess, tag, model_path)
    signatures = []
    for k in imported.signature_def.keys():
        if k in signature_names or (not signature_names and not k.startswith("_")):
            signatures.append(k)
    try:
        from tensorflow.contrib.saved_model.python.saved_model import signature_def_utils
        # pylint: disable=unnecessary-lambda
        get_signature_def = lambda meta_graph_def, k: \
            signature_def_utils.get_signature_def_by_key(meta_graph_def, k)
    except ImportError:
        # TF1.12 changed the api
        get_signature_def = lambda meta_graph_def, k: meta_graph_def.signature_def[k]

    tensors_to_rename = {}
    if input_names is None:
        input_names = []
        for k in signatures:
            inputs_tensor_info = get_signature_def(imported, k).inputs
            for structured_name, input_tensor in inputs_tensor_info.items():
                if input_tensor.name not in input_names:
                    input_names.append(input_tensor.name)
                    if not use_graph_names:
                        tensors_to_rename[input_tensor.name] = structured_name
    if output_names is None:
        output_names = []
        for k in signatures:
            outputs_tensor_info = get_signature_def(imported, k).outputs
            for structured_name, output_tensor in outputs_tensor_info.items():
                if output_tensor.name not in output_names:
                    output_names.append(output_tensor.name)
                    if not use_graph_names:
                        tensors_to_rename[output_tensor.name] = structured_name
    frozen_graph, initialized_tables = \
        freeze_session(sess, input_names=input_names, output_names=output_names, get_tables=True)
    return frozen_graph, input_names, output_names, initialized_tables, tensors_to_rename
Example #20
    def __init__(self):

        model_path = os.environ.get('MODEL_PATH', '/model')

        self.sess = tf.Session(graph=tf.Graph())
        saved_metagraphdef = tf.saved_model.loader.load(
            self.sess, [tag_constants.SERVING], model_path)

        self.inputs_tensor_info = signature_def_utils.get_signature_def_by_key(
            saved_metagraphdef,
            signature_constants.DEFAULT_SERVING_SIGNATURE_DEF_KEY).inputs
        outputs_tensor_info = signature_def_utils.get_signature_def_by_key(
            saved_metagraphdef,
            signature_constants.DEFAULT_SERVING_SIGNATURE_DEF_KEY).outputs

        self.output_tensor_keys_sorted = sorted(outputs_tensor_info.keys())
        self.output_tensor_names_sorted = [
            outputs_tensor_info[tensor_key].name
            for tensor_key in self.output_tensor_keys_sorted
        ]
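The class stores the signature's input/output tensor info but shows no predict method; a minimal sketch of how those instance variables would typically be used (the single-input assumption here is illustrative, not from the source):

def predict(self, instance):
    # Feed the (assumed single) input tensor and fetch all outputs in key order.
    input_name = list(self.inputs_tensor_info.values())[0].name
    results = self.sess.run(self.output_tensor_names_sorted,
                            feed_dict={input_name: instance})
    return dict(zip(self.output_tensor_keys_sorted, results))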
Example #21
def _from_saved_model_v1(sess, model_path, tag, signatures):
    """
    Load tensorflow graph from saved_model.
    NOTICE: Modified version from tf2onnx project
    """

    wrn_no_tag = "'--tag' not specified for saved_model. Using --tag serve"
    wrn_empty_tag = "'--tag' value is empty string. Using tag =[[]]"

    if tag is None:
        tag = [tf.saved_model.SERVING]
        LOGGER.warning(wrn_no_tag)

    if tag == "":
        tag = [[]]
        LOGGER.warning(wrn_empty_tag)

    if not isinstance(tag, list):
        tag = [tag]

    imported = tf.compat.v1.saved_model.loader.load(sess, tag, model_path)
    for k in imported.signature_def.keys():
        if k.startswith("_"):
            # consider signatures starting with '_' private
            continue
        signatures.append(k)
    try:
        from tensorflow.contrib.saved_model.python.saved_model import (  # pytype: disable=import-error
            signature_def_utils, )

        # pylint: disable=unnecessary-lambda
        get_signature_def = lambda meta_graph_def, k: signature_def_utils.get_signature_def_by_key(
            meta_graph_def, k)
    except ImportError:
        # TF1.12 changed the api
        get_signature_def = lambda meta_graph_def, k: meta_graph_def.signature_def[
            k]

    inputs = {}
    outputs = {}
    for k in signatures:
        inputs_tensor_info = get_signature_def(imported, k).inputs
        for name, input_tensor in inputs_tensor_info.items():
            inputs[name] = input_tensor.name
        outputs_tensor_info = get_signature_def(imported, k).outputs
        for name, output_tensor in outputs_tensor_info.items():
            outputs[name] = output_tensor.name
    frozen_graph = freeze_session(sess,
                                  input_names=list(inputs.values()),
                                  output_names=list(outputs.values()))
    return frozen_graph, inputs, outputs
Example #22
def from_saved_model(model_path, input_names, output_names, signatures=None):
    """Load tensorflow graph from saved_model."""
    # make sure we start with clean default graph
    tf.reset_default_graph()
    inputs = {}
    outputs = {}
    try:
        # pylint: disable=C0415
        from tensorflow.contrib.saved_model.python.saved_model import signature_def_utils
        # pylint: disable=unnecessary-lambda
        get_signature_def = lambda meta_graph_def, k: \
            signature_def_utils.get_signature_def_by_key(meta_graph_def, k)
    except ImportError:
        # TF1.12 changed the api
        get_signature_def = lambda meta_graph_def, k: meta_graph_def.signature_def[
            k]

    with tf.Session() as sess:
        meta_graph_def = tf.saved_model.loader.load(
            sess, [tf.saved_model.tag_constants.SERVING], model_path)

        if signatures is None:
            signatures = []
            for k in meta_graph_def.signature_def.keys():
                if k.startswith("_"):
                    # consider signatures starting with '_' private
                    continue
                signatures.append(k)
            if len(signatures) > 1:
                logger.warning(
                    "found multiple signatures %s in saved_model, pass --signature_def in command line",
                    signatures)
        for k in signatures:
            inputs_tensor_info = get_signature_def(meta_graph_def, k).inputs
            for _, input_tensor in sorted(inputs_tensor_info.items()):
                inputs[input_tensor.name] = sess.graph.get_tensor_by_name(
                    input_tensor.name)
            outputs_tensor_info = get_signature_def(meta_graph_def, k).outputs
            for _, output_tensor in sorted(outputs_tensor_info.items()):
                outputs[output_tensor.name] = sess.graph.get_tensor_by_name(
                    output_tensor.name)
        frozen_graph = freeze_session(sess, output_names=list(outputs.keys()))
    if input_names is None:
        input_names = inputs.keys()
    input_names = remove_redundant_inputs(frozen_graph, input_names)
    # clean up
    tf.reset_default_graph()
    return frozen_graph, input_names, list(outputs.keys())
Example #23
    def _get_outputs_tensor_info_from_meta_graph_def(meta_graph_def,
                                                     signature_def_key):
        """Gets TensorInfos for all outputs of the SignatureDef.
      Returns a dictionary that maps each output key to its TensorInfo for the given
      signature_def_key in the meta_graph_def.

      Args:
        meta_graph_def: MetaGraphDef protocol buffer with the SignatureDefmap to
        look up signature_def_key.
        signature_def_key: A SignatureDef key string.

      Returns:
        A dictionary that maps output tensor keys to TensorInfos.
      """
        return signature_def_utils.get_signature_def_by_key(
            meta_graph_def, signature_def_key).outputs
Example #24
def _get_outputs_tensor_info_from_meta_graph_def(meta_graph_def,
                                                 signature_def_key):
  """Gets TensorInfos for all outputs of the SignatureDef.

  Returns a dictionary that maps each output key to its TensorInfo for the given
  signature_def_key in the meta_graph_def.

  Args:
    meta_graph_def: MetaGraphDef protocol buffer with the SignatureDef map to
      look up signature_def_key.
    signature_def_key: A SignatureDef key string.

  Returns:
    A dictionary that maps output tensor keys to TensorInfos.
  """
  return signature_def_utils.get_signature_def_by_key(meta_graph_def,
                                                      signature_def_key).outputs
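A hedged usage sketch; the export directory and key are placeholders, and `saved_model_cli.get_meta_graph_def` is assumed to be available as in Example #1:

meta_graph_def = saved_model_cli.get_meta_graph_def('/tmp/saved_model', 'serve')
outputs = _get_outputs_tensor_info_from_meta_graph_def(
    meta_graph_def, 'serving_default')
for key, tensor_info in outputs.items():
    print(key, tensor_info.name)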
Example #25
    def __init__(self, path=DEFAULT_MODEL_PATH):
        logger.info('Loading model from: {}...'.format(path))
        sess = tf.Session(graph=tf.Graph())
        # Load the graph
        model_graph_def = sm.loader.load(sess, [sm.tag_constants.SERVING], path)
        sig_def = signature_def_utils.get_signature_def_by_key(model_graph_def,
                                                               sm.signature_constants.DEFAULT_SERVING_SIGNATURE_DEF_KEY)

        input_name = sig_def.inputs['input_images'].name
        output_name = sig_def.outputs['output_images'].name

        # Set up instance variables and required inputs for inference
        self.sess = sess
        self.model_graph_def = model_graph_def
        self.output_tensor = sess.graph.get_tensor_by_name(output_name)
        self.input_name = input_name
        self.output_name = output_name
        logger.info('Loaded model')
Example #26
    def __init__(self, model_path):

        self.logger = logging.getLogger(self.__class__.__name__)
        self.model_path = model_path

        if not tf.saved_model.loader.maybe_saved_model_directory(self.model_path):
            raise ValueError('No model found in', self.model_path)

        self.sess = tf.Session(graph=tf.Graph())

        meta_graph_def = tf.saved_model.loader.load(self.sess, [tf.saved_model.tag_constants.SERVING], self.model_path)
        signature_def = tf.saved_model.signature_constants.DEFAULT_SERVING_SIGNATURE_DEF_KEY
        meta_graph_def_sig = signature_def_utils.get_signature_def_by_key(meta_graph_def, signature_def)

        self.input_tensor_info = meta_graph_def_sig.inputs

        self.output_tensor_info = meta_graph_def_sig.outputs
        self.output_tensor_keys = [k for k in self.output_tensor_info]
        self.output_tensor_names = [self.output_tensor_info[k].name for k in self.output_tensor_keys]
Example #27
def from_saved_model(model_path, input_names, output_names):
    """Load tensorflow graph from saved_model."""
    # make sure we start with clean default graph
    tf.reset_default_graph()
    inputs = {}
    outputs = {}
    try:
        from tensorflow.contrib.saved_model.python.saved_model import signature_def_utils
        # pylint: disable=unnecessary-lambda
        get_signature_def = lambda meta_graph_def, k: \
            signature_def_utils.get_signature_def_by_key(meta_graph_def, k)
    except ImportError:
        # TF1.12 changed the api
        get_signature_def = lambda meta_graph_def, k: meta_graph_def.signature_def[
            k]

    with tf.Session() as sess:
        meta_graph_def = tf.saved_model.loader.load(
            sess, [tf.saved_model.tag_constants.SERVING], model_path)
        for k in meta_graph_def.signature_def.keys():
            inputs_tensor_info = get_signature_def(meta_graph_def, k).inputs
            for _, input_tensor in sorted(inputs_tensor_info.items()):
                inputs[input_tensor.name] = sess.graph.get_tensor_by_name(
                    input_tensor.name)
            outputs_tensor_info = get_signature_def(meta_graph_def, k).outputs
            for _, output_tensor in sorted(outputs_tensor_info.items()):
                outputs[output_tensor.name] = sess.graph.get_tensor_by_name(
                    output_tensor.name)
        frozen_graph = freeze_session(sess, output_names=list(outputs.keys()))
        frozen_inputs = []
        # get inputs in frozen graph
        for n in frozen_graph.node:
            for inp, _ in inputs.items():
                if utils.node_name(inp) == n.name:
                    frozen_inputs.append(inp)
        deleted_inputs = list(set(inputs.keys()) - set(frozen_inputs))
        if deleted_inputs:
            log.warning("inputs [%s] is not in frozen graph, delete them",
                        ",".join(deleted_inputs))
    # clean up
    tf.reset_default_graph()
    return frozen_graph, frozen_inputs, outputs.keys()
Example #28
def RunModel(saved_model_dir, signature_def_key, feature_text, feature_map):
    saved_model = reader.read_saved_model(saved_model_dir)
    meta_graph = None
    for meta_graph_def in saved_model.meta_graphs:
        if _TAG in meta_graph_def.meta_info_def.tags:
            meta_graph = meta_graph_def
            break
    if meta_graph is None:
        raise ValueError("Cannot find saved_model with tag " + _TAG)
    signature_def = signature_def_utils.get_signature_def_by_key(
        meta_graph, signature_def_key)
    features = pi.get_feature_list(feature_map, feature_text.split(" "))
    inputs_feed_dict = {signature_def.inputs["inputs"].name: features}
    if signature_def_key == "proba":
        output_key = "scores"
    elif signature_def_key == "embedding":
        output_key = "outputs"
    else:
        raise ValueError("Unrecognised signature_def %s" % (signature_def_key))
    output_tensor = signature_def.outputs[output_key].name
    with tf.Session() as sess:
        loader.load(sess, [_TAG], saved_model_dir)
        outputs = sess.run(output_tensor, feed_dict=inputs_feed_dict)
        return outputs
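A hypothetical call, mirroring the 'proba'/'embedding' branch above; the paths and feature map are placeholders:

# feature_map is a placeholder for the vocabulary used by pi.get_feature_list.
probabilities = RunModel('/tmp/fasttext_model', 'proba',
                         'some text to classify', feature_map)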
Example #29
    def testPredictionSignatureDef(self):
        input1 = constant_op.constant("a", name="input-1")
        input2 = constant_op.constant("b", name="input-2")
        output1 = constant_op.constant("c", name="output-1")
        output2 = constant_op.constant("d", name="output-2")

        meta_graph_def = meta_graph_pb2.MetaGraphDef()
        self._add_to_signature_def_map(
            meta_graph_def, {
                "my_prediction":
                signature_def_utils.predict_signature_def(
                    {
                        "input-1": input1,
                        "input-2": input2
                    }, {
                        "output-1": output1,
                        "output-2": output2
                    })
            })

        # Look up the prediction signature def with the key used while saving.
        signature_def = signature_def_contrib_utils.get_signature_def_by_key(
            meta_graph_def, "my_prediction")
        self.assertEqual(signature_constants.PREDICT_METHOD_NAME,
                         signature_def.method_name)

        # Check inputs in signature def.
        self.assertEqual(2, len(signature_def.inputs))
        self._check_tensor_info(signature_def.inputs, "input-1", "input-1:0")
        self._check_tensor_info(signature_def.inputs, "input-2", "input-2:0")

        # Check outputs in signature def.
        self.assertEqual(2, len(signature_def.outputs))
        self._check_tensor_info(signature_def.outputs, "output-1",
                                "output-1:0")
        self._check_tensor_info(signature_def.outputs, "output-2",
                                "output-2:0")
Example #30
    def __init__(self, model_path):

        self.logger = logging.getLogger(self.__class__.__name__)
        self.model_path = model_path

        if not tf.saved_model.loader.maybe_saved_model_directory(
                self.model_path):
            raise ValueError('No model found in', self.model_path)

        self.sess = tf.Session(graph=tf.Graph())

        meta_graph_def = tf.saved_model.loader.load(
            self.sess, [tf.saved_model.tag_constants.SERVING], self.model_path)
        signature_def = tf.saved_model.signature_constants.DEFAULT_SERVING_SIGNATURE_DEF_KEY
        meta_graph_def_sig = signature_def_utils.get_signature_def_by_key(
            meta_graph_def, signature_def)

        self.input_tensor_info = meta_graph_def_sig.inputs

        self.output_tensor_info = meta_graph_def_sig.outputs
        self.output_tensor_keys = [k for k in self.output_tensor_info]
        self.output_tensor_names = [
            self.output_tensor_info[k].name for k in self.output_tensor_keys
        ]
Example #31
    def __init__(self, model_path, vocab_fname, output_fname, logger, skip_attention):

        self.logger = logger
        self.model_path = model_path
        self.output_fname = output_fname

        if not tf.saved_model.loader.maybe_saved_model_directory(self.model_path):
            raise ValueError('No model found in', self.model_path)

        self.sess = tf.Session(graph=tf.Graph())

        meta_graph_def = tf.saved_model.loader.load(self.sess, [tf.saved_model.tag_constants.SERVING], self.model_path)
        signature_def = tf.saved_model.signature_constants.DEFAULT_SERVING_SIGNATURE_DEF_KEY
        meta_graph_def_sig = signature_def_utils.get_signature_def_by_key(meta_graph_def, signature_def)

        input_tensor_keys = [k for k in meta_graph_def_sig.inputs]
        input_tensor_names = [meta_graph_def_sig.inputs[k].name for k in input_tensor_keys]

        self.t_variables = {key: name for key, name in zip(input_tensor_keys, input_tensor_names)}
     
        self.final_output = meta_graph_def_sig.outputs['output'].name
        self.str_scores = meta_graph_def_sig.outputs['str_scores'].name

        self.vocab = utils.load_dict(vocab_fname)
Example #32
def _from_saved_model_v1(sess, model_path, input_names, output_names, tag,
                         signature_names):
    """Load tensorflow graph from saved_model."""

    wrn_no_tag = "'--tag' not specified for saved_model. Using --tag serve"
    wrn_empty_tag = "'--tag' value is empty string. Using tags = []"
    wrn_empty_sig = "'--signature_def' not provided. Using all signatures."

    if tag is None:
        tag = [tf.saved_model.tag_constants.SERVING]
        logger.warning(wrn_no_tag)

    if not signature_names:
        logger.warning(wrn_empty_sig)

    if tag == '':
        tag = []
        logger.warning(wrn_empty_tag)

    if not isinstance(tag, list):
        tag = [tag]

    imported = tf.saved_model.loader.load(sess, tag, model_path)
    signatures = []
    for k in imported.signature_def.keys():
        if k in signature_names or (not signature_names
                                    and not k.startswith("_")):
            signatures.append(k)
    try:
        from tensorflow.contrib.saved_model.python.saved_model import signature_def_utils
        # pylint: disable=unnecessary-lambda
        get_signature_def = lambda meta_graph_def, k: \
            signature_def_utils.get_signature_def_by_key(meta_graph_def, k)
    except ImportError:
        # TF1.12 changed the api
        get_signature_def = lambda meta_graph_def, k: meta_graph_def.signature_def[
            k]

    if input_names is None:
        input_names = []
        for k in signatures:
            inputs_tensor_info = get_signature_def(imported, k).inputs
            for _, input_tensor in inputs_tensor_info.items():
                if input_tensor.name not in input_names:
                    input_names.append(input_tensor.name)
    tensors_to_rename = {}
    if output_names is None:
        output_names = []
        for k in signatures:
            outputs_tensor_info = get_signature_def(imported, k).outputs
            for structured_name, output_tensor in outputs_tensor_info.items():
                if output_tensor.name not in output_names:
                    output_names.append(output_tensor.name)
                    tensors_to_rename[output_tensor.name] = structured_name
    frozen_graph = freeze_session(sess,
                                  input_names=input_names,
                                  output_names=output_names)
    table_names, key_dtypes, value_dtypes = get_hash_table_info(frozen_graph)
    initialized_tables = {}
    tf.tables_initializer().run()
    for n, k_dtype, val_dtype in zip(table_names, key_dtypes, value_dtypes):
        h = lookup_ops.hash_table_v2(k_dtype, val_dtype, shared_name=n)
        try:
            k, v = lookup_ops.lookup_table_export_v2(h, k_dtype, val_dtype)
            k, v = sess.run([k, v])
            initialized_tables[n] = (k, v)
        except Exception:  # pylint: disable=broad-except
            logger.warning("Could not initialize table with shared_name = %r",
                           n)
    return frozen_graph, input_names, output_names, initialized_tables, tensors_to_rename
Example #33
import pandas as pd
import tensorflow as tf
from tensorflow.python.tools import saved_model_utils
from tensorflow.contrib.saved_model.python.saved_model import signature_def_utils
import common

tag = tf.saved_model.tag_constants.SERVING
signature_def = tf.saved_model.signature_constants.DEFAULT_SERVING_SIGNATURE_DEF_KEY

with tf.Session() as sess:
    export_dir = common.get_export_dir()
    tf.saved_model.loader.load(sess, [tag], export_dir)

    meta_graph_def = saved_model_utils.get_meta_graph_def(export_dir, tag)
    predict_signature_def = signature_def_utils.get_signature_def_by_key(
        meta_graph_def, signature_def)

    inputs = common.get_test_inputs()
    examples = common.create_examples(inputs)

    fetches = [
        predict_signature_def.outputs[key].name
        for key in ['classes', 'scores']
    ]
    feed_dict = {predict_signature_def.inputs['inputs'].name: examples}

    outputs = sess.run(fetches, feed_dict=feed_dict)
    predictions = {
        'classes': outputs[0],
        'scores': outputs[1],
    }
Example #34
    def run_test(self,
                 name,
                 backend="caffe2",
                 debug=False,
                 onnx_file=None,
                 opset=None,
                 perf=None,
                 fold_const=None):
        """Run complete test against backend."""
        print(name)
        self.perf = perf

        # get the model
        if self.url:
            _, dir_name = self.download_file()
            model_path = os.path.join(dir_name, self.local)
        else:
            model_path = self.local
            dir_name = os.path.dirname(self.local)
        print("\tdownloaded", model_path)

        if self.model_type in ["checkpoint"]:
            #
            # if the input model is a checkpoint, convert it to a frozen model
            saver = tf.train.import_meta_graph(model_path)
            with tf.Session() as sess:
                saver.restore(sess, model_path[:-5])
                frozen_graph = freeze_session(sess,
                                              output_names=self.output_names)
                tf.train.write_graph(frozen_graph,
                                     dir_name,
                                     "frozen.pb",
                                     as_text=False)
            model_path = os.path.join(dir_name, "frozen.pb")
        elif self.model_type in ["saved_model"]:
            try:
                from tensorflow.contrib.saved_model.python.saved_model import signature_def_utils
                get_signature_def = lambda meta_graph_def, k: \
                    signature_def_utils.get_signature_def_by_key(meta_graph_def, k)
            except ImportError:
                # TF1.12 changed the api
                get_signature_def = lambda meta_graph_def, k: meta_graph_def.signature_def[
                    k]

            # saved_model format - convert to checkpoint
            with tf.Session() as sess:
                meta_graph_def = tf.saved_model.loader.load(
                    sess, [tf.saved_model.tag_constants.SERVING], model_path)
                inputs = {}
                outputs = {}
                for k in meta_graph_def.signature_def.keys():
                    inputs_tensor_info = get_signature_def(meta_graph_def,
                                                           k).inputs
                    for _, input_tensor in sorted(inputs_tensor_info.items()):
                        inputs[
                            input_tensor.name] = sess.graph.get_tensor_by_name(
                                input_tensor.name)
                    outputs_tensor_info = get_signature_def(meta_graph_def,
                                                            k).outputs
                    for _, output_tensor in sorted(
                            outputs_tensor_info.items()):
                        outputs[output_tensor.
                                name] = sess.graph.get_tensor_by_name(
                                    output_tensor.name)
                # freeze uses the node name derived from output:0 so only pass in output:0;
                # it will provide all outputs of that node.
                for o in list(outputs.keys()):
                    if not o.endswith(":0"):
                        del outputs[o]
                frozen_graph = freeze_session(sess,
                                              output_names=list(
                                                  outputs.keys()))
                tf.train.write_graph(frozen_graph,
                                     dir_name,
                                     "frozen.pb",
                                     as_text=False)
            model_path = os.path.join(dir_name, "frozen.pb")

        # create the input data
        inputs = {}
        for k, v in self.input_names.items():
            if isinstance(v, six.text_type) and v.startswith("np."):
                inputs[k] = eval(v)  # pylint: disable=eval-used
            else:
                inputs[k] = self.make_input(v)
        if self.more_inputs:
            for k, v in self.more_inputs.items():
                inputs[k] = v
        tf.reset_default_graph()
        graph_def = graph_pb2.GraphDef()
        with open(model_path, "rb") as f:
            graph_def.ParseFromString(f.read())

        graph_def = tf2onnx.tfonnx.tf_optimize(inputs.keys(),
                                               self.output_names, graph_def,
                                               fold_const)
        shape_override = {}
        g = tf.import_graph_def(graph_def, name='')
        with tf.Session(config=tf.ConfigProto(allow_soft_placement=True),
                        graph=g) as sess:

            # fix inputs if needed
            for k in inputs.keys():  # pylint: disable=consider-iterating-dictionary
                t = sess.graph.get_tensor_by_name(k)
                dtype = tf.as_dtype(t.dtype).name
                if type != "float32":
                    v = inputs[k]
                    inputs[k] = v.astype(dtype)
            if self.force_input_shape:
                for k, v in inputs.items():
                    shape_override[k] = list(v.shape)

            # run the model with tensorflow
            if self.skip_tensorflow:
                print("\ttensorflow", "SKIPPED")
            else:
                tf_results = self.run_tensorflow(sess, inputs)
                print("\ttensorflow", "OK")
            model_proto = None
            try:
                # convert model to onnx
                onnx_graph = self.to_onnx(sess.graph,
                                          opset=opset,
                                          shape_override=shape_override,
                                          input_names=inputs.keys())
                new_model_proto = GraphUtil.opt_transposes_with_graph(
                    onnx_graph, "test", debug=debug)
                if new_model_proto:
                    model_proto = new_model_proto
                else:
                    print(
                        "\tNON-CRITICAL, optimizers are not applied successfully"
                    )
                print("\tto_onnx", "OK")
                if debug:
                    onnx_graph.dump_graph()
                if onnx_file:
                    self.create_onnx_file(name, model_proto, inputs, onnx_file)
            except Exception as ex:
                tb = traceback.format_exc()
                print("\tto_onnx", "FAIL", ex, tb)

        try:
            onnx_results = None
            if backend == "caffe2":
                onnx_results = self.run_caffe2(name, model_proto, inputs)
            elif backend == "onnxmsrtnext":
                onnx_results = self.run_onnxmsrtnext(name, model_proto, inputs)
            elif backend == "onnxruntime":
                onnx_results = self.run_onnxruntime(name, model_proto, inputs)
            else:
                raise ValueError("unknown backend")
            print("\trun_onnx OK")

            try:
                if self.skip_tensorflow:
                    print("\tResults: skipped tensorflow")
                else:
                    if self.check_only_shape:
                        for tf_res, onnx_res in zip(tf_results, onnx_results):
                            np.testing.assert_array_equal(
                                tf_res.shape, onnx_res.shape)
                    else:
                        for tf_res, onnx_res in zip(tf_results, onnx_results):
                            np.testing.assert_allclose(tf_res,
                                                       onnx_res,
                                                       rtol=self.rtol,
                                                       atol=self.atol)
                    print("\tResults: OK")
                return True
            except Exception as ex:
                print("\tResults: ", ex)

        except Exception as ex:
            print("\trun_onnx", "FAIL", ex)

        return False
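
Both `run_test` variants in this listing call a `freeze_session` helper that is not shown; a minimal sketch, assuming the standard TF 1.x `tf.graph_util.convert_variables_to_constants` approach:

import tensorflow as tf

def freeze_session(sess, output_names=None):
    """Minimal sketch: convert the session's variables to constants so the
    graph can be serialized as a single frozen GraphDef."""
    graph = sess.graph
    with graph.as_default():
        # node names are tensor names minus the ":<index>" output suffix
        output_node_names = [n.split(":")[0] for n in (output_names or [])]
        return tf.graph_util.convert_variables_to_constants(
            sess, graph.as_graph_def(), output_node_names)
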
Example no. 35
def _run_model(iterator, args):
  """Run single-node inferencing on a checkpoint/saved_model using input tensors obtained from a Spark partition iterator and returning output tensors."""
  single_node_env(args)

  logging.info("===== input_mapping: {}".format(args.input_mapping))
  logging.info("===== output_mapping: {}".format(args.output_mapping))
  input_tensor_names = [tensor for col, tensor in sorted(args.input_mapping.items())]
  output_tensor_names = [tensor for tensor, col in sorted(args.output_mapping.items())]

  # if using a signature_def_key, get input/output tensor info from the requested signature
  if args.signature_def_key:
    assert args.export_dir, "Inferencing with signature_def_key requires --export_dir argument"
    logging.info("===== loading meta_graph_def for tag_set ({0}) from saved_model: {1}".format(args.tag_set, args.export_dir))
    meta_graph_def = get_meta_graph_def(args.export_dir, args.tag_set)
    signature = signature_def_utils.get_signature_def_by_key(meta_graph_def, args.signature_def_key)
    logging.info("signature: {}".format(signature))
    inputs_tensor_info = signature.inputs
    logging.info("inputs_tensor_info: {0}".format(inputs_tensor_info))
    outputs_tensor_info = signature.outputs
    logging.info("outputs_tensor_info: {0}".format(outputs_tensor_info))

  result = []
  with tf.Session(graph=ops_lib.Graph()) as sess:
    if args.export_dir:
      assert args.tag_set, "Inferencing from a saved_model requires --tag_set"
      # load graph from a saved_model
      logging.info("===== restoring from saved_model: {}".format(args.export_dir))
      loader.load(sess, args.tag_set.split(','), args.export_dir)
    elif args.model_dir:
      # load graph from a checkpoint
      ckpt = tf.train.latest_checkpoint(args.model_dir)
      assert ckpt, "Invalid model checkpoint path: {}".format(args.model_dir)
      logging.info("===== restoring from checkpoint: {}".format(ckpt + ".meta"))
      saver = tf.train.import_meta_graph(ckpt + ".meta", clear_devices=True)
      saver.restore(sess, ckpt)
    else:
      raise Exception("Inferencing requires either --model_dir or --export_dir argument")

    # get list of input/output tensors (by name)
    if args.signature_def_key:
      input_tensors = [inputs_tensor_info[t].name for t in input_tensor_names]
      output_tensors = [outputs_tensor_info[t].name for t in output_tensor_names]
    else:
      input_tensors = [t + ':0' for t in input_tensor_names]
      output_tensors = [t + ':0' for t in output_tensor_names]

    logging.info("input_tensors: {0}".format(input_tensors))
    logging.info("output_tensors: {0}".format(output_tensors))

    # feed data in batches and return output tensors
    for tensors in yield_batch(iterator, args.batch_size, len(input_tensor_names)):
      inputs_feed_dict = {}
      for i in range(len(input_tensors)):
        inputs_feed_dict[input_tensors[i]] = tensors[i]

      outputs = sess.run(output_tensors, feed_dict=inputs_feed_dict)
      lengths = [len(output) for output in outputs]
      input_size = len(tensors[0])
      assert all(l == input_size for l in lengths), \
          "Output array sizes {} must match input size: {}".format(lengths, input_size)
      python_outputs = [output.tolist() for output in outputs]  # convert from numpy to standard python types
      result.extend(zip(*python_outputs))  # convert to an array of tuples of "output columns"
  return result
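
The `yield_batch` helper above is assumed rather than shown; a plausible sketch (name and behavior are assumptions) that collects the partition iterator into column-wise batches of at most `batch_size` rows:

def yield_batch(iterator, batch_size, num_tensors=1):
    """Sketch of the assumed helper: accumulate rows into `num_tensors`
    parallel column lists and yield whenever `batch_size` rows are ready."""
    tensors = [[] for _ in range(num_tensors)]
    for item in iterator:
        # each row carries one value per input tensor
        for i in range(num_tensors):
            tensors[i].append(item[i])
        if len(tensors[0]) >= batch_size:
            yield tensors
            tensors = [[] for _ in range(num_tensors)]
    if tensors[0]:
        yield tensors  # final partial batch
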
Example no. 36
    def run_test(self, name, backend="caffe2", debug=False, onnx_file=None, opset=None, perf=None, fold_const=None):
        """Run complete test against backend."""
        print(name)
        self.perf = perf

        # get the model
        if self.url:
            _, dir_name = self.download_file()
            model_path = os.path.join(dir_name, self.local)
        else:
            model_path = self.local
            dir_name = os.path.dirname(self.local)
        print("\tdownloaded", model_path)

        if self.model_type in ["checkpoint"]:
            # if the input model is a checkpoint, convert it to a frozen model
            saver = tf.train.import_meta_graph(model_path)
            with tf.Session() as sess:
                saver.restore(sess, model_path[:-5])
                frozen_graph = freeze_session(sess, output_names=self.output_names)
                tf.train.write_graph(frozen_graph, dir_name, "frozen.pb", as_text=False)
            model_path = os.path.join(dir_name, "frozen.pb")
        elif self.model_type in ["saved_model"]:
            # saved_model format - convert it to a frozen graph
            with tf.Session() as sess:
                meta_graph_def = tf.saved_model.loader.load(sess, [tf.saved_model.tag_constants.SERVING], model_path)
                inputs = {}
                outputs = {}
                for k in meta_graph_def.signature_def.keys():
                    inputs_tensor_info = signature_def_utils.get_signature_def_by_key(meta_graph_def, k).inputs
                    for _, input_tensor in sorted(inputs_tensor_info.items()):
                        inputs[input_tensor.name] = sess.graph.get_tensor_by_name(input_tensor.name)
                    outputs_tensor_info = signature_def_utils.get_signature_def_by_key(meta_graph_def, k).outputs
                    for _, output_tensor in sorted(outputs_tensor_info.items()):
                        outputs[output_tensor.name] = sess.graph.get_tensor_by_name(output_tensor.name)
                frozen_graph = freeze_session(sess, output_names=list(outputs.keys()))
                tf.train.write_graph(frozen_graph, dir_name, "frozen.pb", as_text=False)
            model_path = os.path.join(dir_name, "frozen.pb")

        # create the input data
        inputs = {}
        for k, v in self.input_names.items():
            if isinstance(v, six.text_type) and v.startswith("np."):
                inputs[k] = eval(v)  # pylint: disable=eval-used
            else:
                inputs[k] = self.make_input(v)
        if self.more_inputs:
            for k, v in self.more_inputs.items():
                inputs[k] = v
        tf.reset_default_graph()
        graph_def = graph_pb2.GraphDef()
        with open(model_path, "rb") as f:
            graph_def.ParseFromString(f.read())

        graph_def = tf2onnx.tfonnx.tf_optimize(list(inputs.keys()), self.output_names, graph_def, fold_const)
        shape_override = {}
        g = tf.import_graph_def(graph_def, name='')
        with tf.Session(graph=g) as sess:

            # fix inputs if needed
            for k in inputs.keys():  # pylint: disable=consider-iterating-dictionary
                t = sess.graph.get_tensor_by_name(k)
                dtype = tf.as_dtype(t.dtype).name
                if dtype != "float32":
                    v = inputs[k]
                    inputs[k] = v.astype(dtype)
            if self.force_input_shape:
                shape_override = self.input_names

            # run the model with tensorflow
            if self.skip_tensorflow:
                print("\ttensorflow", "SKIPPED")
            else:
                tf_results = self.run_tensorflow(sess, inputs)
                print("\ttensorflow", "OK")
            model_proto = None
            try:
                # convert model to onnx
                onnx_graph = self.to_onnx(sess.graph, opset=opset, shape_override=shape_override)
                optimizer = TransposeOptimizer(onnx_graph, self.output_names, debug)
                optimizer.optimize()

                model_proto = onnx_graph.make_model("test")
                print("\tto_onnx", "OK")
                if debug:
                    onnx_graph.dump_graph()
                if onnx_file:
                    self.create_onnx_file(name, model_proto, inputs, onnx_file)
            except Exception as ex:
                print("\tto_onnx", "FAIL", ex)

        try:
            onnx_results = None
            if backend == "caffe2":
                onnx_results = self.run_caffe2(name, model_proto, inputs)
            elif backend == "onnxmsrtnext":
                onnx_results = self.run_onnxmsrtnext(name, model_proto, inputs)
            elif backend == "onnxruntime":
                onnx_results = self.run_onnxruntime(name, model_proto, inputs)
            else:
                raise ValueError("unknown backend")
            print("\trun_onnx OK")

            try:
                if self.skip_tensorflow:
                    print("\tResults: skipped tensorflow")
                else:
                    if self.check_only_shape:
                        for tf_res, onnx_res in zip(tf_results, onnx_results):
                            np.testing.assert_array_equal(tf_res.shape, onnx_res.shape)
                    else:
                        for tf_res, onnx_res in zip(tf_results, onnx_results):
                            np.testing.assert_allclose(tf_res, onnx_res, rtol=self.rtol, atol=self.atol)
                    print("\tResults: OK")
                return True
            except Exception as ex:
                print("\tResults: ", ex)

        except Exception as ex:
            print("\trun_onnx", "FAIL", ex)

        return False
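
The backend runners (`run_caffe2`, `run_onnxmsrtnext`, `run_onnxruntime`) are referenced but not shown; a minimal sketch of the onnxruntime case, assuming the keys of `inputs` match the ONNX graph's input names:

import onnxruntime as rt

def run_onnxruntime(self, name, model_proto, inputs):
    """Sketch of the assumed backend runner using the onnxruntime API."""
    sess = rt.InferenceSession(model_proto.SerializeToString())
    # assumption: the converter kept the TensorFlow tensor names as input names
    feed = {inp.name: inputs[inp.name] for inp in sess.get_inputs()}
    output_names = [out.name for out in sess.get_outputs()]
    return sess.run(output_names, feed)
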
Example no. 37
def _run_model(iterator, args, tf_args):
    """mapPartitions function to run single-node inferencing from a checkpoint/saved_model, using the model's input/output mappings.

  Args:
    :iterator: input RDD partition iterator.
    :args: arguments for TFModel, in argparse format
    :tf_args: arguments for TensorFlow inferencing code, in argparse or ARGV format.

  Returns:
    An iterator of result data.
  """
    single_node_env(tf_args)

    logging.info("===== input_mapping: {}".format(args.input_mapping))
    logging.info("===== output_mapping: {}".format(args.output_mapping))
    input_tensor_names = [
        tensor for col, tensor in sorted(args.input_mapping.items())
    ]
    output_tensor_names = [
        tensor for tensor, col in sorted(args.output_mapping.items())
    ]

    # if using a signature_def_key, get input/output tensor info from the requested signature
    if args.signature_def_key:
        assert args.export_dir, "Inferencing with signature_def_key requires --export_dir argument"
        logging.info(
            "===== loading meta_graph_def for tag_set ({0}) from saved_model: {1}"
            .format(args.tag_set, args.export_dir))
        meta_graph_def = get_meta_graph_def(args.export_dir, args.tag_set)
        signature = signature_def_utils.get_signature_def_by_key(
            meta_graph_def, args.signature_def_key)
        logging.debug("signature: {}".format(signature))
        inputs_tensor_info = signature.inputs
        logging.debug("inputs_tensor_info: {0}".format(inputs_tensor_info))
        outputs_tensor_info = signature.outputs
        logging.debug("outputs_tensor_info: {0}".format(outputs_tensor_info))

    result = []

    global global_sess, global_args
    if global_sess and global_args == args:
        # if graph/session already loaded/started (and using same args), just reuse it
        sess = global_sess
    else:
        # otherwise, create new session and load graph from disk
        tf.reset_default_graph()
        sess = tf.Session(graph=tf.get_default_graph())
        if args.export_dir:
            assert args.tag_set, "Inferencing from a saved_model requires --tag_set"
            # load graph from a saved_model
            logging.info("===== restoring from saved_model: {}".format(
                args.export_dir))
            loader.load(sess, args.tag_set.split(','), args.export_dir)
        elif args.model_dir:
            # load graph from a checkpoint
            ckpt = tf.train.latest_checkpoint(args.model_dir)
            assert ckpt, "Invalid model checkpoint path: {}".format(
                args.model_dir)
            logging.info("===== restoring from checkpoint: {}".format(ckpt +
                                                                      ".meta"))
            saver = tf.train.import_meta_graph(ckpt + ".meta",
                                               clear_devices=True)
            saver.restore(sess, ckpt)
        else:
            raise Exception(
                "Inferencing requires either --model_dir or --export_dir argument"
            )
        global_sess = sess
        global_args = args

    # get list of input/output tensors (by name)
    if args.signature_def_key:
        input_tensors = [
            inputs_tensor_info[t].name for t in input_tensor_names
        ]
        output_tensors = [
            outputs_tensor_info[t].name for t in output_tensor_names
        ]
    else:
        input_tensors = [t + ':0' for t in input_tensor_names]
        output_tensors = [t + ':0' for t in output_tensor_names]

    logging.info("input_tensors: {0}".format(input_tensors))
    logging.info("output_tensors: {0}".format(output_tensors))

    # feed data in batches and return output tensors
    for tensors in yield_batch(iterator, args.batch_size,
                               len(input_tensor_names)):
        inputs_feed_dict = {}
        for i in range(len(input_tensors)):
            inputs_feed_dict[input_tensors[i]] = tensors[i]

        outputs = sess.run(output_tensors, feed_dict=inputs_feed_dict)
        lengths = [len(output) for output in outputs]
        input_size = len(tensors[0])
        assert all(length == input_size for length in lengths), \
            "Output array sizes {} must match input size: {}".format(lengths, input_size)
        # convert from numpy to standard python types
        python_outputs = [output.tolist() for output in outputs]
        # convert to an array of tuples of "output columns"
        result.extend(zip(*python_outputs))

    return result
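
The session cache above relies on module-level `global_sess`/`global_args`; a minimal sketch of the assumed module-scope initialization:

# module-level cache so repeated mapPartitions calls in the same executor
# process can reuse an already-loaded graph/session
global_sess = None
global_args = None
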
Example no. 38
def get_meta_graph_def(saved_model_dir, tags):
    """Gets `MetaGraphDef` from a directory containing a `SavedModel`.
  Returns the `MetaGraphDef` for the given tag-set and SavedModel directory.
  Args:
    saved_model_dir: Directory containing the SavedModel.
    tags: Comma separated list of tags used to identify the correct
      `MetaGraphDef`.
  Raises:
    ValueError: An error when the given tags cannot be found.
  Returns:
    A `MetaGraphDef` corresponding to the given tags.
  """
    saved_model = reader.read_saved_model(saved_model_dir)
    set_of_tags = set([tag.strip() for tag in tags.split(',')])
    for meta_graph_def in saved_model.meta_graphs:
        if set(meta_graph_def.meta_info_def.tags) == set_of_tags:
            return meta_graph_def
    raise ValueError('Could not find MetaGraphDef with tags {}'.format(tags))


# standalone usage: list a signature's output tensor names, then run the model
import numpy as np
from tensorflow.contrib import predictor  # TF 1.x predictor API

exp_dir = "/home/heavens/Chiron_project/Chiron/chiron/model/DNA_default/export/1/"
metagraph_def = get_meta_graph_def(exp_dir, 'serve')
signature_def = signature_def_utils.get_signature_def_by_key(
    metagraph_def, 'predicted_sequences')
output_names = {k: v.name for k, v in signature_def.outputs.items()}
print(output_names)
predict_fn = predictor.from_saved_model(exp_dir)
seq_len = np.asarray([[4]])
# batch_x: the raw signal batch, assumed to be prepared elsewhere
combined_input = np.concatenate((batch_x, seq_len), axis=1)
predictions = predict_fn({'inputs': combined_input})
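
As an alternative to `predictor.from_saved_model`, the tensor names recovered from the signature can be run through a plain session; a sketch assuming the same export directory and the 'inputs' signature key used above:

import tensorflow as tf

# sketch: fetch the signature outputs from a raw session instead of a predictor
input_names = {k: v.name for k, v in signature_def.inputs.items()}
with tf.Session(graph=tf.Graph()) as sess:
    tf.saved_model.loader.load(sess, ['serve'], exp_dir)
    fetches = {k: sess.graph.get_tensor_by_name(v) for k, v in output_names.items()}
    feed = {sess.graph.get_tensor_by_name(input_names['inputs']): combined_input}
    print(sess.run(fetches, feed_dict=feed))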