def post_processing_graph(self):
        # type: () -> tf.Graph
        """
    Generates and returns a TensorFlow graph containing postprocessing
    operations. By convention, this graph contains one or more input
    placeholders that correspond to output ops by the same name in
    the main graph.

    For each output in the original graph that needs postprocessing,
    the postprocessing graph should contain an input placeholder with the
    same name and a second op named "<name of output>_postprocessed",
    where `<name of output>` is the name of the original output op.
    """

        _HASH_TABLE_INIT_OP_NAME = "hash_table_init"

        # Download (or reuse a cached copy of) the label map that translates
        # numeric detection-class IDs into human-readable display names.
        label_file = util.fetch_or_use_cached(_CACHE_DIR, "labels.pbtext",
                                              _LABEL_MAP_URL)

        # Category mapping comes in pbtext format. Translate to the format that
        # TensorFlow's hash table initializers expect (key and value tensors).
        with open(label_file, "r") as f:
            raw_data = f.read()
        # Parse directly instead of going through the protobuf API dance.
        # NOTE(review): this hand-rolled parser assumes every record matches
        # the exact layout captured by the regex below (two spaces between
        # fields, both `id` and `display_name` present). A malformed record
        # would make regex.match() return None and raise AttributeError on
        # the group() call -- confirm the label file format is stable.
        records = raw_data.split("}")
        records = records[0:-1]  # Remove empty record at end
        records = [r.replace("\n", "") for r in records]  # Strip newlines
        regex = re.compile(
            r"item {  name: \".+\"  id: (.+)  display_name: \"(.+)\"")
        keys = []
        values = []
        for r in records:
            match = regex.match(r)
            keys.append(int(match.group(1)))
            values.append(match.group(2))

        result_decode_g = tf.Graph()
        with result_decode_g.as_default():
            # The original graph produces floating-point output for detection class,
            # even though the output is always an integer.
            float_class = tf.placeholder(tf.float32,
                                         shape=[None],
                                         name="detection_classes")
            int_class = tf.cast(float_class, tf.int32)
            # Static lookup table mapping class ID -> display name. IDs that
            # are missing from the label map resolve to "Unknown".
            key_tensor = tf.constant(keys, dtype=tf.int32)
            value_tensor = tf.constant(values)
            table_init = tf.contrib.lookup.KeyValueTensorInitializer(
                key_tensor, value_tensor, name=_HASH_TABLE_INIT_OP_NAME)
            hash_table = tf.contrib.lookup.HashTable(table_init,
                                                     default_value="Unknown")
            # Output op follows the "<output name>_postprocessed" naming
            # convention described in the docstring above.
            _ = hash_table.lookup(int_class,
                                  name="detection_classes_postprocessed")
        return result_decode_g
    def frozen_graph(self):
        # type: () -> tf.GraphDef
        """
    Generates and returns the core TensorFlow graph for the model as a frozen
    (i.e. all variables converted to constants) GraphDef protocol buffer
    message.
    """
        # Download the model tarball, or reuse a previously cached copy.
        archive_name = "{}.tar.gz".format(_LONG_MODEL_NAME)
        archive_path = util.fetch_or_use_cached(_CACHE_DIR, archive_name,
                                                _MODEL_TARBALL_URL)
        print("Original model files at {}".format(archive_path))

        # Pull the serialized frozen graph out of the tarball and deserialize
        # it into a GraphDef protocol buffer message.
        with tarfile.open(archive_path) as archive:
            serialized_graph = archive.extractfile(_FROZEN_GRAPH_MEMBER).read()
        return tf.GraphDef.FromString(serialized_graph)
# Example #3
# 0
def main():
    """
  Connect to a copy of the model deployed via the deploy_wml.py script,
  generate a web service request, pass that request through the model,
  and print the result.

  Before running this script, you need to perform the following manual steps:
  * Perform the manual steps outlined in the deploy_wml.py script.
  * Run the deploy_wml.py script
  * Enter the deployment URL that the deploy_wml.py script prints out into
    the local file `ibm_cloud_credentials.json` under the key
    "WML_function_url".
  """
    if not os.path.isdir(_TMP_DIR):
        os.mkdir(_TMP_DIR)

    # Build the scoring request: a test image plus a detection threshold.
    image_path = util.fetch_or_use_cached(_TMP_DIR, "panda.jpg",
                                          _PANDA_PIC_URL)
    with open(image_path, "rb") as image_file:
        image_bytes = image_file.read()
    # TensorFlow only decodes URL-safe base64
    encoded_image = base64.urlsafe_b64encode(image_bytes).decode("utf-8")
    threshold = 0.7

    # Note that "values" tag at the top level. This tag is a requirement of the
    # WML API standard.
    # Under the "values" tag, you must place a list of tuples. Each tuple must
    # be represented as a JSON list of values. Tensor-valued values must be
    # represented as lists of numbers.
    request_json = {
        "fields": ["image", "threshold"],
        "values": [[encoded_image, threshold]]
    }

    # Read connection info for the Watson Machine Learning Python API from
    # the local credentials file, then score the request remotely.
    with open("./ibm_cloud_credentials.json") as creds_file:
        creds_json = json.load(creds_file)
    client = WatsonMachineLearningAPIClient(creds_json["WML_credentials"])

    response = client.deployments.score(creds_json["WML_function_url"],
                                        request_json)
    print("Response: {}".format(response))
def main():
    """
  Spin up a local copy of the model, generate a JSON request, pass that
  through the model, and print the result.
  """
    if not os.path.isdir(_TMP_DIR):
        os.mkdir(_TMP_DIR)

    # Prepare a request
    image_path = util.fetch_or_use_cached(_TMP_DIR, "panda.jpg",
                                          _PANDA_PIC_URL)
    with open(image_path, "rb") as f:
        image_data = f.read()
    thresh = 0.7

    request = inference_request.InferenceRequest()
    # TensorFlow only decodes URL-safe base64.
    request.raw_inputs["image"] = base64.urlsafe_b64encode(image_data).decode(
        "utf-8")
    request.raw_inputs["threshold"] = thresh

    # Fire up TensorFlow and perform end-to-end inference.
    # BUG FIX: create the graph *before* the session and bind the session to
    # it explicitly. Previously tf.Session() was created against the
    # process-wide default graph, and the SavedModel was then loaded into a
    # different, newly created graph; running tensors from that graph in the
    # session fails in TF1 because a Session can only run ops from the graph
    # it was constructed with.
    graph = tf.Graph()
    with graph.as_default():
        with tf.Session(graph=graph) as sess:
            meta_graph = tf.saved_model.loader.load(
                sess, [tf.saved_model.tag_constants.SERVING],
                _SAVED_MODEL_DIR)  # type: tf.MetaGraphDef

            # Extract serving "method" signature
            signature = meta_graph.signature_def["serving_default"]

            print("Signature:\n{}".format(signature))

            # Pre-process, run the core model locally, then post-process.
            odh = handlers.ObjectDetectorHandlers()
            odh.pre_process(request)
            inference_request.pass_to_local_tf(request, sess, graph, signature)
            odh.post_process(request)
            print("Result:\n{}".format(request.json_result()))
# Example #5
# 0
def main():
  """
  Connect to a copy of the "core" model deployed via the deploy_wml.py script
  using a local copy of the WML function that was deployed by the
  deploy_wml.py script.

  Before running this script, you need to perform the following manual steps:
  * Perform the manual steps outlined in the deploy_wml.py script.
  * Run the deploy_wml.py script
  * Enter the deployment URL that the deploy_wml.py script prints out into
    the local file `ibm_cloud_credentials.json` under the key
    "WML_function_url".
  * Enter the model ID that the deploy_wml.py script prints out to the local
    file `ibm_cloud_credentials.json` under the key "WML_model_ID". The model
    ID can be found in the part of the output that looks like::
      Model details: {'metadata': {'guid': '<model id>',
    ...or alternately you can set up the CLI with ". bx_env.sh", then run
      bx ml list models
  * Enter the deployment ID that deploy_wml.py prints out to the local
    file `ibm_cloud_credentials.json` under the key "WML_deployment_ID".
    The deployment ID can be found in the part of the script output that
    looks like:
      Deployment details: {'metadata': {'guid': '<deployment id>'
    or you can type
      bx ml list deployments <model id>
    Don't bother typing just "bx ml list deployments". It will return an
    empty set...
  """
  if not os.path.isdir(_TMP_DIR):
    os.mkdir(_TMP_DIR)

  # Prepare a request
  image_path = util.fetch_or_use_cached(_TMP_DIR, "panda.jpg",
                                        _PANDA_PIC_URL)
  with open(image_path, "rb") as f:
    image_data = f.read()
  thresh = 0.7

  # Note that "values" tag at the top level. This tag is a requirement of the
  # WML API standard.
  # Under the "values" tag, you must place a list of tuples. Each tuple must
  # be represented as a JSON list of values. Tensor-valued values must be
  # represented as lists of numbers.
  request_json = {
    "fields": [
      "image",
      "threshold"
    ],
    "values": [
      [
        # TensorFlow only decodes URL-safe base64
        base64.urlsafe_b64encode(image_data).decode("utf-8"),
        thresh
      ]
    ]
  }

  # Write out JSON suitable for passing to "bx ml score"
  with open("./ibm_cloud_credentials.json") as f:
    creds_json = json.load(f)
  _WML_MODEL_ID = creds_json["WML_model_ID"]
  _WML_DEPLOYMENT_ID = creds_json["WML_deployment_ID"]
  # The CLI wrapper adds model and deployment IDs around the raw payload.
  cli_json = {
    "modelId": _WML_MODEL_ID,
    "deploymentId": _WML_DEPLOYMENT_ID,
    "payload": request_json
  }
  with open("request.json", "w") as f:
    f.write(json.dumps(cli_json, indent=2))
  print("A copy of the request we're about emulate locally has been saved to "
        "./request.json.  Run\n"
        "   bx ml score request.json\n"
        "to use the WML CLI to run the end-to-end request remotely.")

  # Run the same request through a *local* copy of the deployed WML function.
  func_ptr = deployable_function.deployable_function()
  response = func_ptr(request_json)
  # NOTE(review): `response` is unused in the visible lines -- presumably it
  # is printed or inspected by code past the end of this excerpt; confirm.