def setUp(self):
        ops.reset_default_graph()
        dim = 1
        num = 3
        with ops.name_scope('some_scope'):
            # Basically from 0 to dim*num-1.
            flat_data = math_ops.linspace(0.0, dim * num - 1, dim * num)
            bias = variables.Variable(array_ops.reshape(flat_data, (num, dim)),
                                      name='bias')
        save = saver.Saver([bias])
        with self.test_session() as sess:
            variables.global_variables_initializer().run()
            self.bundle_file = os.path.join(test.get_temp_dir(),
                                            'bias_checkpoint')
            save.save(sess, self.bundle_file)

        self.new_class_vocab_file = os.path.join(
            test.test_src_dir_path(_TESTDATA_PATH), 'keyword_new.txt')
        self.old_class_vocab_file = os.path.join(
            test.test_src_dir_path(_TESTDATA_PATH), 'keyword.txt')
        self.init_val = 42

        def _init_val_initializer(shape, dtype=None, partition_info=None):
            del dtype, partition_info  # Unused by this unit-testing initializer.
            return array_ops.tile(
                constant_op.constant([[self.init_val]], dtype=dtypes.float32),
                shape)

        self.initializer = _init_val_initializer
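
Every example on this page resolves its test data through test.test_src_dir_path. As a rough mental model, and only as an assumption about the helper (not something shown in these snippets), it joins a source-relative path onto the Bazel-provided test source directory:

import os

def test_src_dir_path_sketch(relative_path):
  # Hedged sketch: assumes the Bazel TEST_SRCDIR environment variable and the
  # org_tensorflow workspace layout; the real helper may differ.
  return os.path.join(os.environ["TEST_SRCDIR"],
                      "org_tensorflow/tensorflow", relative_path)

# e.g. test_src_dir_path_sketch("python/saved_model") would point into the
# checked-out tensorflow/python/saved_model directory under TEST_SRCDIR.
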
Example #2
 def testMaybeSavedModelDir(self):
   base_path = test.test_src_dir_path("/python/saved_model")
   self.assertFalse(loader.maybe_saved_model_directory(base_path))
   base_path = test.test_src_dir_path(SAVED_MODEL_PATH)
   self.assertTrue(loader.maybe_saved_model_directory(base_path))
   base_path = "complete_garbage"
   self.assertFalse(loader.maybe_saved_model_directory(base_path))
Example #3
  def setUp(self):
    ops.reset_default_graph()
    dim = 1
    num = 3
    with ops.name_scope('some_scope'):
      # Basically from 0 to dim*num-1.
      flat_data = math_ops.linspace(0.0, dim * num - 1, dim * num)
      bias = variables.Variable(
          array_ops.reshape(flat_data, (num, dim)), name='bias')
    save = saver.Saver([bias])
    with self.test_session() as sess:
      variables.global_variables_initializer().run()
      self.bundle_file = os.path.join(test.get_temp_dir(), 'bias_checkpoint')
      save.save(sess, self.bundle_file)

    self.new_class_vocab_file = os.path.join(
        test.test_src_dir_path(_TESTDATA_PATH), 'keyword_new.txt')
    self.old_class_vocab_file = os.path.join(
        test.test_src_dir_path(_TESTDATA_PATH), 'keyword.txt')
    self.init_val = 42

    def _init_val_initializer(shape, dtype=None, partition_info=None):
      del dtype, partition_info  # Unused by this unit-testing initializer.
      return array_ops.tile(
          constant_op.constant([[self.init_val]], dtype=dtypes.float32), shape)

    self.initializer = _init_val_initializer
Example #4
 def testMaybeSessionBundleDir(self):
   base_path = test.test_src_dir_path(SESSION_BUNDLE_PATH)
   self.assertTrue(session_bundle.maybe_session_bundle_dir(base_path))
   base_path = test.test_src_dir_path(SAVED_MODEL_PATH)
   self.assertFalse(session_bundle.maybe_session_bundle_dir(base_path))
   base_path = "complete_garbage"
   self.assertFalse(session_bundle.maybe_session_bundle_dir(base_path))
Example #5
 def testMaybeSavedModelDir(self):
     base_path = test.test_src_dir_path("/python/saved_model")
     self.assertFalse(loader.maybe_saved_model_directory(base_path))
     base_path = test.test_src_dir_path(SAVED_MODEL_PATH)
     self.assertTrue(loader.maybe_saved_model_directory(base_path))
     base_path = "complete_garbage"
     self.assertFalse(loader.maybe_saved_model_directory(base_path))
Example #6
 def testMaybeSessionBundleDir(self):
   base_path = test.test_src_dir_path(SESSION_BUNDLE_PATH)
   self.assertTrue(session_bundle.maybe_session_bundle_dir(base_path))
   base_path = test.test_src_dir_path(SAVED_MODEL_PATH)
   self.assertFalse(session_bundle.maybe_session_bundle_dir(base_path))
   base_path = "complete_garbage"
   self.assertFalse(session_bundle.maybe_session_bundle_dir(base_path))
Example #7
  def testRunCommandWithDebuggerEnabled(self):
    self.parser = saved_model_cli.create_parser()
    base_path = test.test_src_dir_path(SAVED_MODEL_PATH)
    x = np.array([[1], [2]])
    x_notused = np.zeros((6, 3))
    input_path = os.path.join(test.get_temp_dir(),
                              'testRunCommandNewOutdir_inputs.npz')
    output_dir = os.path.join(test.get_temp_dir(), 'new_dir')
    if os.path.isdir(output_dir):
      shutil.rmtree(output_dir)
    np.savez(input_path, x0=x, x1=x_notused)
    args = self.parser.parse_args([
        'run', '--dir', base_path, '--tag_set', 'serve', '--signature_def',
        'serving_default', '--inputs', 'x=' + input_path + '[x0]', '--outdir',
        output_dir, '--tf_debug'
    ])

    def fake_wrapper_session(sess):
      return sess

    with test.mock.patch.object(local_cli_wrapper,
                                'LocalCLIDebugWrapperSession',
                                side_effect=fake_wrapper_session,
                                autospec=True) as fake:
      saved_model_cli.run(args)
      fake.assert_called_with(test.mock.ANY)

    y_actual = np.load(os.path.join(output_dir, 'y.npy'))
    y_expected = np.array([[2.5], [3.0]])
    self.assertAllClose(y_expected, y_actual)
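
The test above replaces LocalCLIDebugWrapperSession with a pass-through function via side_effect, so the CLI runs without an interactive debugger while the call is still recorded for assertions. A self-contained sketch of just that mocking pattern (the WrapperSession stand-in below is hypothetical, not part of the CLI test):

import types
from unittest import mock

# Hypothetical stand-in module holding a wrapper class.
fake_module = types.SimpleNamespace(WrapperSession=lambda sess: ("wrapped", sess))

def passthrough(sess):
  return sess  # Return the session unchanged, like fake_wrapper_session above.

with mock.patch.object(fake_module, "WrapperSession",
                       side_effect=passthrough) as fake:
  result = fake_module.WrapperSession("my_session")
  fake.assert_called_with("my_session")

assert result == "my_session"  # side_effect's return value is what callers see.
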
Example #8
    def testEval(self):
        if not is_tensorrt_enabled():
            return

        # TODO(b/162447069): Enable the test for TRT 7.1.3.
        if trt_test.IsTensorRTVersionGreaterEqual(7, 1, 3):
            return

        model_dir = test.test_src_dir_path(
            'python/compiler/tensorrt/test/testdata/mnist')

        accuracy_tf_native = self._Run(is_training=False,
                                       use_trt=False,
                                       batch_size=128,
                                       num_epochs=None,
                                       model_dir=model_dir)['accuracy']
        logging.info('accuracy_tf_native: %f', accuracy_tf_native)
        self.assertAllClose(0.9662, accuracy_tf_native, rtol=3e-3, atol=3e-3)

        if not trt_test.IsTensorRTVersionGreaterEqual(5):
            return

        accuracy_tf_trt = self._Run(is_training=False,
                                    use_trt=True,
                                    batch_size=128,
                                    num_epochs=None,
                                    model_dir=model_dir)['accuracy']
        logging.info('accuracy_tf_trt: %f', accuracy_tf_trt)
        self.assertAllClose(0.9675, accuracy_tf_trt, rtol=1e-3, atol=1e-3)
Example #9
  def testEval(self):
    if not is_tensorrt_enabled():
      return
    model_dir = test.test_src_dir_path('python/compiler/tensorrt/test/testdata')

    accuracy_tf_native = self._Run(
        is_training=False,
        use_trt=False,
        batch_size=128,
        num_epochs=None,
        model_dir=model_dir)['accuracy']
    logging.info('accuracy_tf_native: %f', accuracy_tf_native)
    self.assertAllClose(0.9662, accuracy_tf_native, rtol=3e-3, atol=3e-3)

    if get_linked_tensorrt_version()[0] < 5:
      return

    accuracy_tf_trt = self._Run(
        is_training=False,
        use_trt=True,
        batch_size=128,
        num_epochs=None,
        model_dir=model_dir)['accuracy']
    logging.info('accuracy_tf_trt: %f', accuracy_tf_trt)
    self.assertAllClose(0.9675, accuracy_tf_trt, rtol=1e-3, atol=1e-3)
Example #10
    def setUpClass(cls):
        gpu_memory_fraction_opt = ("--gpu_memory_fraction=%f" %
                                   cls.PER_PROC_GPU_MEMORY_FRACTION)

        worker_port = portpicker.pick_unused_port()
        cluster_spec = "worker|localhost:%d" % worker_port
        tf_logging.info("cluster_spec: %s", cluster_spec)

        server_bin = test.test_src_dir_path(
            "python/debug/grpc_tensorflow_server")

        cls.server_target = "grpc://localhost:%d" % worker_port

        cls.server_procs = {}
        cls.server_procs["worker"] = subprocess.Popen([
            server_bin,
            "--cluster_spec=%s" % cluster_spec,
            "--job_name=worker",
            "--task_id=0",
            gpu_memory_fraction_opt,
        ],
                                                      stdout=sys.stdout,
                                                      stderr=sys.stderr)

        # Start debug server in-process, on separate thread.
        (cls.debug_server_port, cls.debug_server_url, _,
         cls.debug_server_thread, cls.debug_server
         ) = grpc_debug_test_server.start_server_on_separate_thread(
             dump_to_filesystem=False)
        tf_logging.info("debug server url: %s", cls.debug_server_url)

        cls.session_config = config_pb2.ConfigProto(
            gpu_options=config_pb2.GPUOptions(
                per_process_gpu_memory_fraction=cls.PER_PROC_GPU_MEMORY_FRACTION))
Example #11
 def testShowCommandErrorNoTagSet(self):
   base_path = test.test_src_dir_path(SAVED_MODEL_PATH)
   self.parser = saved_model_cli.create_parser()
   args = self.parser.parse_args(
       ['show', '--dir', base_path, '--tag_set', 'badtagset'])
   with self.assertRaises(RuntimeError):
     saved_model_cli.show(args)
Example #12
    def testBasic(self):
        base_path = test.test_src_dir_path(SESSION_BUNDLE_PATH)
        ops.reset_default_graph()
        sess, meta_graph_def = session_bundle.load_session_bundle_from_path(
            base_path,
            target="",
            config=config_pb2.ConfigProto(device_count={"CPU": 2}))

        self.assertTrue(sess)
        asset_path = os.path.join(base_path, constants.ASSETS_DIRECTORY)
        with sess.as_default():
            path1, path2 = sess.run(["filename1:0", "filename2:0"])
            self.assertEqual(
                compat.as_bytes(os.path.join(asset_path, "hello1.txt")), path1)
            self.assertEqual(
                compat.as_bytes(os.path.join(asset_path, "hello2.txt")), path2)

            collection_def = meta_graph_def.collection_def

            signatures_any = collection_def[
                constants.SIGNATURES_KEY].any_list.value
            self.assertEqual(len(signatures_any), 1)

            signatures = manifest_pb2.Signatures()
            signatures_any[0].Unpack(signatures)
            self._checkRegressionSignature(signatures, sess)
            self._checkNamedSignatures(signatures, sess)
Example #13
  def testBasic(self):
    base_path = test.test_src_dir_path(SESSION_BUNDLE_PATH)
    ops.reset_default_graph()
    sess, meta_graph_def = session_bundle.load_session_bundle_from_path(
        base_path,
        target="",
        config=config_pb2.ConfigProto(device_count={"CPU": 2}))

    self.assertTrue(sess)
    asset_path = os.path.join(base_path, constants.ASSETS_DIRECTORY)
    with sess.as_default():
      path1, path2 = sess.run(["filename1:0", "filename2:0"])
      self.assertEqual(
          compat.as_bytes(os.path.join(asset_path, "hello1.txt")), path1)
      self.assertEqual(
          compat.as_bytes(os.path.join(asset_path, "hello2.txt")), path2)

      collection_def = meta_graph_def.collection_def

      signatures_any = collection_def[constants.SIGNATURES_KEY].any_list.value
      self.assertEqual(len(signatures_any), 1)

      signatures = manifest_pb2.Signatures()
      signatures_any[0].Unpack(signatures)
      self._checkRegressionSignature(signatures, sess)
      self._checkNamedSignatures(signatures, sess)
Example #14
def run_all_tests():
  """Runs all sample model with TensorRT FP32/FP16 and reports latency."""
  # model_configs holds a (saved_model_dir, batch_size) pair for each model.
  model_configs = ((platform_test.test_src_dir_path(
      "python/compiler/tensorrt/model_tests/sample_model"), 128),)
  model_handler_cls = model_handler.ModelHandlerV1
  trt_model_handler_cls = model_handler.TrtModelHandlerV1
  default_trt_convert_params = DEFAUL_TRT_CONVERT_PARAMS._replace(
      is_dynamic_op=False)
  for saved_model_dir, batch_size in model_configs:
    base_model = model_handler_cls(saved_model_dir=saved_model_dir)
    random_inputs = base_model.generate_random_inputs(batch_size)
    base_model_result = base_model.run(random_inputs)
    trt_fp32_model_result = trt_model_handler_cls(
        saved_model_dir=saved_model_dir,
        trt_convert_params=default_trt_convert_params._replace(
            precision_mode=trt.TrtPrecisionMode.FP32,
            max_batch_size=batch_size)).run(random_inputs)
    trt_fp16_model_result = trt_model_handler_cls(
        saved_model_dir=saved_model_dir,
        trt_convert_params=default_trt_convert_params._replace(
            precision_mode=trt.TrtPrecisionMode.FP16,
            max_batch_size=batch_size)).run(random_inputs)

    logging.info("Base model latency: %f ms",
                 _get_mean_latency(base_model_result))
    logging.info("TensorRT FP32 model latency: %f ms",
                 _get_mean_latency(trt_fp32_model_result))
    logging.info("TensorRT FP16 model latency: %f ms",
                 _get_mean_latency(trt_fp16_model_result))
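
run_all_tests above varies the TensorRT conversion settings with _replace, which relies on the parameters object behaving like a namedtuple. A standalone sketch of that pattern with made-up field names (the real TrtConversionParams fields may differ):

import collections

# Hypothetical, simplified stand-in for the TRT conversion parameters.
ConvertParams = collections.namedtuple(
    "ConvertParams", ["precision_mode", "max_batch_size", "is_dynamic_op"])

defaults = ConvertParams(precision_mode="FP32", max_batch_size=1,
                         is_dynamic_op=True)
# _replace returns a new tuple with only the named fields overridden.
fp16_params = defaults._replace(precision_mode="FP16", max_batch_size=128)
print(fp16_params)
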
Example #15
  def testEval(self):
    if not trt_convert.is_tensorrt_enabled():
      return
    model_dir = test.test_src_dir_path('contrib/tensorrt/test/testdata')

    accuracy_tf_native = self._Run(
        is_training=False,
        use_trt=False,
        batch_size=128,
        num_epochs=None,
        model_dir=model_dir)['accuracy']
    logging.info('accuracy_tf_native: %f', accuracy_tf_native)
    self.assertAllClose(accuracy_tf_native, 0.9662)

    if trt_convert.get_linked_tensorrt_version()[0] < 5:
      return

    accuracy_tf_trt = self._Run(
        is_training=False,
        use_trt=True,
        batch_size=128,
        num_epochs=None,
        model_dir=model_dir)['accuracy']
    logging.info('accuracy_tf_trt: %f', accuracy_tf_trt)
    self.assertAllClose(accuracy_tf_trt, 0.9677)
Example #16
  def setUpClass(cls):
    gpu_memory_fraction_opt = (
        "--gpu_memory_fraction=%f" % cls.PER_PROC_GPU_MEMORY_FRACTION)

    worker_port = portpicker.pick_unused_port()
    cluster_spec = "worker|localhost:%d" % worker_port
    tf_logging.info("cluster_spec: %s", cluster_spec)

    server_bin = test.test_src_dir_path("python/debug/grpc_tensorflow_server")

    cls.server_target = "grpc://localhost:%d" % worker_port

    cls.server_procs = {}
    cls.server_procs["worker"] = subprocess.Popen(
        [
            server_bin,
            "--cluster_spec=%s" % cluster_spec,
            "--job_name=worker",
            "--task_id=0",
            gpu_memory_fraction_opt,
        ],
        stdout=sys.stdout,
        stderr=sys.stderr)

    # Start debug server in-process, on separate thread.
    (cls.debug_server_port, cls.debug_server_url, _, cls.debug_server_thread,
     cls.debug_server
    ) = grpc_debug_test_server.start_server_on_separate_thread(
        dump_to_filesystem=False)
    tf_logging.info("debug server url: %s", cls.debug_server_url)

    cls.session_config = config_pb2.ConfigProto(
        gpu_options=config_pb2.GPUOptions(
            per_process_gpu_memory_fraction=cls.PER_PROC_GPU_MEMORY_FRACTION))
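
Both setUpClass examples above start a grpc_tensorflow_server worker subprocess and leave it running; in the full test classes a matching tearDownClass presumably reaps it. A standalone, hedged sketch of that cleanup using only subprocess.Popen calls (the sleeping child below is a placeholder, not grpc_tensorflow_server):

import subprocess
import sys

# Placeholder child process standing in for the grpc_tensorflow_server worker.
proc = subprocess.Popen([sys.executable, "-c", "import time; time.sleep(60)"],
                        stdout=sys.stdout, stderr=sys.stderr)

# What a tearDownClass would do with each entry of cls.server_procs: stop the
# worker and wait for it to exit.
proc.terminate()
proc.wait()
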
Example #17
    def setUp(self):
        super(CategoryLookupVocabFileTest, self).setUp()

        # Contains strings, character names from 'The Wire': omar, stringer, marlo
        self._wire_vocabulary_file_name = test.test_src_dir_path(
            'python/keras/layers/preprocessing/testdata/wire_vocabulary.txt')
        self._wire_vocabulary_size = 3
Example #18
  def testRunCommandWithDebuggerEnabled(self):
    self.parser = saved_model_cli.create_parser()
    base_path = test.test_src_dir_path(SAVED_MODEL_PATH)
    x = np.array([[1], [2]])
    x_notused = np.zeros((6, 3))
    input_path = os.path.join(test.get_temp_dir(),
                              'testRunCommandNewOutdir_inputs.npz')
    output_dir = os.path.join(test.get_temp_dir(), 'new_dir')
    if os.path.isdir(output_dir):
      shutil.rmtree(output_dir)
    np.savez(input_path, x0=x, x1=x_notused)
    args = self.parser.parse_args([
        'run', '--dir', base_path, '--tag_set', 'serve', '--signature_def',
        'serving_default', '--inputs', 'x=' + input_path + '[x0]', '--outdir',
        output_dir, '--tf_debug'
    ])

    def fake_wrapper_session(sess):
      return sess

    with test.mock.patch.object(
        local_cli_wrapper,
        'LocalCLIDebugWrapperSession',
        side_effect=fake_wrapper_session,
        autospec=True) as fake:
      saved_model_cli.run(args)
      fake.assert_called_with(test.mock.ANY)

    y_actual = np.load(os.path.join(output_dir, 'y.npy'))
    y_expected = np.array([[2.5], [3.0]])
    self.assertAllClose(y_expected, y_actual)
Example #19
 def testShowCommandErrorNoTagSet(self):
   base_path = test.test_src_dir_path(SAVED_MODEL_PATH)
   self.parser = saved_model_cli.create_parser()
   args = self.parser.parse_args(
       ['show', '--dir', base_path, '--tag_set', 'badtagset'])
   with self.assertRaises(RuntimeError):
     saved_model_cli.show(args)
Example #20
  def testEval(self):
    if not is_tensorrt_enabled():
      return
    model_dir = test.test_src_dir_path(
        'python/compiler/tensorrt/test/testdata/mnist')

    accuracy_tf_native = self._Run(
        is_training=False,
        use_trt=False,
        batch_size=128,
        num_epochs=None,
        model_dir=model_dir)['accuracy']
    logging.info('accuracy_tf_native: %f', accuracy_tf_native)
    self.assertAllClose(0.9662, accuracy_tf_native, rtol=3e-3, atol=3e-3)

    if get_linked_tensorrt_version()[0] < 5:
      return

    accuracy_tf_trt = self._Run(
        is_training=False,
        use_trt=True,
        batch_size=128,
        num_epochs=None,
        model_dir=model_dir)['accuracy']
    logging.info('accuracy_tf_trt: %f', accuracy_tf_trt)
    self.assertAllClose(0.9675, accuracy_tf_trt, rtol=1e-3, atol=1e-3)
Example #21
 def testScanCommand(self):
   self.parser = saved_model_cli.create_parser()
   base_path = test.test_src_dir_path(SAVED_MODEL_PATH)
   args = self.parser.parse_args(['scan', '--dir', base_path])
   with captured_output() as (out, _):
     saved_model_cli.scan(args)
   output = out.getvalue().strip()
   self.assertTrue('does not contain denylisted ops' in output)
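
Several of the saved_model_cli tests on this page capture stdout/stderr with a captured_output() helper that is not shown here. A minimal sketch of such a helper, assuming it simply redirects the standard streams to StringIO buffers (an assumption about the real test utility):

import contextlib
import io
import sys

@contextlib.contextmanager
def captured_output():
  new_out, new_err = io.StringIO(), io.StringIO()
  old_out, old_err = sys.stdout, sys.stderr
  try:
    sys.stdout, sys.stderr = new_out, new_err
    yield new_out, new_err
  finally:
    sys.stdout, sys.stderr = old_out, old_err

# Usage mirroring the tests: with captured_output() as (out, err): ...
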
Example #22
 def __init__(self):
     self.asset = asset.Asset(
         test.test_src_dir_path(
             "cc/saved_model/testdata/static_hashtable_asset.txt"))
     self.table = lookup_ops.StaticHashTable(
         lookup_ops.TextFileInitializer(
             self.asset, dtypes.string, lookup_ops.TextFileIndex.WHOLE_LINE,
             dtypes.int64, lookup_ops.TextFileIndex.LINE_NUMBER), -1)
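
For context on the __init__ example above, here is a self-contained sketch of what a whole-line/line-number text-file table returns, written with the public tf.lookup API as an assumed equivalent of the internal modules used there (the temporary vocabulary file is made up for illustration):

import tempfile
import tensorflow as tf

# Write a tiny vocabulary file so the sketch runs on its own.
vocab = tempfile.NamedTemporaryFile(mode="w", suffix=".txt", delete=False)
vocab.write("omar\nstringer\nmarlo\n")
vocab.close()

table = tf.lookup.StaticHashTable(
    tf.lookup.TextFileInitializer(
        vocab.name, tf.string, tf.lookup.TextFileIndex.WHOLE_LINE,
        tf.int64, tf.lookup.TextFileIndex.LINE_NUMBER),
    default_value=-1)

print(table.lookup(tf.constant(["stringer", "avon"])))  # -> [1, -1]
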
Example #23
 def testRunCommandInvalidInputKeyError(self):
   self.parser = saved_model_cli.create_parser()
   base_path = test.test_src_dir_path(SAVED_MODEL_PATH)
   args = self.parser.parse_args([
       'run', '--dir', base_path, '--tag_set', 'serve', '--signature_def',
       'regress_x2_to_y3', '--input_exprs', 'x2=np.ones((3,1))'
   ])
   with self.assertRaises(ValueError):
     saved_model_cli.run(args)
Example #24
 def testRunCommandInputNotGivenError(self):
   self.parser = saved_model_cli.create_parser()
   base_path = test.test_src_dir_path(SAVED_MODEL_PATH)
   args = self.parser.parse_args([
       'run', '--dir', base_path, '--tag_set', 'serve', '--signature_def',
       'serving_default'
   ])
   with self.assertRaises(AttributeError):
     saved_model_cli.run(args)
Example #25
 def test_restore_old_saved_model(self):
   saved_model_dir = test.test_src_dir_path(
       'python/keras/mixed_precision/experimental/testdata/'
       'lso_savedmodel_tf2.2')
   model = save.load_model(saved_model_dir)
   expected_kernel = np.array([[9.229685, 10.901115], [10.370763, 9.757362]])
   self.assertAllClose(backend.eval(model.weights[0]), expected_kernel)
   self.assertIsInstance(model.optimizer,
                         loss_scale_optimizer.LossScaleOptimizer)
Example #26
 def testRunCommandInvalidInputKeyError(self):
   self.parser = saved_model_cli.create_parser()
   base_path = test.test_src_dir_path(SAVED_MODEL_PATH)
   args = self.parser.parse_args([
       'run', '--dir', base_path, '--tag_set', 'serve', '--signature_def',
       'regress_x2_to_y3', '--input_exprs', 'x2=np.ones((3,1))'
   ])
   with self.assertRaises(ValueError):
     saved_model_cli.run(args)
Example #27
 def testBadPath(self):
   base_path = test.test_src_dir_path("/no/such/a/dir")
   ops.reset_default_graph()
   with self.assertRaises(RuntimeError) as cm:
     _, _ = session_bundle.load_session_bundle_from_path(
         base_path,
         target="local",
         config=config_pb2.ConfigProto(device_count={"CPU": 2}))
   self.assertTrue("Expected meta graph file missing" in str(cm.exception))
Example #28
 def testRunCommandInputNotGivenError(self):
   self.parser = saved_model_cli.create_parser()
   base_path = test.test_src_dir_path(SAVED_MODEL_PATH)
   args = self.parser.parse_args([
       'run', '--dir', base_path, '--tag_set', 'serve', '--signature_def',
       'serving_default'
   ])
   with self.assertRaises(AttributeError):
     saved_model_cli.run(args)
Example #29
 def testShowCommandTags(self):
   base_path = test.test_src_dir_path(SAVED_MODEL_PATH)
   self.parser = saved_model_cli.create_parser()
   args = self.parser.parse_args(['show', '--dir', base_path])
   with captured_output() as (out, err):
     saved_model_cli.show(args)
   output = out.getvalue().strip()
   exp_out = 'The given SavedModel contains the following tag-sets:\nserve'
   self.assertMultiLineEqual(output, exp_out)
   self.assertEqual(err.getvalue().strip(), '')
Example #30
 def testShowCommandTags(self):
   base_path = test.test_src_dir_path(SAVED_MODEL_PATH)
   self.parser = saved_model_cli.create_parser()
   args = self.parser.parse_args(['show', '--dir', base_path])
   with captured_output() as (out, err):
     saved_model_cli.show(args)
   output = out.getvalue().strip()
   exp_out = 'The given SavedModel contains the following tag-sets:\n\'serve\''
   self.assertMultiLineEqual(output, exp_out)
   self.assertEqual(err.getvalue().strip(), '')
Example #31
 def testRunCommandInvalidSignature(self):
   self.parser = saved_model_cli.create_parser()
   base_path = test.test_src_dir_path(SAVED_MODEL_PATH)
   args = self.parser.parse_args([
       'run', '--dir', base_path, '--tag_set', 'serve', '--signature_def',
       'INVALID_SIGNATURE', '--input_exprs', 'x2=np.ones((3,1))'
   ])
   with self.assertRaisesRegex(ValueError,
                               'Could not find signature "INVALID_SIGNATURE"'):
     saved_model_cli.run(args)
Example #32
 def testRunCommandInputExamplesFeatureBadType(self):
   self.parser = saved_model_cli.create_parser()
   base_path = test.test_src_dir_path(SAVED_MODEL_PATH)
   output_dir = os.path.join(test.get_temp_dir(), 'new_dir')
   args = self.parser.parse_args([
       'run', '--dir', base_path, '--tag_set', 'serve', '--signature_def',
       'regress_x_to_y', '--input_examples', 'inputs=[{"x":[[1],[2]]}]',
       '--outdir', output_dir
   ])
   with self.assertRaisesRegex(ValueError, 'is not supported'):
     saved_model_cli.run(args)
Example #33
 def testRunCommandInputExamplesFeatureBadType(self):
   self.parser = saved_model_cli.create_parser()
   base_path = test.test_src_dir_path(SAVED_MODEL_PATH)
   output_dir = os.path.join(test.get_temp_dir(), 'new_dir')
   args = self.parser.parse_args([
       'run', '--dir', base_path, '--tag_set', 'serve', '--signature_def',
       'regress_x_to_y', '--input_examples', 'inputs=[{"x":[[1],[2]]}]',
       '--outdir', output_dir
   ])
   with self.assertRaisesRegex(ValueError, 'is not supported'):
     saved_model_cli.run(args)
Example #34
 def testRunCommandInputExamplesFeatureValueNotListError(self):
   self.parser = saved_model_cli.create_parser()
   base_path = test.test_src_dir_path(SAVED_MODEL_PATH)
   output_dir = os.path.join(test.get_temp_dir(), 'new_dir')
   args = self.parser.parse_args([
       'run', '--dir', base_path, '--tag_set', 'serve', '--signature_def',
       'regress_x_to_y', '--input_examples', 'inputs=[{"x":8.0,"x2":5.0}]',
       '--outdir', output_dir
   ])
   with self.assertRaisesRegex(ValueError, 'feature value must be a list'):
     saved_model_cli.run(args)
Example #35
 def testRunCommandInputExamplesFeatureValueNotListError(self):
   self.parser = saved_model_cli.create_parser()
   base_path = test.test_src_dir_path(SAVED_MODEL_PATH)
   output_dir = os.path.join(test.get_temp_dir(), 'new_dir')
   args = self.parser.parse_args([
       'run', '--dir', base_path, '--tag_set', 'serve', '--signature_def',
       'regress_x_to_y', '--input_examples', 'inputs=[{"x":8.0,"x2":5.0}]',
       '--outdir', output_dir
   ])
   with self.assertRaisesRegex(ValueError, 'feature value must be a list'):
     saved_model_cli.run(args)
Example #36
    def testConvertSignaturesToSignatureDefs(self):
        base_path = test.test_src_dir_path(SESSION_BUNDLE_PATH)
        meta_graph_filename = os.path.join(base_path,
                                           constants.META_GRAPH_DEF_FILENAME)
        metagraph_def = meta_graph.read_meta_graph_file(meta_graph_filename)
        default_signature_def, named_signature_def = (
            bundle_shim._convert_signatures_to_signature_defs(metagraph_def))
        self.assertEqual(default_signature_def.method_name,
                         signature_constants.REGRESS_METHOD_NAME)
        self.assertEqual(len(default_signature_def.inputs), 1)
        self.assertEqual(len(default_signature_def.outputs), 1)
        self.assertProtoEquals(
            default_signature_def.inputs[signature_constants.REGRESS_INPUTS],
            meta_graph_pb2.TensorInfo(name="tf_example:0"))
        self.assertProtoEquals(
            default_signature_def.outputs[signature_constants.REGRESS_OUTPUTS],
            meta_graph_pb2.TensorInfo(name="Identity:0"))
        self.assertEqual(named_signature_def.method_name,
                         signature_constants.PREDICT_METHOD_NAME)
        self.assertEqual(len(named_signature_def.inputs), 1)
        self.assertEqual(len(named_signature_def.outputs), 1)
        self.assertProtoEquals(named_signature_def.inputs["x"],
                               meta_graph_pb2.TensorInfo(name="x:0"))
        self.assertProtoEquals(named_signature_def.outputs["y"],
                               meta_graph_pb2.TensorInfo(name="y:0"))

        # Now try default signature only
        collection_def = metagraph_def.collection_def
        signatures_proto = manifest_pb2.Signatures()
        signatures = collection_def[constants.SIGNATURES_KEY].any_list.value[0]
        signatures.Unpack(signatures_proto)
        named_only_signatures_proto = manifest_pb2.Signatures()
        named_only_signatures_proto.CopyFrom(signatures_proto)

        default_only_signatures_proto = manifest_pb2.Signatures()
        default_only_signatures_proto.CopyFrom(signatures_proto)
        default_only_signatures_proto.named_signatures.clear()
        default_only_signatures_proto.ClearField("named_signatures")
        metagraph_def.collection_def[constants.SIGNATURES_KEY].any_list.value[
            0].Pack(default_only_signatures_proto)
        default_signature_def, named_signature_def = (
            bundle_shim._convert_signatures_to_signature_defs(metagraph_def))
        self.assertEqual(default_signature_def.method_name,
                         signature_constants.REGRESS_METHOD_NAME)
        self.assertEqual(named_signature_def, None)

        named_only_signatures_proto.ClearField("default_signature")
        metagraph_def.collection_def[constants.SIGNATURES_KEY].any_list.value[
            0].Pack(named_only_signatures_proto)
        default_signature_def, named_signature_def = (
            bundle_shim._convert_signatures_to_signature_defs(metagraph_def))
        self.assertEqual(named_signature_def.method_name,
                         signature_constants.PREDICT_METHOD_NAME)
        self.assertEqual(default_signature_def, None)
Example #37
  def setUp(self):
    self.bundle_file = os.path.join(
        test.test_src_dir_path(_TESTDATA_PATH), 'bundle_checkpoint')
    self.new_feature_vocab_file = os.path.join(
        test.test_src_dir_path(_TESTDATA_PATH), 'bundle_checkpoint_vocab.txt')
    self.old_feature_vocab_file = os.path.join(
        test.test_src_dir_path(_TESTDATA_PATH),
        'bundle_checkpoint_vocab_with_oov.txt')
    self.new_class_vocab_file = os.path.join(
        test.test_src_dir_path(_TESTDATA_PATH), 'keyword_new.txt')
    self.old_class_vocab_file = os.path.join(
        test.test_src_dir_path(_TESTDATA_PATH), 'keyword.txt')
    self.init_val = 42

    def _init_val_initializer(shape, dtype=None, partition_info=None):
      del dtype, partition_info  # Unused by this unit-testing initializer.
      return array_ops.tile(
          constant_op.constant([[self.init_val]], dtype=dtypes.float32), shape)

    self.initializer = _init_val_initializer
Example #38
    def testMetricsCollection(self):
        def _enqueue_vector(sess, queue, values, shape=None):
            if not shape:
                shape = (1, len(values))
            dtype = queue.dtypes[0]
            sess.run(
                queue.enqueue(
                    constant_op.constant(values, dtype=dtype, shape=shape)))

        meta_graph_filename = os.path.join(_TestDir("metrics_export"),
                                           "meta_graph.pb")

        graph = ops.Graph()
        with self.session(graph=graph) as sess:
            values_queue = data_flow_ops.FIFOQueue(4,
                                                   dtypes.float32,
                                                   shapes=(1, 2))
            _enqueue_vector(sess, values_queue, [0, 1])
            _enqueue_vector(sess, values_queue, [-4.2, 9.1])
            _enqueue_vector(sess, values_queue, [6.5, 0])
            _enqueue_vector(sess, values_queue, [-3.2, 4.0])
            values = values_queue.dequeue()

            _, update_op = metrics.mean(values)

            initializer = variables.local_variables_initializer()
            sess.run(initializer)
            sess.run(update_op)

        meta_graph.export_scoped_meta_graph(filename=meta_graph_filename,
                                            graph=graph)

        # Verifies that importing a meta_graph with LOCAL_VARIABLES collection
        # works correctly.
        graph = ops.Graph()
        with self.session(graph=graph) as sess:
            meta_graph.import_scoped_meta_graph(meta_graph_filename)
            initializer = variables.local_variables_initializer()
            sess.run(initializer)

        # Verifies that importing an old meta_graph where "local_variables"
        # collection is of node_list type works, but cannot build initializer
        # with the collection.
        graph = ops.Graph()
        with self.session(graph=graph) as sess:
            meta_graph.import_scoped_meta_graph(
                test.test_src_dir_path(
                    "python/framework/testdata/metrics_export_meta_graph.pb"))
            self.assertEqual(
                len(ops.get_collection(ops.GraphKeys.LOCAL_VARIABLES)), 2)
            with self.assertRaisesRegex(
                    AttributeError,
                    "'Tensor' object has no attribute 'initializer'"):
                initializer = variables.local_variables_initializer()
Example #39
  def testConvertSignaturesToSignatureDefs(self):
    base_path = test.test_src_dir_path(SESSION_BUNDLE_PATH)
    meta_graph_filename = os.path.join(base_path,
                                       constants.META_GRAPH_DEF_FILENAME)
    metagraph_def = meta_graph.read_meta_graph_file(meta_graph_filename)
    default_signature_def, named_signature_def = (
        bundle_shim._convert_signatures_to_signature_defs(metagraph_def))
    self.assertEqual(default_signature_def.method_name,
                     signature_constants.REGRESS_METHOD_NAME)
    self.assertEqual(len(default_signature_def.inputs), 1)
    self.assertEqual(len(default_signature_def.outputs), 1)
    self.assertProtoEquals(
        default_signature_def.inputs[signature_constants.REGRESS_INPUTS],
        meta_graph_pb2.TensorInfo(name="tf_example:0"))
    self.assertProtoEquals(
        default_signature_def.outputs[signature_constants.REGRESS_OUTPUTS],
        meta_graph_pb2.TensorInfo(name="Identity:0"))
    self.assertEqual(named_signature_def.method_name,
                     signature_constants.PREDICT_METHOD_NAME)
    self.assertEqual(len(named_signature_def.inputs), 1)
    self.assertEqual(len(named_signature_def.outputs), 1)
    self.assertProtoEquals(
        named_signature_def.inputs["x"], meta_graph_pb2.TensorInfo(name="x:0"))
    self.assertProtoEquals(
        named_signature_def.outputs["y"], meta_graph_pb2.TensorInfo(name="y:0"))

    # Now try default signature only
    collection_def = metagraph_def.collection_def
    signatures_proto = manifest_pb2.Signatures()
    signatures = collection_def[constants.SIGNATURES_KEY].any_list.value[0]
    signatures.Unpack(signatures_proto)
    named_only_signatures_proto = manifest_pb2.Signatures()
    named_only_signatures_proto.CopyFrom(signatures_proto)

    default_only_signatures_proto = manifest_pb2.Signatures()
    default_only_signatures_proto.CopyFrom(signatures_proto)
    default_only_signatures_proto.named_signatures.clear()
    default_only_signatures_proto.ClearField("named_signatures")
    metagraph_def.collection_def[constants.SIGNATURES_KEY].any_list.value[
        0].Pack(default_only_signatures_proto)
    default_signature_def, named_signature_def = (
        bundle_shim._convert_signatures_to_signature_defs(metagraph_def))
    self.assertEqual(default_signature_def.method_name,
                     signature_constants.REGRESS_METHOD_NAME)
    self.assertEqual(named_signature_def, None)

    named_only_signatures_proto.ClearField("default_signature")
    metagraph_def.collection_def[constants.SIGNATURES_KEY].any_list.value[
        0].Pack(named_only_signatures_proto)
    default_signature_def, named_signature_def = (
        bundle_shim._convert_signatures_to_signature_defs(metagraph_def))
    self.assertEqual(named_signature_def.method_name,
                     signature_constants.PREDICT_METHOD_NAME)
    self.assertEqual(default_signature_def, None)
Example #40
  def setUp(self):
    self.bundle_file = os.path.join(
        test.test_src_dir_path(_TESTDATA_PATH), 'bundle_checkpoint')
    self.new_feature_vocab_file = os.path.join(
        test.test_src_dir_path(_TESTDATA_PATH), 'bundle_checkpoint_vocab.txt')
    self.old_feature_vocab_file = os.path.join(
        test.test_src_dir_path(_TESTDATA_PATH),
        'bundle_checkpoint_vocab_with_oov.txt')
    self.new_class_vocab_file = os.path.join(
        test.test_src_dir_path(_TESTDATA_PATH), 'keyword_new.txt')
    self.old_class_vocab_file = os.path.join(
        test.test_src_dir_path(_TESTDATA_PATH), 'keyword.txt')
    self.init_val = 42

    def _init_val_initializer(shape, dtype=None, partition_info=None):
      del dtype, partition_info  # Unused by this unit-testing initializer.
      return array_ops.tile(
          constant_op.constant([[self.init_val]], dtype=dtypes.float32), shape)

    self.initializer = _init_val_initializer
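
As a quick illustration of what the _init_val_initializer defined in the setUp examples produces: tiling the 1x1 constant by shape yields a tensor of that shape filled with init_val. A hedged standalone version using the public tf API (assumed equivalent to the internal array_ops/constant_op calls used above):

import tensorflow as tf

init_val = 42

def init_val_initializer(shape, dtype=None, partition_info=None):
  del dtype, partition_info  # Unused, as in the tests above.
  return tf.tile(tf.constant([[init_val]], dtype=tf.float32), shape)

print(init_val_initializer((2, 3)))  # 2x3 tensor, every entry 42.0
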
Example #41
  def testMetricsCollection(self):

    def _enqueue_vector(sess, queue, values, shape=None):
      if not shape:
        shape = (1, len(values))
      dtype = queue.dtypes[0]
      sess.run(
          queue.enqueue(constant_op.constant(
              values, dtype=dtype, shape=shape)))

    meta_graph_filename = os.path.join(
        _TestDir("metrics_export"), "meta_graph.pb")

    graph = ops.Graph()
    with self.session(graph=graph) as sess:
      values_queue = data_flow_ops.FIFOQueue(
          4, dtypes.float32, shapes=(1, 2))
      _enqueue_vector(sess, values_queue, [0, 1])
      _enqueue_vector(sess, values_queue, [-4.2, 9.1])
      _enqueue_vector(sess, values_queue, [6.5, 0])
      _enqueue_vector(sess, values_queue, [-3.2, 4.0])
      values = values_queue.dequeue()

      _, update_op = metrics.mean(values)

      initializer = variables.local_variables_initializer()
      self.evaluate(initializer)
      self.evaluate(update_op)

    meta_graph.export_scoped_meta_graph(
        filename=meta_graph_filename, graph=graph)

    # Verifies that importing a meta_graph with LOCAL_VARIABLES collection
    # works correctly.
    graph = ops.Graph()
    with self.session(graph=graph) as sess:
      meta_graph.import_scoped_meta_graph(meta_graph_filename)
      initializer = variables.local_variables_initializer()
      self.evaluate(initializer)

    # Verifies that importing an old meta_graph where "local_variables"
    # collection is of node_list type works, but cannot build initializer
    # with the collection.
    graph = ops.Graph()
    with self.session(graph=graph) as sess:
      meta_graph.import_scoped_meta_graph(
          test.test_src_dir_path(
              "python/framework/testdata/metrics_export_meta_graph.pb"))
      self.assertEqual(len(ops.get_collection(ops.GraphKeys.LOCAL_VARIABLES)),
                       2)
      with self.assertRaisesRegex(
          AttributeError, "'Tensor' object has no attribute 'initializer'"):
        initializer = variables.local_variables_initializer()
Example #42
 def testScanCommandFoundDenylistedOp(self):
   self.parser = saved_model_cli.create_parser()
   base_path = test.test_src_dir_path(SAVED_MODEL_PATH)
   args = self.parser.parse_args(
       ['scan', '--dir', base_path, '--tag_set', 'serve'])
   op_denylist = saved_model_cli._OP_DENYLIST
   saved_model_cli._OP_DENYLIST = set(['VariableV2'])
   with captured_output() as (out, _):
     saved_model_cli.scan(args)
   saved_model_cli._OP_DENYLIST = op_denylist
   output = out.getvalue().strip()
   self.assertTrue('\'VariableV2\'' in output)
Example #43
    def test_graphdef_basic(self):
        sm_pb_file = test.test_src_dir_path(
            "cc/saved_model/testdata/VarsAndArithmeticObjectGraph/saved_model.pb"
        )
        with file_io.FileIO(sm_pb_file, "rb") as f:
            file_content = f.read()

        fingerprint_def = fingerprint_pb2.FingerprintDef()
        fingerprint_def.ParseFromString(
            fingerprinting.CreateFingerprintDef(file_content))
        # We cannot check the value of the graph_def_hash due to non-determinism in
        # serialization.
        self.assertGreater(fingerprint_def.graph_def_hash, 0)
Example #44
 def testRunCommandInputExamples(self):
   self.parser = saved_model_cli.create_parser()
   base_path = test.test_src_dir_path(SAVED_MODEL_PATH)
   output_dir = os.path.join(test.get_temp_dir(), 'new_dir')
   args = self.parser.parse_args([
       'run', '--dir', base_path, '--tag_set', 'serve', '--signature_def',
       'regress_x_to_y', '--input_examples',
       'inputs=[{"x":[8.0],"x2":[5.0]}, {"x":[4.0],"x2":[3.0]}]', '--outdir',
       output_dir
   ])
   saved_model_cli.run(args)
   y_actual = np.load(os.path.join(output_dir, 'outputs.npy'))
   y_expected = np.array([[6.0], [4.0]])
   self.assertAllEqual(y_expected, y_actual)
Example #45
  def testAOTCompileCPUWrongSignatureDefKey(self):
    if not test.is_built_with_xla():
      self.skipTest('Skipping test because XLA is not compiled in.')

    self.parser = saved_model_cli.create_parser()
    base_path = test.test_src_dir_path(SAVED_MODEL_PATH)
    output_dir = os.path.join(test.get_temp_dir(), 'aot_compile_cpu_dir')
    args = self.parser.parse_args([
        'aot_compile_cpu', '--dir', base_path, '--tag_set', 'serve',
        '--output_prefix', output_dir, '--cpp_class', 'Compiled',
        '--signature_def_key', 'MISSING'
    ])
    with self.assertRaisesRegex(ValueError, 'Unable to find signature_def'):
      saved_model_cli.aot_compile_cpu(args)
Example #46
 def testRunCommandInputExamples(self):
   self.parser = saved_model_cli.create_parser()
   base_path = test.test_src_dir_path(SAVED_MODEL_PATH)
   output_dir = os.path.join(test.get_temp_dir(), 'new_dir')
   args = self.parser.parse_args([
       'run', '--dir', base_path, '--tag_set', 'serve', '--signature_def',
       'regress_x_to_y', '--input_examples',
       'inputs=[{"x":[8.0],"x2":[5.0]}, {"x":[4.0],"x2":[3.0]}]', '--outdir',
       output_dir
   ])
   saved_model_cli.run(args)
   y_actual = np.load(os.path.join(output_dir, 'outputs.npy'))
   y_expected = np.array([[6.0], [4.0]])
   self.assertAllEqual(y_expected, y_actual)
Example #47
    def testSmileyFace(self):
        """Check warping accuracy by comparing to hardcoded warped images."""

        test_data_dir = test.test_src_dir_path('contrib/image/python/'
                                               'kernel_tests/test_data/')
        input_file = test_data_dir + 'Yellow_Smiley_Face.png'
        with self.test_session() as sess:
            input_image = self.load_image(input_file, sess)
        control_points = np.asarray([[64, 59], [180 - 64, 59], [39, 111],
                                     [180 - 39, 111], [90, 143], [58, 134],
                                     [180 - 58, 134]])  # pyformat: disable
        control_point_displacements = np.asarray([[-10.5, 10.5], [10.5, 10.5],
                                                  [0, 0], [0, 0], [0, -10],
                                                  [-20, 10.25], [10, 10.75]])
        control_points_op = constant_op.constant(
            np.expand_dims(np.float32(control_points[:, [1, 0]]), 0))
        control_point_displacements_op = constant_op.constant(
            np.expand_dims(np.float32(control_point_displacements[:, [1, 0]]),
                           0))
        float_image = np.expand_dims(np.float32(input_image) / 255, 0)
        input_image_op = constant_op.constant(float_image)

        for interpolation_order in (1, 2, 3):
            for num_boundary_points in (0, 1, 4):
                warp_op, _ = sparse_image_warp.sparse_image_warp(
                    input_image_op,
                    control_points_op,
                    control_points_op + control_point_displacements_op,
                    interpolation_order=interpolation_order,
                    num_boundary_points=num_boundary_points)
                with self.test_session() as sess:
                    warped_image = sess.run(warp_op)
                    out_image = np.uint8(warped_image[0, :, :, :] * 255)
                    target_file = (
                        test_data_dir + 'Yellow_Smiley_Face_Warp-interp' +
                        '-{}-clamp-{}.png'.format(interpolation_order,
                                                  num_boundary_points))

                    target_image = self.load_image(target_file, sess)

                    # Check that the target_image and out_image difference is no
                    # bigger than 2 (on a scale of 0-255). Due to differences in
                    # floating point computation on different devices, the float
                    # output in warped_image may get rounded to a different int
                    # than that in the saved png file loaded into target_image.
                    self.assertAllClose(target_image,
                                        out_image,
                                        atol=2,
                                        rtol=1e-3)
Example #48
 def testMatrixThatFailsWhenFlushingDenormsToZero(self):
   # Test a 32x32 matrix which is known to fail if denorm floats are flushed to
   # zero.
   matrix = np.genfromtxt(
       test.test_src_dir_path(
           "python/kernel_tests/testdata/"
           "self_adjoint_eig_fail_if_denorms_flushed.txt")).astype(np.float32)
   self.assertEqual(matrix.shape, (32, 32))
   matrix_tensor = constant_op.constant(matrix)
   with self.session(use_gpu=True) as sess:
     (e, v) = sess.run(linalg_ops.self_adjoint_eig(matrix_tensor))
     self.assertEqual(e.size, 32)
     self.assertAllClose(
         np.matmul(v, v.transpose()), np.eye(32, dtype=np.float32), atol=2e-3)
     self.assertAllClose(matrix,
                         np.matmul(np.matmul(v, np.diag(e)), v.transpose()))
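
The assertions in the example above check the standard symmetric eigendecomposition identities; a standalone NumPy sketch of the same checks on a small random symmetric matrix (in place of the checked-in 32x32 test data):

import numpy as np

rng = np.random.default_rng(0)
a = rng.standard_normal((4, 4)).astype(np.float32)
matrix = (a + a.T) / 2            # make it symmetric
e, v = np.linalg.eigh(matrix)     # eigenvalues and orthonormal eigenvectors
assert np.allclose(v @ v.T, np.eye(4), atol=1e-5)
assert np.allclose(v @ np.diag(e) @ v.T, matrix, atol=1e-4)
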
Example #49
  def testBackwardCompatibility(self):
    """Load and execute a model that was saved in TF2.0."""

    model_dir = test.test_src_dir_path(
        "python/compiler/tensorrt/test/testdata/tftrt_2.0_saved_model")
    saved_model_loaded = load.load(model_dir, tags=[tag_constants.SERVING])
    graph_func = saved_model_loaded.signatures[
        signature_constants.DEFAULT_SERVING_SIGNATURE_DEF_KEY]

    np_input1 = ops.convert_to_tensor(np.ones([4, 1, 1]).astype(np.float32))
    np_input2 = ops.convert_to_tensor(np.ones([4, 1, 1]).astype(np.float32))
    output = graph_func(input1=np_input1, input2=np_input2)["output_0"]

    self.assertEqual(output.shape, (4, 1, 1))
    self.assertAllClose(
        np.asarray([5.0, 5.0, 5.0, 5.0]).reshape([4, 1, 1]), output)
Example #50
 def testRunCommandOutputFileExistError(self):
   self.parser = saved_model_cli.create_parser()
   base_path = test.test_src_dir_path(SAVED_MODEL_PATH)
   x = np.array([[1], [2]])
   x_notused = np.zeros((6, 3))
   input_path = os.path.join(test.get_temp_dir(),
                             'testRunCommandOutOverwrite_inputs.npz')
   np.savez(input_path, x0=x, x1=x_notused)
   output_file = os.path.join(test.get_temp_dir(), 'y.npy')
   open(output_file, 'a').close()
   args = self.parser.parse_args([
       'run', '--dir', base_path, '--tag_set', 'serve', '--signature_def',
       'serving_default', '--inputs', 'x=' + input_path + '[x0]', '--outdir',
       test.get_temp_dir()
   ])
   with self.assertRaises(RuntimeError):
     saved_model_cli.run(args)
Example #51
  def testSmileyFace(self):
    """Check warping accuracy by comparing to hardcoded warped images."""

    test_data_dir = test.test_src_dir_path('contrib/image/python/'
                                           'kernel_tests/test_data/')
    input_file = test_data_dir + 'Yellow_Smiley_Face.png'
    with self.test_session() as sess:
      input_image = self.load_image(input_file, sess)
    control_points = np.asarray([[64, 59], [180 - 64, 59], [39, 111],
                                 [180 - 39, 111], [90, 143], [58, 134],
                                 [180 - 58, 134]])  # pyformat: disable
    control_point_displacements = np.asarray(
        [[-10.5, 10.5], [10.5, 10.5], [0, 0], [0, 0], [0, -10], [-20, 10.25],
         [10, 10.75]])
    control_points_op = constant_op.constant(
        np.expand_dims(np.float32(control_points[:, [1, 0]]), 0))
    control_point_displacements_op = constant_op.constant(
        np.expand_dims(np.float32(control_point_displacements[:, [1, 0]]), 0))
    float_image = np.expand_dims(np.float32(input_image) / 255, 0)
    input_image_op = constant_op.constant(float_image)

    for interpolation_order in (1, 2, 3):
      for num_boundary_points in (0, 1, 4):
        warp_op, _ = sparse_image_warp.sparse_image_warp(
            input_image_op,
            control_points_op,
            control_points_op + control_point_displacements_op,
            interpolation_order=interpolation_order,
            num_boundary_points=num_boundary_points)
        with self.test_session() as sess:
          warped_image = sess.run(warp_op)
          out_image = np.uint8(warped_image[0, :, :, :] * 255)
          target_file = (
              test_data_dir +
              'Yellow_Smiley_Face_Warp-interp' + '-{}-clamp-{}.png'.format(
                  interpolation_order, num_boundary_points))

          target_image = self.load_image(target_file, sess)

          # Check that the target_image and out_image difference is no
          # bigger than 2 (on a scale of 0-255). Due to differences in
          # floating point computation on different devices, the float
          # output in warped_image may get rounded to a different int
          # than that in the saved png file loaded into target_image.
          self.assertAllClose(target_image, out_image, atol=2, rtol=1e-3)
Example #52
  def testSavedModelBasic(self):
    base_path = test.test_src_dir_path(SAVED_MODEL_PATH)
    ops.reset_default_graph()
    sess, meta_graph_def = (
        bundle_shim.load_session_bundle_or_saved_model_bundle_from_path(
            base_path,
            tags=[tag_constants.SERVING],
            target="",
            config=config_pb2.ConfigProto(device_count={"CPU": 2})))

    self.assertTrue(sess)

    # Check basic signature def property.
    signature_def = meta_graph_def.signature_def
    self.assertEqual(signature_def["regress_x_to_y"].method_name,
                     signature_constants.REGRESS_METHOD_NAME)
    with sess.as_default():
      output1 = sess.run(["filename_tensor:0"])
      self.assertEqual([compat.as_bytes("foo.txt")], output1)
Example #53
 def testRunCommandOutOverwrite(self):
   self.parser = saved_model_cli.create_parser()
   base_path = test.test_src_dir_path(SAVED_MODEL_PATH)
   x = np.array([[1], [2]])
   x_notused = np.zeros((6, 3))
   input_path = os.path.join(test.get_temp_dir(),
                             'testRunCommandOutOverwrite_inputs.npz')
   np.savez(input_path, x0=x, x1=x_notused)
   output_file = os.path.join(test.get_temp_dir(), 'y.npy')
   open(output_file, 'a').close()
   args = self.parser.parse_args([
       'run', '--dir', base_path, '--tag_set', 'serve', '--signature_def',
       'serving_default', '--inputs', 'x=' + input_path + '[x0]', '--outdir',
       test.get_temp_dir(), '--overwrite'
   ])
   saved_model_cli.run(args)
   y_actual = np.load(output_file)
   y_expected = np.array([[2.5], [3.0]])
   self.assertAllClose(y_expected, y_actual)
Example #54
 def testShowCommandSignature(self):
   base_path = test.test_src_dir_path(SAVED_MODEL_PATH)
   self.parser = saved_model_cli.create_parser()
   args = self.parser.parse_args(
       ['show', '--dir', base_path, '--tag_set', 'serve'])
   with captured_output() as (out, err):
     saved_model_cli.show(args)
   output = out.getvalue().strip()
   exp_header = ('The given SavedModel MetaGraphDef contains SignatureDefs '
                 'with the following keys:')
   exp_start = 'SignatureDef key: '
   exp_keys = [
       '"classify_x2_to_y3"', '"classify_x_to_y"', '"regress_x2_to_y3"',
       '"regress_x_to_y"', '"regress_x_to_y2"', '"serving_default"'
   ]
   # Order of signatures does not matter
   self.assertMultiLineEqual(
       output,
       '\n'.join([exp_header] + [exp_start + exp_key for exp_key in exp_keys]))
   self.assertEqual(err.getvalue().strip(), '')
Example #55
 def testShowCommandInputsOutputs(self):
   base_path = test.test_src_dir_path(SAVED_MODEL_PATH)
   self.parser = saved_model_cli.create_parser()
   args = self.parser.parse_args([
       'show', '--dir', base_path, '--tag_set', 'serve', '--signature_def',
       'serving_default'
   ])
   with captured_output() as (out, err):
     saved_model_cli.show(args)
   output = out.getvalue().strip()
   expected_output = (
       'The given SavedModel SignatureDef contains the following input(s):\n'
       '  inputs[\'x\'] tensor_info:\n'
       '      dtype: DT_FLOAT\n      shape: (-1, 1)\n      name: x:0\n'
       'The given SavedModel SignatureDef contains the following output(s):\n'
       '  outputs[\'y\'] tensor_info:\n'
       '      dtype: DT_FLOAT\n      shape: (-1, 1)\n      name: y:0\n'
       'Method name is: tensorflow/serving/predict')
   self.assertEqual(output, expected_output)
   self.assertEqual(err.getvalue().strip(), '')
Example #56
 def testRunCommandExistingOutdir(self):
   self.parser = saved_model_cli.create_parser()
   base_path = test.test_src_dir_path(SAVED_MODEL_PATH)
   x = np.array([[1], [2]])
   x_notused = np.zeros((6, 3))
   input_path = os.path.join(test.get_temp_dir(), 'testRunCommand_inputs.npz')
   np.savez(input_path, x0=x, x1=x_notused)
   output_file = os.path.join(test.get_temp_dir(), 'outputs.npy')
   if os.path.exists(output_file):
     os.remove(output_file)
   args = self.parser.parse_args([
       'run', '--dir', base_path, '--tag_set', 'serve', '--signature_def',
       'regress_x2_to_y3', '--inputs', 'inputs=' + input_path + '[x0]',
       '--outdir',
       test.get_temp_dir()
   ])
   saved_model_cli.run(args)
   y_actual = np.load(output_file)
   y_expected = np.array([[3.5], [4.0]])
   self.assertAllClose(y_expected, y_actual)
Example #57
 def testRunCommandNewOutdir(self):
   self.parser = saved_model_cli.create_parser()
   base_path = test.test_src_dir_path(SAVED_MODEL_PATH)
   x = np.array([[1], [2]])
   x_notused = np.zeros((6, 3))
   input_path = os.path.join(test.get_temp_dir(),
                             'testRunCommandNewOutdir_inputs.npz')
   output_dir = os.path.join(test.get_temp_dir(), 'new_dir')
   if os.path.isdir(output_dir):
     shutil.rmtree(output_dir)
   np.savez(input_path, x0=x, x1=x_notused)
   args = self.parser.parse_args([
       'run', '--dir', base_path, '--tag_set', 'serve', '--signature_def',
       'serving_default', '--inputs', 'x=' + input_path + '[x0]', '--outdir',
       output_dir
   ])
   saved_model_cli.run(args)
   y_actual = np.load(os.path.join(output_dir, 'y.npy'))
   y_expected = np.array([[2.5], [3.0]])
   self.assertAllClose(y_expected, y_actual)
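
The run-command examples above all feed inputs through an .npz file and select one array with the [x0] suffix. A standalone sketch of that round trip (paths and key names are illustrative only):

import os
import tempfile

import numpy as np

input_path = os.path.join(tempfile.gettempdir(), 'inputs_sketch.npz')
np.savez(input_path, x0=np.array([[1], [2]]), x1=np.zeros((6, 3)))

loaded = np.load(input_path)
print(loaded['x0'])  # the array that an '--inputs x=<path>[x0]' argument selects
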
Example #58
  def testLegacyBasic(self):
    base_path = test.test_src_dir_path(SESSION_BUNDLE_PATH)
    ops.reset_default_graph()
    sess, meta_graph_def = (
        bundle_shim.load_session_bundle_or_saved_model_bundle_from_path(
            base_path,
            tags=[""],
            target="",
            config=config_pb2.ConfigProto(device_count={"CPU": 2})))

    self.assertTrue(sess)
    asset_path = os.path.join(base_path, constants.ASSETS_DIRECTORY)
    with sess.as_default():
      path1, path2 = sess.run(["filename1:0", "filename2:0"])
      self.assertEqual(
          compat.as_bytes(os.path.join(asset_path, "hello1.txt")), path1)
      self.assertEqual(
          compat.as_bytes(os.path.join(asset_path, "hello2.txt")), path2)

      collection_def = meta_graph_def.collection_def

      signatures_any = collection_def[constants.SIGNATURES_KEY].any_list.value
      self.assertEqual(len(signatures_any), 1)
Example #59
  def testSavedModelBasic(self):
    base_path = test.test_src_dir_path(SAVED_MODEL_PATH)
    ops.reset_default_graph()
    sess, meta_graph_def = (
        bundle_shim.load_session_bundle_or_saved_model_bundle_from_path(
            base_path,
            tags=[tag_constants.SERVING],
            target="",
            config=config_pb2.ConfigProto(device_count={"CPU": 2})))

    self.assertTrue(sess)

    # Check basic signature def property.
    signature_def = meta_graph_def.signature_def
    self.assertEqual(len(signature_def), 2)
    self.assertEqual(
        signature_def[signature_constants.REGRESS_METHOD_NAME].method_name,
        signature_constants.REGRESS_METHOD_NAME)
    signature = signature_def["tensorflow/serving/regress"]
    asset_path = os.path.join(base_path, saved_model_constants.ASSETS_DIRECTORY)
    with sess.as_default():
      output1 = sess.run(["filename_tensor:0"])
      self.assertEqual(["foo.txt"], output1)