Example 1
    def test_load_with_import_scope(self, builder_cls):
        self.export_graph_with_main_op(builder_cls)
        loader = loader_impl.SavedModelLoader(SAVED_MODEL_WITH_MAIN_OP)
        with self.session(graph=ops.Graph()) as sess:
            saver, _ = loader.load_graph(sess.graph, ["foo_graph"],
                                         import_scope="baz")

            # The default saver should not work when the import scope is set.
            with self.assertRaises(errors.NotFoundError):
                loader.restore_variables(sess, tf_saver.Saver())

            loader.restore_variables(sess, saver)

            if builder_cls == saved_model_builder._SavedModelBuilder:
                with self.assertRaises(errors.NotFoundError):
                    loader.run_init_ops(sess, ["foo_graph"])
                loader.run_init_ops(sess, ["foo_graph"], import_scope="baz")
            else:
                loader.run_init_ops(sess, ["foo_graph"])

            self.assertEqual(5,
                             sess.graph.get_tensor_by_name("baz/x:0").eval())
            self.assertEqual(7,
                             sess.graph.get_tensor_by_name("baz/y:0").eval())

        # Test combined load function.
        loader = loader_impl.SavedModelLoader(SAVED_MODEL_WITH_MAIN_OP)
        with self.session(graph=ops.Graph()) as sess:
            loader.load(sess, ["foo_graph"], import_scope="baa")
            self.assertEqual(5,
                             sess.graph.get_tensor_by_name("baa/x:0").eval())
            self.assertEqual(7,
                             sess.graph.get_tensor_by_name("baa/y:0").eval())
Example 2
    def test_load_with_import_scope(self, builder_cls):
        # Force test to run in graph mode.
        # The SavedModelLoader.restore_variables and
        # SavedModelLoader.run_init_ops methods are v1-only APIs that require
        # a session to work.
        with ops.Graph().as_default():
            self.export_graph_with_main_op(builder_cls)
            loader = loader_impl.SavedModelLoader(SAVED_MODEL_WITH_MAIN_OP)
            with self.session(graph=ops.Graph()) as sess:
                saver, _ = loader.load_graph(sess.graph, ["foo_graph"],
                                             import_scope="baz")

                # The default saver should not work when the import scope is set.
                with self.assertRaises(errors.NotFoundError):
                    loader.restore_variables(sess, tf_saver.Saver())

                loader.restore_variables(sess, saver)

                if builder_cls == saved_model_builder._SavedModelBuilder:
                    with self.assertRaises(errors.NotFoundError):
                        loader.run_init_ops(sess, ["foo_graph"])
                    loader.run_init_ops(sess, ["foo_graph"],
                                        import_scope="baz")
                else:
                    loader.run_init_ops(sess, ["foo_graph"])

                self.assertEqual(5, sess.run(_tensor_name("baz/x")))
                self.assertEqual(7, sess.run(_tensor_name("baz/y")))

            # Test combined load function.
            loader = loader_impl.SavedModelLoader(SAVED_MODEL_WITH_MAIN_OP)
            with self.session(graph=ops.Graph()) as sess:
                loader.load(sess, ["foo_graph"], import_scope="baa")
                self.assertEqual(5, sess.run(_tensor_name("baa/x")))
                self.assertEqual(7, sess.run(_tensor_name("baa/y")))
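Several of these examples call a module-level `_tensor_name` helper that the page does not show. A one-line sketch, assuming it merely appends the `:0` output index used for graph-mode tensor lookups:

def _tensor_name(name):
  # Hedged guess at the omitted helper: "baz/x" -> "baz/x:0".
  return name + ":0"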
Example 3
  def test_load_function(self):
    loader = loader_impl.SavedModelLoader(SIMPLE_ADD_SAVED_MODEL)
    with self.test_session(graph=ops.Graph()) as sess:
      loader.load(sess, ["foo_graph"])
      self.assertEqual(5, sess.graph.get_tensor_by_name("x:0").eval())
      self.assertEqual(11, sess.graph.get_tensor_by_name("y:0").eval())

    loader2 = loader_impl.SavedModelLoader(SAVED_MODEL_WITH_MAIN_OP)
    with self.test_session(graph=ops.Graph()) as sess:
      loader2.load(sess, ["foo_graph"])
      self.assertEqual(5, sess.graph.get_tensor_by_name("x:0").eval())
      self.assertEqual(7, sess.graph.get_tensor_by_name("y:0").eval())
Example 4
  def __init__(self, saved_model_dir, model_dir=None):
    """Initialize a SavedModelEstimator.

    The SavedModelEstimator loads its model function and variable values from
    the graphs defined in the SavedModel. There is no option to pass in
    `RunConfig` or `params` arguments, because the model function graph is
    defined statically in the SavedModel.

    Args:
      saved_model_dir: Directory containing SavedModel protobuf and subfolders.
      model_dir: Directory to save new checkpoints during training.

    Raises:
      NotImplementedError: If a DistributionStrategy is defined in the config.
        Unless the SavedModelEstimator is subclassed, this shouldn't happen.
    """
    checkpoint = estimator_lib._get_saved_model_ckpt(saved_model_dir)  # pylint: disable=protected-access
    vars_to_warm_start = [name for name, _ in
                          checkpoint_utils.list_variables(checkpoint)]
    warm_start_settings = estimator_lib.WarmStartSettings(
        ckpt_to_initialize_from=checkpoint,
        vars_to_warm_start=vars_to_warm_start)

    super(SavedModelEstimator, self).__init__(
        model_fn=self._model_fn_from_saved_model, model_dir=model_dir,
        warm_start_from=warm_start_settings)
    if self._distribution is not None:
      raise NotImplementedError(
          'SavedModelEstimator currently does not support '
          'DistributionStrategy.')
    self.saved_model_dir = saved_model_dir
    self.saved_model_loader = loader_impl.SavedModelLoader(saved_model_dir)
    self._available_modes = self._extract_available_modes()
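For context, a hedged usage sketch of the constructor above; the paths and `input_fn` below are illustrative placeholders, not part of the original code:

# Hypothetical usage: resume training from the variables baked into a
# SavedModel, writing new checkpoints to a fresh model_dir.
estimator = SavedModelEstimator(
    saved_model_dir='/tmp/exported_model',
    model_dir='/tmp/continued_training')

def input_fn():
  ...  # placeholder input pipeline matching the SavedModel's feature spec

estimator.train(input_fn, steps=100)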
Example 5
  def test_matmul_ptq_model(self, activation_fn, has_bias):
    model = self.MatmulModel(has_bias, activation_fn)
    input_saved_model_path = self.create_tempdir('input').full_path
    saved_model_save.save(model, input_saved_model_path)

    def data_gen():
      for _ in range(255):
        yield {
            'input_tensor':
                ops.convert_to_tensor(
                    np.random.uniform(low=0, high=5, size=(1, 4)).astype('f4')),
        }

    tags = [tag_constants.SERVING]
    output_directory = self.create_tempdir().full_path

    quantization_options = quant_opts_pb2.QuantizationOptions(
        quantization_method=quant_opts_pb2.QuantizationMethod(
            experimental_method=_ExperimentalMethod.STATIC_RANGE))

    converted_model = quantize_model.quantize(
        input_saved_model_path, ['serving_default'],
        tags,
        output_directory,
        quantization_options,
        representative_dataset=data_gen())
    self.assertIsNotNone(converted_model)
    self.assertEqual(
        list(converted_model.signatures._signatures.keys()),
        ['serving_default'])

    output_loader = saved_model_loader.SavedModelLoader(output_directory)
    output_meta_graphdef = output_loader.get_meta_graph_def_from_tags(tags)
    self.assertTrue(_contains_quantized_function_call(output_meta_graphdef))
Example 6
  def test_model_use_representative_samples_list(self):
    model = self.MatmulModel()
    input_savedmodel_dir = self.create_tempdir('input').full_path
    saved_model_save.save(model, input_savedmodel_dir)

    quantization_options = quant_opts_pb2.QuantizationOptions(
        quantization_method=quant_opts_pb2.QuantizationMethod(
            experimental_method=_ExperimentalMethod.STATIC_RANGE))
    output_savedmodel_dir = self.create_tempdir().full_path
    tags = {tag_constants.SERVING}

    representative_dataset = [{
        'input_tensor': random_ops.random_uniform(shape=(1, 4))
    } for _ in range(128)]

    converted_model = quantize_model.quantize(
        input_savedmodel_dir, ['serving_default'],
        output_directory=output_savedmodel_dir,
        quantization_options=quantization_options,
        representative_dataset=representative_dataset)

    self.assertIsNotNone(converted_model)
    self.assertEqual(
        list(converted_model.signatures._signatures.keys()),
        ['serving_default'])
    output_loader = saved_model_loader.SavedModelLoader(output_savedmodel_dir)
    output_meta_graphdef = output_loader.get_meta_graph_def_from_tags(tags)
    # Model is quantized: representative samples were provided as a list.
    self.assertTrue(_contains_quantized_function_call(output_meta_graphdef))
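Taken together, these tests pass `representative_dataset` in three shapes: a generator instance (`data_gen()` in Example 5), a plain list of feed dicts (this example), and a bare callable (`data_gen` in later examples). A hedged sketch of the three forms, assuming the quantizer accepts any iterable of sample dicts or a callable that produces one:

import numpy as np

def _sample():
  # One calibration sample, keyed by the signature's input name.
  return {'input_tensor': np.random.uniform(size=(1, 4)).astype('f4')}

as_list = [_sample() for _ in range(128)]        # list of dicts
as_generator = (_sample() for _ in range(128))   # generator instance

def as_callable():                               # callable returning an iterable
  for _ in range(128):
    yield _sample()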
Example 7
def _parse_saved_model_signatures(
    model_path: Text,
    tag_set: Iterable[Text],
    signature_names: Iterable[Text]) -> Mapping[Text, _SignatureDef]:
  """Parse SignatureDefs of given signature names from SavedModel.

  Among one or more MetaGraphDefs in SavedModel, the first one that has all the
  tag_set elements is chosen. Selected MetaGraphDef should have signatures for
  all given signature names.

  Args:
    model_path: A path to the SavedModel directory.
    tag_set: A set of tags MetaGraphDef should have.
    signature_names: A list of signature names to retrieve.

  Returns:
    A mapping from signature name to SignatureDef.
  """
  if not tag_set:
    tag_set = {tf.saved_model.SERVING}
    logging.info('tag_set is not given. Using %r instead.', tag_set)
  if not signature_names:
    signature_names = [tf.saved_model.DEFAULT_SERVING_SIGNATURE_DEF_KEY]
    logging.info('signature_names are not given. Using %r instead.',
                 signature_names)
  loader = loader_impl.SavedModelLoader(model_path)
  meta_graph_def = loader.get_meta_graph_def_from_tags(tag_set)
  result = {}
  for signature_name in signature_names:
    if signature_name not in meta_graph_def.signature_def:
      raise ValueError('SignatureDef of name {} could not be found in '
                       'MetaGraphDef'.format(signature_name))
    result[signature_name] = meta_graph_def.signature_def[signature_name]
  return result
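A short usage sketch for `_parse_saved_model_signatures`; the model path is a placeholder, and the empty arguments exercise the fallbacks documented above:

sig_defs = _parse_saved_model_signatures(
    model_path='/tmp/exported_model',  # placeholder
    tag_set=set(),        # falls back to {tf.saved_model.SERVING}
    signature_names=[])   # falls back to the default serving signature key
for name, sig in sig_defs.items():
  print(name, list(sig.inputs), list(sig.outputs))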
Example 8
    def __init__(self, saved_model_dir, model_dir=None):
        """Initialize a SavedModelEstimator.

        The SavedModelEstimator loads its model function and variable values
        from the graphs defined in the SavedModel. There is no option to pass
        in `RunConfig` or `params` arguments, because the model function graph
        is defined statically in the SavedModel.

        Args:
          saved_model_dir: Directory containing SavedModel protobuf and
            subfolders.
          model_dir: Directory to save new checkpoints during training.

        Raises:
          NotImplementedError: If a DistributionStrategy is defined in the
            config. Unless the SavedModelEstimator is subclassed, this
            shouldn't happen.
        """

        super(SavedModelEstimator,
              self).__init__(model_fn=self._model_fn_from_saved_model,
                             model_dir=model_dir)
        if self._train_distribution or self._eval_distribution:
            raise NotImplementedError(
                'SavedModelEstimator currently does not support '
                'DistributionStrategy.')
        self.saved_model_dir = saved_model_dir
        self.saved_model_loader = loader_impl.SavedModelLoader(saved_model_dir)
        self._available_modes = self._extract_available_modes()
Example 9
    def test_matmul_model(self):
        class MatmulModel(module.Module):
            @def_function.function(input_signature=[
                tensor_spec.TensorSpec(shape=[1, 4], dtype=dtypes.float32)
            ])
            def matmul(self, input_tensor):
                filters = np.random.uniform(low=-1.0, high=1.0,
                                            size=(4, 3)).astype('f4')
                out = math_ops.matmul(input_tensor, filters)
                return {'output': out}

        model = MatmulModel()
        input_saved_model_path = self.create_tempdir('input').full_path
        saved_model_save.save(model, input_saved_model_path)

        tags = [tag_constants.SERVING]
        output_directory = self.create_tempdir().full_path

        quantization_options = quant_opts_pb2.QuantizationOptions(
            quantization_method=quant_opts_pb2.QuantizationMethod(
                experimental_method=_ExperimentalMethod.DYNAMIC_RANGE))

        converted_model = quantize_model.quantize(input_saved_model_path,
                                                  ['serving_default'], tags,
                                                  output_directory,
                                                  quantization_options)
        self.assertIsNotNone(converted_model)
        self.assertEqual(list(converted_model.signatures._signatures.keys()),
                         ['serving_default'])

        output_loader = saved_model_loader.SavedModelLoader(output_directory)
        output_meta_graphdef = output_loader.get_meta_graph_def_from_tags(tags)
        self.assertTrue(
            _contains_quantized_function_call(output_meta_graphdef))
Example 10
  def test_parse_saved_model(self, builder_cls):
    self.export_simple_graph(builder_cls)
    loader = loader_impl.SavedModelLoader(SIMPLE_ADD_SAVED_MODEL)
    meta_graph = loader.get_meta_graph_def_from_tags(["foo_graph"])
    self.assertIsNotNone(meta_graph)
    self.assertIn("foo", meta_graph.signature_def)
    self.assertIn("bar", meta_graph.signature_def)
Example 11
    def test_ptq_model(self):
        class PTQModelWithAdd(tracking.AutoTrackable):
            """Basic model with addition."""
            @def_function.function(input_signature=[
                tensor_spec.TensorSpec(shape=[10],
                                       dtype=dtypes.float32,
                                       name='x'),
                tensor_spec.TensorSpec(shape=[10],
                                       dtype=dtypes.float32,
                                       name='y')
            ])
            def add(self, x, y):
                res = math_ops.add(x, y)
                return {'output': res, 'x': x, 'y': y}

        def data_gen():
            for _ in range(255):
                yield {
                    'x':
                    ops.convert_to_tensor(
                        np.random.uniform(size=(10)).astype('f4')),
                    'y':
                    ops.convert_to_tensor(
                        np.random.uniform(size=(10)).astype('f4'))
                }

        root = PTQModelWithAdd()

        temp_path = self.create_tempdir().full_path
        saved_model_save.save(root,
                              temp_path,
                              signatures=root.add.get_concrete_function())

        output_directory = self.create_tempdir().full_path
        tags = [tag_constants.SERVING]
        model = quantize_model.quantize(temp_path, ['serving_default'],
                                        tags,
                                        output_directory,
                                        representative_dataset=data_gen)
        self.assertIsNotNone(model)
        self.assertEqual(list(model.signatures._signatures.keys()),
                         ['serving_default'])
        func = model.signatures['serving_default']
        func_res = func(x=array_ops.constant(0.1, shape=[10]),
                        y=array_ops.constant(0.1, shape=[10]))
        self.assertAllClose(func_res['output'],
                            array_ops.constant(0.2, shape=[10]),
                            atol=0.01)
        xy_atol = 1e-6
        self.assertAllClose(func_res['x'],
                            array_ops.constant(0.1, shape=[10]),
                            atol=xy_atol)
        self.assertAllClose(func_res['y'],
                            array_ops.constant(0.1, shape=[10]),
                            atol=xy_atol)

        output_loader = saved_model_loader.SavedModelLoader(output_directory)
        output_meta_graphdef = output_loader.get_meta_graph_def_from_tags(tags)
        self.assertTrue(
            _contains_quantized_function_call(output_meta_graphdef))
Example 12
  def test_load_invalid_meta_graph(self):
    loader = loader_impl.SavedModelLoader(SIMPLE_ADD_SAVED_MODEL)
    with self.assertRaises(RuntimeError):
      loader.get_meta_graph_def_from_tags([])
    with self.assertRaises(RuntimeError):
      loader.get_meta_graph_def_from_tags([""])
    with self.assertRaises(RuntimeError):
      loader.get_meta_graph_def_from_tags(["not_a_graph"])
Example 13
    def test_qat_model(self):
        class QATModelWithAdd(tracking.AutoTrackable):
            """Basic model with Fake quant + add."""
            @def_function.function(input_signature=[
                tensor_spec.TensorSpec(shape=[10],
                                       dtype=dtypes.float32,
                                       name='x'),
                tensor_spec.TensorSpec(shape=[10],
                                       dtype=dtypes.float32,
                                       name='y')
            ])
            def add(self, x, y):
                float_res = math_ops.add(x, y)
                x = array_ops.fake_quant_with_min_max_args(x,
                                                           min=-0.1,
                                                           max=0.2,
                                                           num_bits=8,
                                                           narrow_range=False)
                y = array_ops.fake_quant_with_min_max_args(y,
                                                           min=-0.3,
                                                           max=0.4,
                                                           num_bits=8,
                                                           narrow_range=False)
                res = math_ops.add(x, y)
                res = array_ops.fake_quant_with_min_max_args(
                    res, min=-0.4, max=0.6, num_bits=8, narrow_range=False)
                return {'output': res, 'float_output': float_res}

        root = QATModelWithAdd()

        temp_path = self.create_tempdir().full_path
        saved_model_save.save(root,
                              temp_path,
                              signatures=root.add.get_concrete_function())

        output_directory = self.create_tempdir().full_path
        tags = [tag_constants.SERVING]
        model = quantize_model.quantize(temp_path, ['serving_default'],
                                        [tag_constants.SERVING],
                                        output_directory)
        self.assertIsNotNone(model)
        self.assertEqual(list(model.signatures._signatures.keys()),
                         ['serving_default'])
        func = model.signatures['serving_default']
        func_res = func(x=array_ops.constant(0.1, shape=[10]),
                        y=array_ops.constant(0.1, shape=[10]))
        self.assertAllClose(func_res['output'],
                            array_ops.constant(0.2, shape=[10]),
                            atol=0.01)
        self.assertAllClose(func_res['float_output'],
                            array_ops.constant(0.2, shape=[10]),
                            atol=1e-3)

        output_loader = saved_model_loader.SavedModelLoader(output_directory)
        output_meta_graphdef = output_loader.get_meta_graph_def_from_tags(tags)
        self.assertTrue(
            _contains_quantized_function_call(output_meta_graphdef))
Example 14
    def test_load_function(self, builder_cls):
        # Force test to run in graph mode.
        # The SavedModelLoader.load method is a v1-only API that requires a session
        # to work.
        with ops.Graph().as_default():
            self.export_simple_graph(builder_cls)
            loader = loader_impl.SavedModelLoader(SIMPLE_ADD_SAVED_MODEL)
            with self.session(graph=ops.Graph()) as sess:
                loader.load(sess, ["foo_graph"])
                self.assertEqual(5, sess.run(_tensor_name("x")))
                self.assertEqual(11, sess.run(_tensor_name("y")))

            self.export_graph_with_main_op(builder_cls)
            loader2 = loader_impl.SavedModelLoader(SAVED_MODEL_WITH_MAIN_OP)
            with self.session(graph=ops.Graph()) as sess:
                loader2.load(sess, ["foo_graph"])
                self.assertEqual(5, sess.run(_tensor_name("x")))
                self.assertEqual(7, sess.run(_tensor_name("y")))
Example 15
    def test_model_with_uncalibrated_subgraph(self):
        class IfModel(module.Module):
            @def_function.function(input_signature=[
                tensor_spec.TensorSpec(shape=[1, 4], dtype=dtypes.float32)
            ])
            def model_fn(self, x):
                if math_ops.reduce_sum(x) > 10.0:
                    filters = np.random.uniform(low=-1.0,
                                                high=1.0,
                                                size=(4, 3)).astype('f4')
                    bias = np.random.uniform(low=-1.0, high=1.0,
                                             size=(3, )).astype('f4')
                    out = math_ops.matmul(x, filters)
                    out = nn_ops.bias_add(out, bias)
                    return {'output': out}

                filters = np.random.uniform(low=-1.0, high=1.0,
                                            size=(4, 3)).astype('f4')
                bias = np.random.uniform(low=-1.0, high=1.0,
                                         size=(3, )).astype('f4')
                out = math_ops.matmul(x, filters)
                out = nn_ops.bias_add(out, bias)
                return {'output': out}

        model = IfModel()
        input_saved_model_path = self.create_tempdir('input').full_path
        saved_model_save.save(model, input_saved_model_path)

        def data_gen():
            for _ in range(10):
                yield {
                    'x':
                    ops.convert_to_tensor(
                        np.random.uniform(low=0.0, high=1.0,
                                          size=(1, 4)).astype('f4')),
                }

        tags = [tag_constants.SERVING]
        output_directory = self.create_tempdir().full_path
        with warnings.catch_warnings(record=True) as w:
            converted_model = quantize_model.quantize(
                input_saved_model_path, ['serving_default'],
                tags,
                output_directory,
                optimization_method=quantize_model.OptimizationMethod.
                STATIC_RANGE_QUANT,
                representative_dataset=data_gen)
            self.assertGreaterEqual(len(w), 1)
            self.assertIn('does not have min/max values', str(w[0]))
        self.assertIsNotNone(converted_model)
        self.assertEqual(list(converted_model.signatures._signatures.keys()),
                         ['serving_default'])
        output_loader = saved_model_loader.SavedModelLoader(output_directory)
        output_meta_graphdef = output_loader.get_meta_graph_def_from_tags(tags)
        self.assertTrue(
            _contains_quantized_function_call(output_meta_graphdef))
Example 16
    def test_model_no_representative_sample_shows_warnings(self):
        class SimpleMatmulModel(module.Module):
            @def_function.function(input_signature=[
                tensor_spec.TensorSpec(shape=[1, 4], dtype=dtypes.float32)
            ])
            def matmul(self, input_tensor):
                filters = random_ops.random_uniform(shape=(4, 3),
                                                    minval=-1.,
                                                    maxval=1.)
                bias = random_ops.random_uniform(shape=(3, ),
                                                 minval=-1.,
                                                 maxval=1.)

                out = math_ops.matmul(input_tensor, filters)
                out = nn_ops.bias_add(out, bias)
                return {'output': out}

        model = SimpleMatmulModel()
        input_savedmodel_dir = self.create_tempdir('input').full_path
        output_savedmodel_dir = self.create_tempdir().full_path
        saved_model_save.save(model, input_savedmodel_dir)

        tags = [tag_constants.SERVING]
        quantization_options = quant_opts_pb2.QuantizationOptions(
            quantization_method=quant_opts_pb2.QuantizationMethod(
                experimental_method=_ExperimentalMethod.STATIC_RANGE))

        with warnings.catch_warnings(record=True) as warnings_list:
            converted_model = quantize_model.quantize(
                input_savedmodel_dir,
                ['serving_default'],
                tags,
                output_savedmodel_dir,
                quantization_options,
                # Put no sample into the representative dataset to make calibration
                # impossible.
                representative_dataset=lambda: [])

            self.assertNotEmpty(warnings_list)

            # Warning message should contain the function name.
            self.assertTrue(self._any_warning_contains('matmul',
                                                       warnings_list))
            self.assertTrue(
                self._any_warning_contains('does not have min or max values',
                                           warnings_list))

        self.assertIsNotNone(converted_model)
        self.assertEqual(list(converted_model.signatures._signatures.keys()),
                         ['serving_default'])
        output_loader = saved_model_loader.SavedModelLoader(
            output_savedmodel_dir)
        output_meta_graphdef = output_loader.get_meta_graph_def_from_tags(tags)
        # Model is not quantized because there was no sample data for calibration.
        self.assertFalse(
            _contains_quantized_function_call(output_meta_graphdef))
Example 17
    def test_loader_v1(self):
        read_count = metrics.GetRead(write_version="1")
        ops.disable_eager_execution()
        save_dir = self._create_save_v1_model()
        loader = loader_impl.SavedModelLoader(save_dir)
        with self.session(graph=ops.Graph()) as sess:
            loader.load(sess, ["foo"])
        ops.enable_eager_execution()

        self.assertEqual(metrics.GetReadApi(loader_impl._LOADER_LABEL), 1)
        self.assertEqual(metrics.GetRead(write_version="1"), read_count + 1)
Example 18
  def test_load_saved_model_graph_with_return_elements(self):
    """Ensure that the correct elements are returned."""
    loader = loader_impl.SavedModelLoader(SIMPLE_ADD_SAVED_MODEL)
    graph = ops.Graph()
    _, ret = loader.load_graph(graph, ["foo_graph"],
                               return_elements=["y:0", "x:0"])

    self.assertEqual(graph.get_tensor_by_name("y:0"), ret[0])
    self.assertEqual(graph.get_tensor_by_name("x:0"), ret[1])

    with self.assertRaisesRegexp(ValueError, "not found in graph"):
      loader.load_graph(graph, ["foo_graph"], return_elements=["z:0"])
Example 19
    def test_depthwise_conv_ptq_model(self, activation_fn, has_bias):
        class DepthwiseConvModel(module.Module):
            @def_function.function(input_signature=[
                tensor_spec.TensorSpec(shape=[1, 3, 4, 3],
                                       dtype=dtypes.float32)
            ])
            def conv(self, input_tensor):
                filters = np.random.uniform(low=-10,
                                            high=10,
                                            size=(2, 3, 3, 1)).astype('f4')
                bias = np.random.uniform(low=0, high=10, size=(3)).astype('f4')
                out = nn_ops.depthwise_conv2d_native(input_tensor,
                                                     filters,
                                                     strides=[1, 2, 2, 1],
                                                     dilations=[1, 1, 1, 1],
                                                     padding='SAME',
                                                     data_format='NHWC')
                if has_bias:
                    out = nn_ops.bias_add(out, bias)
                if activation_fn is not None:
                    out = activation_fn(out)
                return {'output': out}

        model = DepthwiseConvModel()
        input_saved_model_path = self.create_tempdir('input').full_path
        saved_model_save.save(model, input_saved_model_path)

        def data_gen():
            for _ in range(255):
                yield {
                    'input_tensor':
                    ops.convert_to_tensor(
                        np.random.uniform(low=0, high=150,
                                          size=(1, 3, 4, 3)).astype('f4')),
                }

        tags = [tag_constants.SERVING]
        output_directory = self.create_tempdir().full_path
        converted_model = quantize_model.quantize(
            input_saved_model_path, ['serving_default'],
            tags,
            output_directory,
            optimization_method=quantize_model.OptimizationMethod.
            STATIC_RANGE_QUANT,
            representative_dataset=data_gen)
        self.assertIsNotNone(converted_model)
        self.assertEqual(list(converted_model.signatures._signatures.keys()),
                         ['serving_default'])

        output_loader = saved_model_loader.SavedModelLoader(output_directory)
        output_meta_graphdef = output_loader.get_meta_graph_def_from_tags(tags)
        self.assertTrue(
            _contains_quantized_function_call(output_meta_graphdef))
Example 20
  def test_run_init_op(self):
    loader = loader_impl.SavedModelLoader(SAVED_MODEL_WITH_MAIN_OP)
    graph = ops.Graph()
    saver, _ = loader.load_graph(graph, ["foo_graph"])
    with self.test_session(graph=graph) as sess:
      loader.restore_variables(sess, saver)
      self.assertEqual(5, sess.graph.get_tensor_by_name("x:0").eval())
      self.assertEqual(11, sess.graph.get_tensor_by_name("y:0").eval())

      loader.run_init_ops(sess, ["foo_graph"])
      self.assertEqual(5, sess.graph.get_tensor_by_name("x:0").eval())
      self.assertEqual(7, sess.graph.get_tensor_by_name("y:0").eval())
Example 21
    def test_matmul_ptq_model(self, activation_fn, has_bias):
        class MatmulModel(module.Module):
            @def_function.function(input_signature=[
                tensor_spec.TensorSpec(shape=[1, 4], dtype=dtypes.float32)
            ])
            def matmul(self, input_tensor):
                filters = np.random.uniform(low=-1.0, high=1.0,
                                            size=(4, 3)).astype('f4')
                bias = np.random.uniform(low=-1.0, high=1.0,
                                         size=(3, )).astype('f4')
                out = math_ops.matmul(input_tensor, filters)
                if has_bias:
                    out = nn_ops.bias_add(out, bias)
                if activation_fn is not None:
                    out = activation_fn(out)
                return {'output': out}

        model = MatmulModel()
        input_saved_model_path = self.create_tempdir('input').full_path
        saved_model_save.save(model, input_saved_model_path)

        def data_gen():
            for _ in range(255):
                yield {
                    'input_tensor':
                    ops.convert_to_tensor(
                        np.random.uniform(low=0, high=5,
                                          size=(1, 4)).astype('f4')),
                }

        tags = [tag_constants.SERVING]
        output_directory = self.create_tempdir().full_path

        quantization_options = quant_opts_pb2.QuantizationOptions(
            quantization_method=quant_opts_pb2.QuantizationMethod(
                experimental_method=_ExperimentalMethod.STATIC_RANGE))

        converted_model = quantize_model.quantize(
            input_saved_model_path, ['serving_default'],
            tags,
            output_directory,
            quantization_options,
            representative_dataset=data_gen)
        self.assertIsNotNone(converted_model)
        self.assertEqual(list(converted_model.signatures._signatures.keys()),
                         ['serving_default'])

        output_loader = saved_model_loader.SavedModelLoader(output_directory)
        output_meta_graphdef = output_loader.get_meta_graph_def_from_tags(tags)
        self.assertTrue(
            _contains_quantized_function_call(output_meta_graphdef))
Example 22
  def test_restore_variables(self):
    loader = loader_impl.SavedModelLoader(SAVED_MODEL_WITH_MAIN_OP)
    with self.test_session(graph=ops.Graph()) as sess:
      x = variables.Variable(0, name="x")
      y = variables.Variable(0, name="y")
      z = x * y

      sess.run(variables.global_variables_initializer())

      # There are variables to restore, so a saver must be created.
      with self.assertRaises(ValueError):
        loader.restore_variables(sess, None)

      loader.restore_variables(sess, tf_saver.Saver())
      self.assertEqual(55, z.eval())
Example 23
    def test_load_saved_model_with_no_variables(self, builder_cls):
        """Test that SavedModel runs saver when there appear to be no variables.

    When no variables are detected, this may mean that the variables were saved
    to different collections, or the collections weren't saved to the
    SavedModel. If the SavedModel MetaGraphDef contains a saver, it should still
    run in either of these cases.

    Args:
      builder_cls: SavedModelBuilder or _SavedModelBuilder class
    """
        # Force test to run in graph mode.
        # The SavedModelBuilder.add_meta_graph_and_variables and
        # SavedModelLoader.load methods are v1-only APIs that require a session to
        # work.
        with ops.Graph().as_default():
            path = _get_export_dir("no_variable_saved_model")
            with session.Session(graph=ops.Graph()) as sess:
                x = variables.VariableV1(5,
                                         name="x",
                                         collections=["not_global_variable"])
                y = variables.VariableV1(11,
                                         name="y",
                                         collections=["not_global_variable"])
                self.assertFalse(variables._all_saveable_objects())
                z = x + y
                self.evaluate(variables.variables_initializer([x, y]))

                foo_sig_def = signature_def_utils.build_signature_def(
                    {"foo_input": utils.build_tensor_info(x)},
                    {"foo_output": utils.build_tensor_info(z)})

                builder = saved_model_builder.SavedModelBuilder(path)
                builder.add_meta_graph_and_variables(
                    sess, ["foo_graph"], {"foo": foo_sig_def},
                    saver=tf_saver.Saver([x, y]))
                builder.save()

            loader = loader_impl.SavedModelLoader(path)
            with self.session(graph=ops.Graph()) as sess:
                saver, _ = loader.load_graph(sess.graph, ["foo_graph"])
                self.assertFalse(variables._all_saveable_objects())
                self.assertIsNotNone(saver)

            with self.session(graph=ops.Graph()) as sess:
                loader.load(sess, ["foo_graph"])
                self.assertEqual(5, sess.run(_tensor_name("x")))
                self.assertEqual(11, sess.run(_tensor_name("y")))
Example 24
    def test_restore_variables(self, builder_cls):
        self.export_graph_with_main_op(builder_cls)
        loader = loader_impl.SavedModelLoader(SAVED_MODEL_WITH_MAIN_OP)
        with self.session(graph=ops.Graph()) as sess:
            x = variables.VariableV1(0, name="x")
            y = variables.VariableV1(0, name="y")
            z = x * y

            self.evaluate(variables.global_variables_initializer())

            # There are variables to restore, so a saver must be created.
            with self.assertRaises(ValueError):
                loader.restore_variables(sess, None)

            loader.restore_variables(sess, tf_saver.Saver())
            self.assertEqual(55, self.evaluate(z))
Example 25
  def test_conv_model(self):

    class ConvModel(module.Module):

      @def_function.function(input_signature=[
          tensor_spec.TensorSpec(shape=[1, 3, 4, 3], dtype=dtypes.float32)
      ])
      def conv(self, input_tensor):
        filters = np.random.uniform(
            low=-10, high=10, size=(2, 3, 3, 2)).astype('f4')
        bias = np.random.uniform(low=0, high=10, size=(2)).astype('f4')
        out = nn_ops.conv2d(
            input_tensor,
            filters,
            strides=[1, 1, 2, 1],
            dilations=[1, 1, 1, 1],
            padding='SAME',
            data_format='NHWC')
        out = nn_ops.bias_add(out, bias, data_format='NHWC')
        out = nn_ops.relu6(out)
        return {'output': out}

    model = ConvModel()
    input_saved_model_path = self.create_tempdir('input').full_path
    saved_model_save.save(model, input_saved_model_path)

    tags = [tag_constants.SERVING]
    output_directory = self.create_tempdir().full_path

    quantization_options = quant_opts_pb2.QuantizationOptions(
        quantization_method=quant_opts_pb2.QuantizationMethod(
            experimental_method=_ExperimentalMethod.DYNAMIC_RANGE))

    converted_model = quantize_model.quantize(input_saved_model_path,
                                              ['serving_default'], tags,
                                              output_directory,
                                              quantization_options)

    self.assertIsNotNone(converted_model)
    self.assertEqual(
        list(converted_model.signatures._signatures.keys()),
        ['serving_default'])

    output_loader = saved_model_loader.SavedModelLoader(output_directory)
    output_meta_graphdef = output_loader.get_meta_graph_def_from_tags(tags)
    # Currently conv is not supported.
    self.assertFalse(_contains_quantized_function_call(output_meta_graphdef))
Example 26
    def test_run_init_op(self, builder_cls):
        # Force test to run in graph mode.
        # The SavedModelLoader.restore_variables and
        # SavedModelLoader.run_init_ops methods are v1-only APIs that require
        # a session to work.
        with ops.Graph().as_default():
            self.export_graph_with_main_op(builder_cls)
            loader = loader_impl.SavedModelLoader(SAVED_MODEL_WITH_MAIN_OP)
            graph = ops.Graph()
            saver, _ = loader.load_graph(graph, ["foo_graph"])
            with self.session(graph=graph) as sess:
                loader.restore_variables(sess, saver)
                self.assertEqual(5, sess.run(_tensor_name("x")))
                self.assertEqual(11, sess.run(_tensor_name("y")))

                loader.run_init_ops(sess, ["foo_graph"])
                self.assertEqual(5, sess.run(_tensor_name("x")))
                self.assertEqual(7, sess.run(_tensor_name("y")))
Example 27
  def test_load_graph(self):
    loader = loader_impl.SavedModelLoader(SIMPLE_ADD_SAVED_MODEL)
    graph = ops.Graph()
    loader.load_graph(graph, ["foo_graph"])

    x = graph.get_tensor_by_name("x:0")
    y = graph.get_tensor_by_name("y:0")

    with self.assertRaises(KeyError):
      graph.get_tensor_by_name("z:0")

    with self.test_session(graph=graph) as sess:
      # Check that x and y are not initialized
      with self.assertRaises(errors.FailedPreconditionError):
        sess.run(x)
      with self.assertRaises(errors.FailedPreconditionError):
        sess.run(y)
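The one-shot `load` used in Example 3 combines the three steps that this test and Examples 20 and 22 exercise individually. Roughly, using the same loader API as above:

loader = loader_impl.SavedModelLoader(SIMPLE_ADD_SAVED_MODEL)
graph = ops.Graph()
saver, _ = loader.load_graph(graph, ["foo_graph"])  # 1. import the MetaGraphDef
with session.Session(graph=graph) as sess:
  loader.restore_variables(sess, saver)             # 2. restore variable values
  loader.run_init_ops(sess, ["foo_graph"])          # 3. run legacy init/main ops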
Example 28
def _get_signatures_from_saved_model(saved_model_path: str,
                                     signature_keys=None,
                                     tags=None):
    """Gets a map from signature keys to their SignatureDef from a saved model."""
    if tags is None:
        tags = set([tag_constants.SERVING])

    loader = saved_model_loader.SavedModelLoader(saved_model_path)
    meta_graphdef = loader.get_meta_graph_def_from_tags(tags)
    signatures = {}
    for key, signature_def in meta_graphdef.signature_def.items():
        if key == _INIT_OP_SIGNATURE_KEY:
            continue
        if signature_keys is not None and key not in signature_keys:
            continue
        signatures[key] = signature_def

    return signatures
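A hedged usage sketch of `_get_signatures_from_saved_model`; the directory is a placeholder:

# Hypothetical call: keep only the default serving signature.
signatures = _get_signatures_from_saved_model(
    '/tmp/quantized_model',
    signature_keys=['serving_default'],
    tags={tag_constants.SERVING})
assert set(signatures) == {'serving_default'}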
Example 29
    def test_load_graph(self, builder_cls):
        self.export_simple_graph(builder_cls)
        loader = loader_impl.SavedModelLoader(SIMPLE_ADD_SAVED_MODEL)
        graph = ops.Graph()
        loader.load_graph(graph, ["foo_graph"])

        x = graph.get_tensor_by_name(_tensor_name("x"))
        y = graph.get_tensor_by_name(_tensor_name("y"))

        with self.assertRaises(KeyError):
            graph.get_tensor_by_name(_tensor_name("z"))

        with graph.as_default(), self.session():
            # Check that x and y are not initialized
            with self.assertRaises(errors.FailedPreconditionError):
                self.evaluate(x)
            with self.assertRaises(errors.FailedPreconditionError):
                self.evaluate(y)
Example 30
    def test_restore_variables(self, builder_cls):
        # Force test to run in graph mode.
        # The SavedModelLoader.restore_variables method is a v1-only API requiring a
        # session to work.
        with ops.Graph().as_default():
            self.export_graph_with_main_op(builder_cls)
            loader = loader_impl.SavedModelLoader(SAVED_MODEL_WITH_MAIN_OP)
            with self.session() as sess:
                x = variables.VariableV1(0, name="x")
                y = variables.VariableV1(0, name="y")
                z = x * y

                self.evaluate(variables.global_variables_initializer())

                # There are variables to restore, so a saver must be created.
                with self.assertRaises(ValueError):
                    loader.restore_variables(sess, None)

                loader.restore_variables(sess, tf_saver.Saver())
                self.assertEqual(55, self.evaluate(z))