def test_debugger_creation_counter_increase_multiple_same_topic_success(self):
  """Debugger-creation counter accumulates across instances and deletions.

  The counter is module-level state, so bumping it from a second
  TFLiteMetrics instance — or garbage-collecting the first instance —
  must never reset the running total.
  """
  # NOTE: the original wrapped this body in a bare `except:` that re-raised
  # a generic Exception, which destroyed the real traceback and even caught
  # the assertions themselves. A unittest method already fails on any
  # unexpected exception, so no wrapper is needed.
  stub = metrics.TFLiteMetrics()
  stub.increase_counter_debugger_creation()
  self.assertEqual(metrics._counter_debugger_creation.get_cell().value(), 1)
  stub2 = metrics.TFLiteMetrics()
  stub2.increase_counter_debugger_creation()
  self.assertEqual(metrics._counter_debugger_creation.get_cell().value(), 2)
  # Dropping the first instance must not affect the shared counter.
  del stub
  gc.collect()
  stub2.increase_counter_debugger_creation()
  self.assertEqual(metrics._counter_debugger_creation.get_cell().value(), 3)
# Ejemplo n.º 2 (score: 0)
    def _initialize_stats(self):
        """Set up tensor/op bookkeeping and debug metrics for the debugger.

        Maps every output tensor index to the op that produces it, locates
        the NumericVerify tensor details in the quantized model, overlays any
        caller-supplied layer metrics on the defaults, and records a
        debugger-creation metric event.

        Raises:
          ValueError: if no NumericVerify tensors are found, i.e. the
            quantized model was not converted in debug mode.
        """
        # TODO(b/177749613) : Fix the dependency on tf.lite._get_ops_details()
        # NumericVerify ops only expose their quantized *input* tensor index,
        # so first build a reverse map: output tensor index -> producing op.
        self._defining_op = {}
        for op in self._quant_interpreter._get_ops_details():  # pylint: disable=protected-access
            for out_idx in op['outputs']:
                self._defining_op[out_idx] = op['index']

        self._numeric_verify_tensor_details = None
        self._numeric_verify_op_details = None
        if not self._get_numeric_verify_tensor_details():
            raise ValueError(
                'Please check if the quantized model is in debug mode')

        # Defaults first; user-provided metrics with the same name win.
        self._layer_debug_metrics = _DEFAULT_LAYER_DEBUG_METRICS.copy()
        custom_metrics = self._debug_options.layer_debug_metrics
        if custom_metrics:
            self._layer_debug_metrics.update(custom_metrics)

        self.layer_statistics = None
        self.model_statistics = None

        self._metrics = metrics_stub.TFLiteMetrics()
        self._metrics.increase_counter_debugger_creation()
# Ejemplo n.º 3 (score: 0)
 def test_converter_params_multiple_set_success(self):
     """Setting the same param twice keeps only the most recent value."""
     recorder = metrics.TFLiteMetrics()
     for param_value in ('value', 'value1'):
         recorder.set_converter_param('name', param_value)
     self.assertEqual(
         metrics._gauge_conversion_params.get_cell('name').value(), 'value1')
# Ejemplo n.º 4 (score: 0)
 def test_interpreter_creation_counter_increase_success(self):
     """Interpreter-creation counter reads 1 after a single increment."""
     recorder = metrics.TFLiteMetrics()
     recorder.increase_counter_interpreter_creation()
     cell = recorder._counter_interpreter_creation.get_cell('python')
     self.assertEqual(cell.value(), 1)
# Ejemplo n.º 5 (score: 0)
 def test_TFLiteMetrics_creation_fail2_with_only_model_path(self):
     """Passing only model_path (no hash) must raise ValueError."""
     self.assertRaises(
         ValueError, metrics.TFLiteMetrics, model_path='/path/to/model')
# Ejemplo n.º 6 (score: 0)
 def test_TFLiteMetrics_creation_fails_with_only_md5(self):
     """Passing only the md5 argument must raise ValueError."""
     self.assertRaises(ValueError, metrics.TFLiteMetrics, md5='md5')
# Ejemplo n.º 7 (score: 0)
 def test_TFLiteMetrics_creation_arg_success(self):
     """Construction with both positional arguments must not raise."""
     unused_metrics = metrics.TFLiteMetrics('md5', '/path/to/model')
# Ejemplo n.º 8 (score: 0)
 def test_TFLiteMetrics_creation_no_arg_success(self):
     """Construction with no arguments must not raise."""
     unused_metrics = metrics.TFLiteMetrics()
# Ejemplo n.º 9 (score: 0)
 def test_converter_success_counter_increase_success(self):
     """Conversion-success counter reads 1 after one increment."""
     recorder = metrics.TFLiteMetrics()
     recorder.increase_counter_converter_success()
     counter_value = metrics._counter_conversion_success.get_cell().value()
     self.assertEqual(counter_value, 1)
# Ejemplo n.º 10 (score: 0)
 def test_TFLiteMetrics_creation_fails_with_only_hash(self):
     """Passing only model_hash (no path) must raise ValueError."""
     self.assertRaises(ValueError, metrics.TFLiteMetrics, model_hash='hash')
# Ejemplo n.º 11 (score: 0)
 def test_converter_params_set_latency(self):
     """set_converter_latency stores the given latency in the gauge."""
     recorder = metrics.TFLiteMetrics()
     recorder.set_converter_latency(34566)
     latency = metrics._gauge_conversion_latency.get_cell().value()
     self.assertEqual(latency, 34566)