Example #1
    def test_quantization_debugger_layer_metrics(self):
        options = debugger.QuantizationDebugOptions(
            layer_debug_metrics={
                'l1_norm': lambda diffs: np.mean(np.abs(diffs))
            })
        quant_debugger = debugger.QuantizationDebugger(
            quant_debug_model_content=QuantizationDebuggerTest.debug_model,
            debug_dataset=_calibration_gen,
            debug_options=options)
        quant_debugger.run()

        expected_metrics = {
            'num_elements': 9,
            'stddev': 0.03850026,
            'mean_error': 0.01673192,
            'max_abs_error': 0.10039272,
            'mean_squared_error': 0.0027558778,
            'l1_norm': 0.023704167,
        }
        self.assertLen(quant_debugger.layer_statistics, 1)
        actual_metrics = next(iter(quant_debugger.layer_statistics.values()))

        self.assertCountEqual(expected_metrics.keys(), actual_metrics.keys())
        for key, value in expected_metrics.items():
            self.assertAlmostEqual(value, actual_metrics[key], places=5)
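For orientation outside the test harness, here is a minimal standalone sketch of the same flow through the public tf.lite.experimental API, assuming a recent TensorFlow build; the toy model and calibration generator below are placeholders for the test fixtures (QuantizationDebuggerTest.debug_model, _calibration_gen), not part of the original test.

    import numpy as np
    import tensorflow as tf

    # Toy stand-ins for the test fixtures; architecture and shapes are assumptions.
    model = tf.keras.Sequential(
        [tf.keras.layers.Conv2D(1, (3, 3), input_shape=(3, 3, 1))])

    def calibration_gen():
        for _ in range(5):
            yield [np.random.rand(1, 3, 3, 1).astype(np.float32)]

    converter = tf.lite.TFLiteConverter.from_keras_model(model)
    converter.optimizations = [tf.lite.Optimize.DEFAULT]
    converter.representative_dataset = calibration_gen

    # Passing a converter lets the debugger produce the debug-mode model itself,
    # instead of taking pre-converted bytes via quant_debug_model_content.
    quant_debugger = tf.lite.experimental.QuantizationDebugger(
        converter=converter, debug_dataset=calibration_gen)
    quant_debugger.run()

    for layer_name, layer_metrics in quant_debugger.layer_statistics.items():
        print(layer_name, layer_metrics)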
Example #2
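(The extra quantized_io argument indicates that this variant of the test is parameterized over quantized versus float model I/O, presumably via a @parameterized decorator that this excerpt omits.)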
    def test_quantization_debugger_layer_metrics(self, quantized_io):
        if quantized_io:
            debug_model = QuantizationDebuggerTest.debug_model_int8
        else:
            debug_model = QuantizationDebuggerTest.debug_model_float

        options = debugger.QuantizationDebugOptions(
            layer_debug_metrics={
                'l1_norm': lambda diffs: np.mean(np.abs(diffs))
            })
        quant_debugger = debugger.QuantizationDebugger(
            quant_debug_model_content=debug_model,
            debug_dataset=_calibration_gen,
            debug_options=options)
        quant_debugger.run()

        expected_metrics = {
            'num_elements': 9,
            'stddev': 0.03850026,
            'mean_error': 0.01673192,
            'max_abs_error': 0.10039272,
            'mean_squared_error': 0.0027558778,
            'l1_norm': 0.023704167,
        }
        self.assertLen(quant_debugger.layer_statistics, 1)
        actual_metrics = next(iter(quant_debugger.layer_statistics.values()))

        self.assertCountEqual(expected_metrics.keys(), actual_metrics.keys())
        for key, value in expected_metrics.items():
            self.assertAlmostEqual(value, actual_metrics[key], places=5)

        buffer = io.StringIO()
        quant_debugger.layer_statistics_dump(buffer)
        reader = csv.DictReader(buffer.getvalue().split())
        actual_values = next(iter(reader))

        expected_values = expected_metrics.copy()
        expected_values.update({
            'op_name': 'CONV_2D',
            'tensor_idx': 7 if quantized_io else 8,
            'scales': [0.15686275],
            'zero_points': [-128],
            'tensor_name': r'Identity[1-9]?$'
        })
        for key, value in expected_values.items():
            if isinstance(value, str):
                self.assertIsNotNone(
                    re.match(value, actual_values[key]),
                    'String is different from expected string. Please fix test code if'
                    " it's being affected by graph manipulation changes.")
            elif isinstance(value, list):
                self.assertAlmostEqual(value[0],
                                       float(actual_values[key][1:-1]),
                                       places=5)
            else:
                self.assertAlmostEqual(value,
                                       float(actual_values[key]),
                                       places=5)
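Beyond the in-memory StringIO round-trip shown above, layer_statistics_dump accepts any writable text file; a minimal sketch reusing quant_debugger from this example (the output path is arbitrary):

    # Writes one CSV row per instrumented layer: the debug metrics plus the
    # op_name, tensor_idx, scales, zero_points and tensor_name columns.
    with open('/tmp/layer_stats.csv', 'w') as f:
        quant_debugger.layer_statistics_dump(f)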
Example #3

    def test_quantization_debugger_non_debug_model_raises_ValueError(self):
        normal_quant_model = _quantize_model(
            QuantizationDebuggerTest.tf_model, _calibration_gen, debug=False)

        with self.assertRaisesRegex(
                ValueError, 'Please check if the quantized model is in debug mode'):
            debugger.QuantizationDebugger(
                quant_debug_model_content=normal_quant_model,
                debug_dataset=_calibration_gen)
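The check exists because the debugger instruments NumericVerify ops, which only appear in models converted in debug mode; a plain quantized model has none. A short sketch of guarding a load, where plain_model_bytes is a placeholder:

    try:
        quant_debugger = debugger.QuantizationDebugger(
            quant_debug_model_content=plain_model_bytes,
            debug_dataset=_calibration_gen)
    except ValueError:
        # No NumericVerify ops found: re-convert the model in debug mode first.
        raise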
Example #4
    def test_quantization_debugger_wrong_input_raises_ValueError(self):
        def wrong_calibration_gen():
            for _ in range(5):
                yield [
                    np.ones((1, 3, 3, 1), dtype=np.float32),
                    np.ones((1, 3, 3, 1), dtype=np.float32)
                ]

        quant_debugger = debugger.QuantizationDebugger(
            quant_debug_model_content=QuantizationDebuggerTest.debug_model,
            debug_dataset=wrong_calibration_gen)
        with self.assertRaisesRegex(
                ValueError,
                r'inputs provided \(2\).+inputs to the model \(1\)'):
            quant_debugger.run()
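The generator above deliberately yields two arrays for a single-input model, which is what the error message flags; a correct generator for this model would presumably yield one array per sample:

    def correct_calibration_gen():
        # Exactly as many arrays per yield as the model has inputs: here,
        # one (1, 3, 3, 1) float32 tensor.
        for _ in range(5):
            yield [np.ones((1, 3, 3, 1), dtype=np.float32)]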
Example #5
    def test_quantization_debugger_model_metrics(self):
        options = debugger.QuantizationDebugOptions(
            model_debug_metrics={
                'stdev': lambda x, y: np.std(x[0] - y[0])
            })
        quant_debugger = debugger.QuantizationDebugger(
            quant_debug_model_content=QuantizationDebuggerTest.debug_model,
            float_model_content=QuantizationDebuggerTest.float_model,
            debug_dataset=_calibration_gen,
            debug_options=options)
        quant_debugger.run()

        expected_metrics = {'stdev': 0.050998904}
        actual_metrics = quant_debugger.model_statistics

        self.assertCountEqual(expected_metrics.keys(), actual_metrics.keys())
        for key, value in expected_metrics.items():
            self.assertAlmostEqual(value, actual_metrics[key], places=5)
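Note the different callback signature from layer_debug_metrics: model metrics receive the float model's outputs and the quantized model's outputs rather than per-layer diffs. A sketch extending the options with one additional, hypothetical metric:

    options = debugger.QuantizationDebugOptions(
        model_debug_metrics={
            # x: outputs of the float model, y: outputs of the quantized model,
            # each indexed in the model's output order.
            'stdev': lambda x, y: np.std(x[0] - y[0]),
            'max_abs_diff': lambda x, y: np.max(np.abs(x[0] - y[0])),
        })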
Example #6
    def test_quantization_debugger_creation_counter(self, increase_call):
        debug_model = QuantizationDebuggerTest.debug_model_float
        debugger.QuantizationDebugger(quant_debug_model_content=debug_model,
                                      debug_dataset=_calibration_gen)
        increase_call.assert_called_once()
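The bare increase_call parameter implies a mock decorator that this excerpt drops; in the original test file the creation counter is presumably patched roughly as follows (the patch target is an inference, not shown in the excerpt):

    from unittest import mock

    # Assumed shape of the omitted decorator; `metrics.TFLiteMetrics` and the
    # counter name are inferred from the `increase_call` parameter.
    @mock.patch.object(metrics.TFLiteMetrics,
                       'increase_counter_debugger_creation')
    def test_quantization_debugger_creation_counter(self, increase_call):
        ...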