Exemplo n.º 1
0
    def test_single_layer(self):
        r"""Quantize SingleLayerLinearModel (a single Linear module) and verify
        the module gets swapped for its quantized counterpart nnq.Linear.
        """
        model = SingleLayerLinearModel()
        prepare(model)

        # Observers plus quant/dequant wrapper modules should now be in place.
        self.checkNoPrepModules(model)
        self.checkHasPrepModules(model.fc1)
        self.checkObservers(model)

        # Calibrate, then convert to the quantized model.
        test_only_eval_fn(model, self.calib_data)
        convert(model)

        def assert_quantized(m):
            # Post-conversion invariants: fc1 is a wrapped quantized Linear
            # and the model still evaluates and scripts cleanly.
            self.checkNoPrepModules(m)
            self.checkHasPrepModules(m.fc1)
            self.checkWrappedQuantizedLinear(m.fc1)
            test_only_eval_fn(m, self.calib_data)
            self.checkScriptable(m, self.calib_data)

        assert_quantized(model)

        # The one-line quantize() API should produce an equivalent model.
        assert_quantized(
            quantize(SingleLayerLinearModel(), test_only_eval_fn,
                     self.calib_data))
Exemplo n.º 2
0
    def test_single_layer(self):
        r"""Quantize SingleLayerLinearModel (a single Linear module) and verify
        the module gets swapped for its quantized counterpart nnq.Linear.
        """
        model = SingleLayerLinearModel()
        qconfig_dict = {'': default_qconfig}
        model = prepare(model, qconfig_dict)

        # Observers plus quant/dequant wrapper modules should now be in place.
        self.checkNoPrepModules(model)
        self.checkHasPrepModules(model.fc1)
        self.checkObservers(model)

        # Calibrate, then convert to the quantized model.
        default_eval_fn(model, calib_data)
        convert(model)

        def assert_quantized(m):
            # Post-conversion invariants: fc1 is a quantized Linear and the
            # model still evaluates.
            self.checkNoPrepModules(m)
            self.checkHasPrepModules(m.fc1)
            self.checkQuantizedLinear(m.fc1)
            default_eval_fn(m, calib_data)

        assert_quantized(model)

        # The one-line quantize() API should produce an equivalent model.
        assert_quantized(
            quantize(SingleLayerLinearModel(), default_eval_fn, calib_data,
                     qconfig_dict))
Exemplo n.º 3
0
    def test_record_observer(self):
        r"""With the debug qconfig, observers should record every tensor value
        seen during evaluation.
        """
        model = SingleLayerLinearModel()
        model.qconfig = default_debug_qconfig
        prepare(model)

        # Two evaluation passes -> two recorded tensors per calibration point.
        test_only_eval_fn(model, self.calib_data)
        test_only_eval_fn(model, self.calib_data)

        observers = {}
        get_observer_dict(model, observers)

        key = 'fc1.module.observer'
        self.assertTrue(key in observers.keys(),
                        'observer is not recorded in the dict')
        self.assertEqual(len(observers[key].get_tensor_value()),
                         2 * len(self.calib_data))
        # The first recorded value matches a fresh forward pass on the same input.
        self.assertEqual(observers[key].get_tensor_value()[0],
                         model(self.calib_data[0][0]))
Exemplo n.º 4
0
    def test_tensor_observer(self):
        r"""With the debug qconfig, every activation seen during evaluation
        should be dumped into the tensor dict.
        """
        model = SingleLayerLinearModel()
        model.qconfig = default_debug_qconfig
        prepare(model)

        # Two evaluation passes -> two recorded activations per calibration point.
        test_only_eval_fn(model, self.calib_data)
        test_only_eval_fn(model, self.calib_data)

        tensor_dict = {}
        dump_tensor(model, tensor_dict)

        # The dict can be torch.save()'d and torch.load()'ed in bento for
        # further analysis.
        key = 'fc1.module.activation'
        self.assertTrue(key in tensor_dict.keys(),
                        'activation is not recorded in the dict')
        self.assertEqual(len(tensor_dict[key]), 2 * len(self.calib_data))
Exemplo n.º 5
0
    def test_single_layer(self):
        r"""Compare the result of quantizing a single linear layer in
        eager mode versus graph (script) mode.
        """
        # Eager-mode reference model and a plain model for script mode.
        annotated_linear_model = AnnotatedSingleLayerLinearModel()
        linear_model = SingleLayerLinearModel()
        # Share the eager model's parameters so the two quantized models
        # are numerically comparable afterwards.
        eager_fc1 = annotated_linear_model.fc1.module
        linear_model.fc1.weight = torch.nn.Parameter(eager_fc1.weight.detach())
        linear_model.fc1.bias = torch.nn.Parameter(eager_fc1.bias.detach())
        model_eager = quantize(annotated_linear_model, test_only_eval_fn,
                               self.calib_data)

        qconfig_dict = {
            '': QConfig(
                activation=default_observer,
                weight=default_weight_observer)
        }
        model_script = quantize_script(
            torch.jit.script(linear_model),
            qconfig_dict,
            test_only_eval_fn,
            [self.calib_data],
            inplace=False)

        sample = self.calib_data[0][0]
        result_eager = model_eager(sample)
        # Fuse the quant ops in the scripted fc1 submodule before running it.
        fc1_graph = model_script._c._get_module('fc1')._get_method('forward').graph
        torch._C._jit_pass_quant_fusion(fc1_graph)
        result_script = model_script._c._get_method('forward')(sample)
        self.assertEqual(result_eager, result_script)
Exemplo n.º 6
0
    def test_single_layer(self):
        r"""Quantize SingleLayerLinearModel (a single Linear module) and verify
        eager-mode and script-mode quantization agree on the output.
        """
        # Eager-mode quantization via the one-line API.
        model_eager = quantize(AnnotatedSingleLayerLinearModel(),
                               test_only_eval_fn, self.calib_data)

        # Script-mode quantization of the equivalent plain model.
        qconfig_dict = {
            '':
            QConfig(activation=default_observer,
                    weight=default_weight_observer)
        }
        model_script = quantize_script(
            torch.jit.script(SingleLayerLinearModel()), qconfig_dict,
            test_only_eval_fn, [self.calib_data])

        sample = self.calib_data[0][0]
        result_eager = model_eager(sample)
        result_script = model_script._c._get_method('forward')(sample)
        self.assertEqual(result_eager, result_script)
Exemplo n.º 7
0
    def test_single_layer(self):
        r"""Quantize SingleLayerLinearModel (a single Linear module) and verify
        the module gets swapped for its quantized counterpart nnq.Linear.
        """
        model = SingleLayerLinearModel()
        model = prepare(model)

        # Observers plus quant/dequant wrapper modules should now be in place.
        self.checkNoPrepModules(model)
        self.checkHasPrepModules(model.fc1)
        self.checkObservers(model)

        # Calibrate, then convert to the quantized model.
        test_only_eval_fn(model, self.calib_data)
        model = convert(model)

        def assert_quantized(m):
            # Post-conversion invariants: fc1 is a wrapped quantized Linear
            # and the model still evaluates and scripts cleanly.
            self.checkNoPrepModules(m)
            self.checkHasPrepModules(m.fc1)
            self.checkWrappedQuantizedLinear(m.fc1)
            test_only_eval_fn(m, self.calib_data)
            self.checkScriptable(m, self.calib_data)

        assert_quantized(model)

        # One-line API, out-of-place: the source model must be left untouched.
        base = SingleLayerLinearModel()
        keys_before = set(base.state_dict().keys())
        assert_quantized(quantize(base, test_only_eval_fn, self.calib_data))
        keys_after = set(base.state_dict().keys())
        self.assertEqual(keys_before,
                         keys_after)  # simple check that nothing changed

        # One-line API, in-place: the given model itself is quantized.
        model = SingleLayerLinearModel()
        quantize(model, test_only_eval_fn, self.calib_data, inplace=True)
        assert_quantized(model)