Code Example #1
File: test_quantization.py  Project: zhuyglx/pytorch
    def test_manual(self):
        model = ManualLinearQATModel()
        model.qconfig = default_qat_qconfig

        model = prepare_qat(model)
        self.checkObservers(model)

        test_only_train_fn(model, self.train_data)
        # convert returns the converted model; reassign so the quantized
        # model (not the prepared float model) is what gets checked
        model = convert(model)

        def checkQuantized(model):
            # after conversion the float Linear layers should be replaced
            # by quantized nnq.Linear modules
            self.assertEqual(type(model.fc1), nnq.Linear)
            self.assertEqual(type(model.fc2), nnq.Linear)
            test_only_eval_fn(model, self.calib_data)

        checkQuantized(model)

        # the one-call quantize_qat API should produce an equivalent result
        model = ManualLinearQATModel()
        model.qconfig = default_qat_qconfig
        model = quantize_qat(model, test_only_train_fn, self.train_data)
        checkQuantized(model)
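
Both examples rely on names defined at the top of test_quantization.py. A minimal sketch of the imports they assume (based on the torch.quantization API used above; exact import paths may vary across PyTorch versions):

    # assumed imports for the snippets on this page (sketch; not copied from the file)
    import torch.nn.quantized as nnq
    from torch.quantization import (
        default_qat_qconfig,   # default qconfig for quantization-aware training
        prepare_qat,           # inserts observer/FakeQuant modules for QAT
        convert,               # swaps float modules for their quantized versions
        quantize_qat,          # one-call API: prepare_qat + train + convert
    )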
Code Example #2
File: test_quantization.py  Project: zhuyglx/pytorch
    def test_eval_only_fake_quant(self):
        r"""Using FakeQuant in evaluation only mode,
        this is useful for estimating accuracy loss when we quantize the
        network
        """
        model = ManualLinearQATModel()
        model.qconfig = default_qat_qconfig

        model = prepare_qat(model)
        self.checkObservers(model)

        model.eval()
        test_only_eval_fn(model, self.calib_data)
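
The same evaluation-only pattern can be used outside the test harness to estimate quantization error on a trained float model. A minimal sketch, where FloatModel and calib_loader are hypothetical names standing in for your own model and calibration data (not part of the original test):

    import torch
    from torch.quantization import default_qat_qconfig, prepare_qat

    float_model = FloatModel()                 # hypothetical trained float model
    float_model.qconfig = default_qat_qconfig  # attach the QAT qconfig
    fq_model = prepare_qat(float_model)        # insert FakeQuant/observer modules

    fq_model.eval()                            # evaluation only: no QAT training step
    with torch.no_grad():
        for data, _ in calib_loader:           # hypothetical calibration loader
            fq_model(data)                     # FakeQuant simulates quantization error
                                               # while observers collect statistics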