Example #1
 def test_torch_naive_quantizer(self):
     model = TorchModel()
     # Quantize weights to 8 bits for every Conv2d and Linear layer in the model.
     configure_list = [{
         'quant_types': ['weight'],
         'quant_bits': {
             'weight': 8,
         },
         'op_types': ['Conv2d', 'Linear']
     }]
     torch_compressor.NaiveQuantizer(model, configure_list).compress()
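The names `TorchModel` and `torch_compressor` are not defined in the snippet above. Below is a minimal sketch of what they might look like; the model class, layer sizes, and import path are assumptions chosen so the `'Conv2d'` and `'Linear'` op types in the configuration have something to match, not the original test's definitions.

 # Sketch under assumptions: a toy model with one Conv2d and one Linear layer.
 # The import path reflects NNI's PyTorch compression module; it may differ
 # between NNI versions.
 import torch.nn as nn
 import torch.nn.functional as F
 import nni.compression.torch as torch_compressor  # assumed import path

 class TorchModel(nn.Module):
     def __init__(self):
         super().__init__()
         self.conv1 = nn.Conv2d(1, 8, kernel_size=3)  # matched by 'Conv2d'
         self.fc1 = nn.Linear(8 * 26 * 26, 10)        # matched by 'Linear'

     def forward(self, x):
         x = F.relu(self.conv1(x))
         x = x.view(x.size(0), -1)
         return self.fc1(x)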
Example #2
 def test_torch_quantizer(self):
     model = TorchMnist()
     # 'default' applies the configuration to all layer types the quantizer
     # supports by default; here the model is passed to compress() instead of
     # the constructor.
     torch_compressor.NaiveQuantizer([{
         'op_types': ['default']
     }]).compress(model)
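Note the difference in call style between the two examples: Example #1 passes the model to the `NaiveQuantizer` constructor and calls `compress()` with no arguments, while Example #2 constructs the quantizer from the configuration list alone and passes the model to `compress(model)`. The second form appears to correspond to an older revision of the compressor API, so check which signature your installed version expects before copying either snippet.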