Example #1
    def test_prune(self):
        test = '''
        model:
          name: prune_yaml 
          framework: pytorch

        device: cpu

        pruning:
          magnitude:
            prune1:
              weights: ['layer1.0.conv1.weight',  'layer1.0.conv2.weight']
              target_sparsity: 0.3
              end_epoch: 1
            prune2:
              weights: ['layer1.0.conv3.weight', 'layer1.0.conv4.weight']
              target_sparsity: 0.2
          start_epoch: 0
          end_epoch: 20
          frequency: 2
          init_sparsity: 0.0
          target_sparsity: 0.5
        '''
        helper(test)
        config = conf.Conf('fake_conf.yaml')
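All of these snippets share the same scaffolding: the inline YAML is written to fake_conf.yaml by a small helper and then parsed back through conf.Conf. The snippets do not show that scaffolding, so the following is only a minimal sketch of it, assuming the methods live in a unittest.TestCase subclass and that conf is the framework's configuration module:

import unittest

# Hypothetical scaffolding assumed by the snippets; the real test module
# defines its own helper and imports the framework's conf module itself.
def helper(content):
    # Dump the inline YAML string into the file that conf.Conf() parses back.
    with open('fake_conf.yaml', 'w') as f:
        f.write(content)

class TestConf(unittest.TestCase):
    # The test_* methods shown in the examples would be defined here.
    pass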
Example #2
    def test_inputs_outputs(self):
        test = '''
        model:
          name: inout_yaml
          framework: mxnet
          inputs: x, y
        '''
        helper(test)
        config = conf.Conf('fake_conf.yaml')
        self.assertEqual(config.usr_cfg.model.inputs, ['x', 'y'])
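The assertion relies on the comma-separated string inputs: x, y being coerced into a list while the configuration is parsed. A stand-alone, hypothetical illustration of that coercion (not the library's actual schema code):

def input_to_list(value):
    # Hypothetical coercion: 'x, y' -> ['x', 'y']; lists pass through unchanged.
    if isinstance(value, str):
        return [item.strip() for item in value.split(',')]
    return list(value)

assert input_to_list('x, y') == ['x', 'y']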
Example #3
    def test_prune(self):
        test = '''
        model:
          name: imagenet_prune
          framework: pytorch

        pruning:
          train:
            start_epoch: 0
            end_epoch: 4
            dataloader:
              batch_size: 30
              dataset:
                ImageFolder:
                  root: /path/to/training/dataset
            optimizer:
              SGD:
                learning_rate: 0.1
                momentum: 0.1
                nesterov: True
                weight_decay: 0.1
            criterion:
              CrossEntropyLoss:
                reduction: sum
          approach:
            weight_compression:
              initial_sparsity: 0.0
              target_sparsity: 0.97
              pruners:
                - !Pruner
                    start_epoch: 1
                    end_epoch: 3
                    names: ['layer1.0.conv1.weight']

                - !Pruner
                    start_epoch: 0
                    end_epoch: 4
                    target_sparsity: 0.6
                    update_frequency: 2
                    names: ['layer1.0.conv2.weight']
        '''
        helper(test)
        config = conf.Conf('fake_conf.yaml')
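The pruners entries use the custom !Pruner YAML tag, so the loader must have a constructor registered for that tag. Below is a minimal PyYAML sketch of how such a tag can be made loadable; the Pruner class here is only a stand-in for the one the framework ships:

import yaml

class Pruner:
    # Stand-in Pruner: simply records the keyword arguments found in the
    # YAML mapping (start_epoch, end_epoch, names, target_sparsity, ...).
    def __init__(self, **kwargs):
        self.__dict__.update(kwargs)

def _pruner_constructor(loader, node):
    # Build a Pruner from the mapping that follows the !Pruner tag.
    return Pruner(**loader.construct_mapping(node, deep=True))

yaml.add_constructor('!Pruner', _pruner_constructor, Loader=yaml.SafeLoader)

pruners = yaml.safe_load('''
pruners:
  - !Pruner
      start_epoch: 1
      names: ['layer1.0.conv1.weight']
''')['pruners']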
Example #4
    def test_modelwise_conf_merge(self):
        test = '''
        model:
          name: inout_yaml 
          framework: mxnet
        quantization:
          model_wise:
            weight:
              algorithm:  minmax
            activation:
              algorithm:  minmax
        '''
        helper(test)
        config = conf.Conf('fake_conf.yaml')

        framework_modelwise_capability = {
            'CONV2D': {
                'activation': {
                    'dtype': ['uint8', 'fp32'],
                    'scheme': ['asym', 'sym'],
                    'granularity': ['per_tensor'],
                    'algorithm': ['minmax', 'kl']
                },
                'weight': {
                    'dtype': ['int8', 'fp32'],
                    'scheme': [
                        'sym',
                    ],
                    'granularity': ['per_channel', 'per_tensor'],
                    'algorithm': ['minmax']
                },
            },
        }

        tune_space = config.modelwise_tune_space(
            framework_modelwise_capability)
        self.assertEqual(tune_space['CONV2D']['activation']['algorithm'],
                         ['minmax'])
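The assertion verifies that the user's model_wise setting narrows the framework capability: ['minmax', 'kl'] intersected with the requested minmax leaves ['minmax']. A rough, hypothetical sketch of that per-field intersection (not the library's actual merge code):

def merge_field(capability, requested):
    # Keep only the capability values the user asked for; an unset field
    # keeps the full capability list.
    if requested is None:
        return list(capability)
    requested = requested if isinstance(requested, list) else [requested]
    return [value for value in capability if value in requested]

assert merge_field(['minmax', 'kl'], 'minmax') == ['minmax']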
Example #5
    def test_data_type(self):
        test = '''
        model:
          name: test
          framework: tensorflow

        quantization:
          calibration:
            sampling_size: 20
            dataloader:
              batch_size: 1
              dataset:
                dummy:
                  shape: [[224,224], [256,256]]
                  high: [128., 127]
                  low: 1
                  dtype: ['float32', 'int8']
        '''
        helper(test)
        cfg = conf.Conf('fake_conf.yaml').usr_cfg
        dataset = cfg['quantization']['calibration']['dataloader']['dataset']['dummy']
        self.assertTrue(isinstance(dataset['shape'][0], tuple))
        self.assertTrue(isinstance(dataset['shape'], list))
        self.assertTrue(isinstance(dataset['high'][1], float))
        self.assertTrue(isinstance(dataset['high'][0], float))
        self.assertTrue(isinstance(dataset['low'], float))

        test = '''
        model:
          name: test
          framework: tensorflow

        quantization:
          calibration:
            sampling_size: 20
            dataloader:
              batch_size: 1
              dataset:
                dummy:
                  shape: [224,224]
                  high: 128
                  low: 0.1
                  dtype: ['float32', 'int8']
        '''
        helper(test)
        cfg = conf.Conf('fake_conf.yaml').usr_cfg
        dataset = cfg['quantization']['calibration']['dataloader']['dataset']['dummy']
        self.assertTrue(isinstance(dataset['shape'], tuple))
        self.assertTrue(isinstance(dataset['high'], float)) 

        test = '''
        model:
          name: test
          framework: tensorflow

        quantization:
          calibration:
            sampling_size: 20
            dataloader:
              batch_size: 1
              dataset:
                style_transfer:
                  content_folder: test
                  style_folder: test
                  crop_ratio: 0.5
                  resize_shape: 10,10
              transform:
                RandomResizedCrop:
                  size: 10
                  scale: [0.07, 0.99]
                  ratio: [0.6, 0.8]
        '''
        helper(test)
        cfg = conf.Conf('fake_conf.yaml').usr_cfg
        shape_cfg = cfg['quantization']['calibration']['dataloader']['dataset']['style_transfer']['resize_shape']
        self.assertTrue(isinstance(shape_cfg, list)) 
        transform_cfg = cfg['quantization']['calibration']['dataloader']['transform']['RandomResizedCrop']
        self.assertTrue(isinstance(transform_cfg['scale'], list))
        self.assertTrue(isinstance(transform_cfg['ratio'], list))

        test = '''
        model:
          name: test
          framework: tensorflow

        quantization:
          calibration:
            sampling_size: 20
            dataloader:
              batch_size: 1
              dataset:
                style_transfer:
                  content_folder: test
                  style_folder: test
                  crop_ratio: 0.5
                  resize_shape: [10,10]
        '''
        helper(test)
        cfg = conf.Conf('fake_conf.yaml').usr_cfg
        shape_cfg = cfg['quantization']['calibration']['dataloader']['dataset']['style_transfer']['resize_shape']
        self.assertTrue(isinstance(shape_cfg, list)) 

        test = '''
        model:
          name: test
          framework: tensorflow

        quantization:
          calibration:
            sampling_size: 20
            dataloader:
              batch_size: 1
              dataset:
                dummy:
                  shape: [224,224]
              transform:
                BilinearImagenet:
                  height: 224
                  width: 224
                  mean_value: 123.68 116.78 103.94
        '''
        helper(test)
        cfg = conf.Conf('fake_conf.yaml').usr_cfg
        shape_cfg = cfg['quantization']['calibration']['dataloader']['dataset']['dummy']['shape']
        self.assertTrue(isinstance(shape_cfg, tuple)) 
        transform_cfg = cfg['quantization']['calibration']['dataloader']['transform']['BilinearImagenet']
        self.assertTrue(isinstance(transform_cfg['mean_value'], list))
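These assertions cover the type coercions applied to dataloader fields while the YAML is parsed: nested shape lists become tuples, high/low values become floats, and space-separated numbers such as mean_value: 123.68 116.78 103.94 become lists of floats. A small, hypothetical illustration of the last case (not the library's schema code):

def parse_mean_value(value):
    # Hypothetical coercion: '123.68 116.78 103.94' -> [123.68, 116.78, 103.94]
    if isinstance(value, str):
        return [float(part) for part in value.split()]
    return value

assert parse_mean_value('123.68 116.78 103.94') == [123.68, 116.78, 103.94]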
Example #6
    def test_ops_override(self):
        test = '''
        model:
          name: ops_override_yaml 
          framework: mxnet
        quantization:
          op_wise: {
            'conv1': {
              'activation':  {'dtype': ['uint8', 'fp32'], 'algorithm': ['minmax'], 'scheme':['sym']},
              'weight': {'dtype': ['int8', 'fp32'], 'algorithm': ['kl']}
            },
            'conv2': {
              'activation':  {'dtype': ['fp32']},
              'weight': {'dtype': ['fp32']}
            }
          }
        tuning:
          accuracy_criterion:
            relative: 0.01
          objective: performance
          
        '''
        helper(test)
        config = conf.Conf('fake_conf.yaml')

        framework_modelwise_capability = {
            'CONV2D': {
                'activation': {
                    'dtype': ['uint8', 'fp32'],
                    'scheme': ['asym', 'sym'],
                    'granularity': ['per_tensor'],
                    'algorithm': ['minmax', 'kl']
                },
                'weight': {
                    'dtype': ['int8', 'fp32'],
                    'scheme': [
                        'sym',
                    ],
                    'granularity': ['per_channel', 'per_tensor'],
                    'algorithm': ['minmax']
                },
            },
        }

        config.modelwise_tune_space(framework_modelwise_capability)

        framework_opwise_capability = {
            ('conv1', 'CONV2D'): {
                'activation': {
                    'dtype': ['uint8', 'fp32'],
                    'scheme': ['asym', 'sym'],
                    'granularity': ['per_tensor'],
                    'algorithm': ['minmax', 'kl']
                },
                'weight': {
                    'dtype': ['int8', 'fp32'],
                    'scheme': [
                        'sym',
                    ],
                    'granularity': ['per_channel', 'per_tensor'],
                    'algorithm': ['minmax']
                }},
            ('conv2', 'CONV2D'): {
                'activation': {
                    'dtype': ['uint8', 'fp32'],
                    'scheme': ['asym', 'sym'],
                    'granularity': ['per_tensor'],
                    'algorithm': ['minmax', 'kl']
                },
                'weight': {
                    'dtype': ['int8', 'fp32'],
                    'scheme': [
                        'sym',
                    ],
                    'granularity': ['per_channel', 'per_tensor'],
                    'algorithm': ['minmax']
                }},
        }

        tune_space = config.opwise_tune_space(framework_opwise_capability)
        self.assertEqual(tune_space[('conv1', 'CONV2D')]['weight']['algorithm'], ['minmax'])
        self.assertEqual(tune_space[('conv2', 'CONV2D')]['activation']['dtype'], ['fp32'])
Example #7
    def test_quantization(self):
        test = '''
        model:
          name: quant_yaml 
          framework: mxnet
        quantization:
          model_wise:
            weights:
            granularity: per_channel
        '''
        helper(test)
        self.assertRaises(RuntimeError, conf.Conf, 'fake_conf.yaml')

        test = '''
        model:
          name: quant_yaml 
          framework: mxnet
        quantization:
          model_wise:
          approach:
        '''
        helper(test)
        self.assertRaises(RuntimeError, conf.Conf, 'fake_conf.yaml')

        test = '''
        model:
          name: quant_yaml 
          framework: mxnet
        quantization:
          approach: post_training_static_quant, quant_aware_training
        '''
        helper(test)
        self.assertRaises(RuntimeError, conf.Conf, 'fake_conf.yaml')

        test = '''
        model:
          name: quant_yaml 
          framework: mxnet
        quantization:
          model_wise:
            activation:
              scheme: asym
              dtype: int8
            weight:
              scheme: asym
              dtype: int8
        '''
        helper(test)
        conf.Conf('fake_conf.yaml')

        test = '''
        model:
          name: quant_yaml 
          framework: mxnet
        quantization:
          model_wise:
            activation:
              scheme:
              dtype: int8
            weight:
              scheme: asym
              dtype: int8
        '''
        helper(test)
        self.assertRaises(RuntimeError, conf.Conf, 'fake_conf.yaml')
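Each of these cases feeds an intentionally invalid or incomplete quantization section and expects conf.Conf to reject it with a RuntimeError during schema validation, except for the one well-formed config, which must parse cleanly. Assuming the methods sit in a unittest.TestCase subclass as sketched after Example #1, the whole suite can be run with the standard unittest runner:

if __name__ == '__main__':
    import unittest
    unittest.main()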