def test_parse_config_file_model_outputs(self):
    """ Test that model output quantization parameters are set correctly when using json config file """
    model = SingleResidual()
    model.eval()

    quantsim_config = {
        "defaults": {
            "ops": {},
            "params": {}
        },
        "params": {},
        "op_type": {},
        "supergroups": [],
        "model_input": {},
        "model_output": {
            "is_output_quantized": "True"
        }
    }
    with open('./data/quantsim_config.json', 'w') as f:
        json.dump(quantsim_config, f)

    sim = QuantizationSimModel(model, quant_scheme=QuantScheme.post_training_tf_enhanced,
                               config_file='./data/quantsim_config.json',
                               dummy_input=torch.rand(1, 3, 32, 32))
    for name, module in sim.model.named_modules():
        if isinstance(module, QcQuantizeWrapper):
            if name == 'fc':
                # fc produces the model output, so only its output quantizer should be enabled
                assert module.output_quantizers[0].enabled
            else:
                assert not module.output_quantizers[0].enabled
            assert not module.input_quantizer.enabled

    if os.path.exists('./data/quantsim_config.json'):
        os.remove('./data/quantsim_config.json')
def test_get_all_ops_in_neighborhood(self):
    """ Test that _get_all_ops_in_neighborhood() finds the ops surrounding a starting op """
    model = SingleResidual()
    model.eval()
    input_shapes = (1, 3, 32, 32)

    random_inputs = utils.create_rand_tensors_given_shapes(input_shapes)
    conn_graph = ConnectedGraph(model, random_inputs)
    starting_op = conn_graph.get_all_ops()['convolution_7']
    add_10_op = conn_graph.get_all_ops()['add_10']
    adaptive_avg_pool2d_9_op = conn_graph.get_all_ops()['adaptive_avg_pool2d_9']
    neighborhood = _get_all_ops_in_neighborhood(starting_op, 'output')
    assert len(neighborhood) == 3
    assert starting_op in neighborhood
    assert add_10_op in neighborhood
    assert adaptive_avg_pool2d_9_op in neighborhood
def test_parse_config_file_defaults(self):
    """ Test that default quantization parameters are set correctly when using json config file """
    model = SingleResidual()
    model.eval()

    quantsim_config = {
        "defaults": {
            "ops": {
                "is_output_quantized": "True",
                "is_symmetric": "False"
            },
            "params": {
                "is_quantized": "False",
                "is_symmetric": "True"
            }
        },
        "params": {},
        "op_type": {},
        "supergroups": [],
        "model_input": {},
        "model_output": {}
    }
    with open('./data/quantsim_config.json', 'w') as f:
        json.dump(quantsim_config, f)

    sim = QuantizationSimModel(model, quant_scheme=QuantScheme.post_training_tf_enhanced,
                               config_file='./data/quantsim_config.json',
                               dummy_input=torch.rand(1, 3, 32, 32), in_place=True)
    for name, module in sim.model.named_modules():
        if isinstance(module, QcQuantizeWrapper):
            # Output of add op is input quantized
            if name == 'relu3':
                assert module.input_quantizer.enabled
            else:
                assert not module.input_quantizer.enabled
            assert module.output_quantizers[0].enabled
            assert not module.input_quantizer.use_symmetric_encodings
            assert not module.output_quantizers[0].use_symmetric_encodings
            if module.param_quantizers:
                for _, param_quantizer in module.param_quantizers.items():
                    assert not param_quantizer.enabled
                    assert param_quantizer.use_symmetric_encodings

    if os.path.exists('./data/quantsim_config.json'):
        os.remove('./data/quantsim_config.json')
def test_find_num_inout_map(self):
    """ Test functionality to find the number of input and output tensors for each leaf module """
    model = SingleResidual()
    inout_map = utils.find_num_inout_tensors_per_module(model, [torch.rand(1, 3, 32, 32)])

    inout_counts_check = [num_outputs == (1, 1) for num_outputs in inout_map.values()]
    self.assertTrue(all(inout_counts_check))

    # Create a model with a layer with multi-outputs
    class MyLayer(torch.nn.Module):
        def __init__(self):
            super(MyLayer, self).__init__()

        def forward(self, inputs):
            return inputs * 100, inputs + 100

    class MyModel(torch.nn.Module):
        def __init__(self):
            super(MyModel, self).__init__()
            self.conv1 = torch.nn.Conv2d(3, 32, 3)
            self.relu1 = torch.nn.ReLU()
            self.layer1 = MyLayer()
            self.conv2 = torch.nn.Conv2d(32, 32, 3)
            self.conv3 = torch.nn.Conv2d(32, 32, 3)
            self.add = elementwise_ops.Add()

        def forward(self, x):
            x = self.conv1(x)
            x = self.relu1(x)
            x1, x2 = self.layer1(x)
            x1 = self.conv2(x1)
            x2 = self.conv3(x2)
            x = self.add(x1, x2)
            return x

    model = MyModel()
    inout_map = utils.find_num_inout_tensors_per_module(model, [torch.rand(1, 3, 32, 32)])

    inout_counts_check = [num_outputs == (1, 1) for num_outputs in inout_map.values()]
    self.assertFalse(all(inout_counts_check))
    self.assertEqual(2, inout_counts_check.count(False))
    self.assertEqual((1, 2), inout_map[model.layer1])
    self.assertEqual((2, 1), inout_map[model.add])
def test_supergroups_with_elementwise_add(self):
    """ Test that supergroup quantization parameters are set correctly when using json config file """
    model = SingleResidual()
    model.eval()
    quantsim_config = {
        "defaults": {
            "ops": {
                "is_output_quantized": "True"
            },
            "params": {}
        },
        "params": {},
        "op_type": {},
        "supergroups": [
            {
                "op_list": ["Add", "Relu"]
            }
        ],
        "model_input": {},
        "model_output": {}
    }
    with open('./data/quantsim_config.json', 'w') as f:
        json.dump(quantsim_config, f)

    # Use in_place=True here for easy access to modules through model instance variables
    sim = QuantizationSimModel(model, quant_scheme=QuantScheme.post_training_tf_enhanced,
                               config_file='./data/quantsim_config.json', in_place=True,
                               dummy_input=torch.rand(1, 3, 32, 32))
    for _, module in sim.model.named_modules():
        if isinstance(module, QcQuantizeWrapper):
            # Check configs for starts of supergroups
            if module == model.relu3:
                # If add were not part of the supergroup, relu's input quantizer would be enabled
                assert not module.input_quantizer.enabled

    if os.path.exists('./data/quantsim_config.json'):
        os.remove('./data/quantsim_config.json')