Example no. 1
    def test_get_ordered_ops_with_single_residual(self):
        """
        Test get_ordered_ops with a simple single-residual model
        """
        g = tf.Graph()

        with g.as_default():
            single_residual()

        ordered_ops = get_ordered_ops(g, ['input_1'], ['single_residual/Softmax'])

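        # get_ordered_ops should return ops in topological order, so the downstream
        # conv2d_4 must appear after the upstream conv2d_1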
        self.assertTrue(ordered_ops.index(g.get_operation_by_name('conv2d_4/Conv2D')) >
                        ordered_ops.index(g.get_operation_by_name('conv2d_1/Conv2D')))

    def test_single_residual_get_op_product_graph(self):
        """ Test connected graph construction on single residual model """

        tf.compat.v1.reset_default_graph()
        _ = single_residual()
        conn_graph = ConnectedGraph(tf.compat.v1.get_default_graph(), ['input_1'], ['single_residual/Softmax'])
        self.assertTrue(validate_branch_ops(conn_graph))
        self.assertTrue(validate_product_tensor_lists(conn_graph))
        self.assertEqual(1, conn_graph.branch_count)
        self.assertEqual(18, len(conn_graph.get_all_ops()))
        # 17 products from interop connections, 20 from parameters
        self.assertEqual(37, len(conn_graph.get_all_products()))

    def test_connected_graph_with_detached_ops(self):
        """ Test connected graph construction on a graph with detached ops """
        tf.compat.v1.reset_default_graph()
        _ = single_residual()

        # Detach everything starting from conv2d_4/Conv2D and below
        detach_inputs(tf.compat.v1.get_default_graph().get_operation_by_name('conv2d_4/Conv2D'))
        conn_graph = ConnectedGraph(tf.compat.v1.get_default_graph(), ['input_1'], ['Relu_2'])
        self.assertTrue(validate_branch_ops(conn_graph))
        self.assertTrue(validate_product_tensor_lists(conn_graph))
        self.assertEqual(1, conn_graph.branch_count)
        self.assertEqual(13, len(conn_graph.get_all_ops()))
        # 12 products from interop connections, 16 from parameters
        self.assertEqual(28, len(conn_graph.get_all_products()))

    def test_empty_config_file(self):
        """ Check that with an empty config file, all op modes and use symmetric encoding settings are set to
        passThrough and False respectively. """
        tf.compat.v1.reset_default_graph()
        sess = tf.compat.v1.Session()
        with sess.graph.as_default():
            _ = single_residual()
            init = tf.compat.v1.global_variables_initializer()
            sess.run(init)

        quantsim_config = {
            "defaults": {
                "ops": {},
                "params": {}
            },
            "params": {},
            "op_type": {},
            "supergroups": [],
            "model_input": {},
            "model_output": {}
        }
        with open('./quantsim_config.json', 'w') as f:
            json.dump(quantsim_config, f)

        sim = QuantizationSimModel(sess, ['input_1'],
                                   ['single_residual/Softmax'],
                                   config_file='./quantsim_config.json')
        all_quantize_ops = [
            op for op in sim.session.graph.get_operations()
            if op.type == 'QcQuantize'
        ]
        # the quantized graph must contain at least one QcQuantize op
        self.assertTrue(all_quantize_ops)
        for op in all_quantize_ops:
            is_symmetric_tensor = sim.session.graph.get_tensor_by_name(
                op.name + '_use_symmetric_encoding:0')
            op_mode_tensor = sim.session.graph.get_tensor_by_name(op.name +
                                                                  '_op_mode:0')
            self.assertEqual(sim.session.run(is_symmetric_tensor), False)
            self.assertEqual(sim.session.run(op_mode_tensor),
                             int(pymo.TensorQuantizerOpMode.passThrough))
        if os.path.exists('./quantsim_config.json'):
            os.remove('./quantsim_config.json')
        sess.close()
        sim.session.close()
        tf.compat.v1.reset_default_graph()
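
    def _compute_encodings_for_single_residual(self, sim):
        """
        Hypothetical helper, included only as a hedged sketch and not part of the original
        tests: once a QuantizationSimModel has been built as above, its quantizers are
        normally calibrated by driving a forward pass through sim.session via
        compute_encodings(). Tensor names assume the single_residual model, and numpy is
        assumed to be imported as np.
        """
        def forward_pass(session, _):
            input_tensor = session.graph.get_tensor_by_name('input_1:0')
            output_tensor = session.graph.get_tensor_by_name('single_residual/Softmax:0')
            session.run(output_tensor, feed_dict={input_tensor: np.random.rand(1, 16, 16, 3)})

        sim.compute_encodings(forward_pass, None)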
Example no. 5
    def test_get_ordered_operations(self):
        """ Test the creation of the ordered operations list """
        tf.compat.v1.reset_default_graph()
        sess = tf.compat.v1.Session()
        with sess.graph.as_default():
            _ = single_residual()
            conn_graph = ConnectedGraph(sess.graph, ["input_1"], ['Relu_2'])
            ordered_ops = get_ordered_ops(conn_graph.starting_ops)

        # check that there are the same number of modules in the ordered ops list as there are in the main ops dict
        self.assertEqual(len(ordered_ops), len(conn_graph.get_all_ops()))

        # check that for any module in the ordered ops list, all of its parent modules are earlier in the list
        seen_ops = set()
        for op in ordered_ops:
            input_products = op.get_input_products()
            for product in input_products:
                self.assertTrue(product.producer in seen_ops)
            seen_ops.add(op)

    def test_parse_config_file_model_outputs(self):
        """ Test that model output quantization parameters are set correctly when using json config file """
        tf.compat.v1.reset_default_graph()
        sess = tf.compat.v1.Session()
        with sess.graph.as_default():
            _ = single_residual()
            init = tf.compat.v1.global_variables_initializer()
            sess.run(init)

        quantsim_config = {
            "defaults": {
                "ops": {},
                "params": {}
            },
            "params": {},
            "op_type": {},
            "supergroups": [],
            "model_input": {},
            "model_output": {
                "is_output_quantized": "True"
            }
        }
        with open('./quantsim_config.json', 'w') as f:
            json.dump(quantsim_config, f)

        sim = QuantizationSimModel(sess, ['input_1'],
                                   ['single_residual/Softmax'],
                                   config_file='./quantsim_config.json')

        op_mode_tensor = sim.session.graph.get_tensor_by_name(
            'single_residual/Softmax_quantized_op_mode:0')
        self.assertEqual(sim.session.run(op_mode_tensor),
                         int(pymo.TensorQuantizerOpMode.updateStats))

        if os.path.exists('./quantsim_config.json'):
            os.remove('./quantsim_config.json')
        sess.close()
        sim.session.close()
        tf.compat.v1.reset_default_graph()

    def test_parse_config_file_supergroups(self):
        """ Test that supergroup quantization parameters are set correctly when using json config file """
        tf.compat.v1.reset_default_graph()
        sess = tf.compat.v1.Session()
        with sess.graph.as_default():
            _ = single_residual()
            init = tf.compat.v1.global_variables_initializer()
            sess.run(init)

        quantsim_config = {
            "defaults": {
                "ops": {
                    "is_output_quantized": "True"
                },
                "params": {}
            },
            "params": {},
            "op_type": {},
            "supergroups": [{
                "op_list": ["Conv", "AveragePool"]
            }, {
                "op_list": ["Add", "Relu"]
            }, {
                "op_list": ["Conv", "BatchNormalization"]
            }],
            "model_input": {},
            "model_output": {}
        }
        with open('./quantsim_config.json', 'w') as f:
            json.dump(quantsim_config, f)
        sim = QuantizationSimModel(sess, ['input_1'],
                                   ['single_residual/Softmax'],
                                   config_file='./quantsim_config.json')

        activation_quantizers = [
            'conv2d/BiasAdd_quantized', 'conv2d_1/BiasAdd_quantized',
            'conv2d_2/BiasAdd_quantized', 'conv2d_3/BiasAdd_quantized',
            'conv2d_4/BiasAdd_quantized', 'input_1_quantized',
            'batch_normalization/cond/Merge_quantized', 'Relu_quantized',
            'max_pooling2d/MaxPool_quantized',
            'batch_normalization_1/cond/Merge_quantized', 'Add_quantized',
            'Relu_2_quantized', 'average_pooling2d/AvgPool_quantized',
            'single_residual/Softmax_quantized', 'Relu_1_quantized'
        ]

        for activation_quantizer in activation_quantizers:
            op_mode_tensor = sim.session.graph.get_tensor_by_name(
                activation_quantizer + '_op_mode:0')
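            # Expected passThrough quantizers: the model input is not quantized by default,
            # and the output quantizer of the first op in each supergroup
            # (Conv+BatchNormalization, Conv+AveragePool, Add+Relu) is disabled so the pair
            # behaves as a fused unit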
            if activation_quantizer in [
                    'input_1_quantized', 'conv2d/BiasAdd_quantized',
                    'conv2d_3/BiasAdd_quantized', 'Add_quantized',
                    'conv2d_4/BiasAdd_quantized'
            ]:
                self.assertEqual(sim.session.run(op_mode_tensor),
                                 int(pymo.TensorQuantizerOpMode.passThrough))
            else:
                self.assertEqual(sim.session.run(op_mode_tensor),
                                 int(pymo.TensorQuantizerOpMode.updateStats))

        if os.path.exists('./quantsim_config.json'):
            os.remove('./quantsim_config.json')
        sess.close()
        sim.session.close()
        tf.compat.v1.reset_default_graph()

    def test_parse_config_file_op_type(self):
        """ Test that op specific quantization parameters are set correctly when using json config file """
        tf.compat.v1.reset_default_graph()
        sess = tf.compat.v1.Session()
        with sess.graph.as_default():
            _ = single_residual()
            init = tf.compat.v1.global_variables_initializer()
            sess.run(init)

        quantsim_config = {
            "defaults": {
                "ops": {},
                "params": {}
            },
            "params": {},
            "op_type": {
                "Conv": {
                    "is_input_quantized": "True",
                    "params": {
                        "bias": {
                            "is_quantized": "True",
                            "is_symmetric": "True"
                        }
                    }
                },
                "Gemm": {
                    "is_input_quantized": "True",
                    "params": {
                        "bias": {
                            "is_quantized": "True",
                            "is_symmetric": "True"
                        }
                    }
                },
                "BatchNormalization": {
                    "is_input_quantized": "True"
                }
            },
            "supergroups": [],
            "model_input": {},
            "model_output": {}
        }
        with open('./quantsim_config.json', 'w') as f:
            json.dump(quantsim_config, f)
        sim = QuantizationSimModel(sess, ['input_1'],
                                   ['single_residual/Softmax'],
                                   config_file='./quantsim_config.json')

        activation_quantizers = [
            'conv2d/BiasAdd_quantized', 'conv2d_1/BiasAdd_quantized',
            'conv2d_2/BiasAdd_quantized', 'conv2d_3/BiasAdd_quantized',
            'conv2d_4/BiasAdd_quantized', 'input_1_quantized',
            'batch_normalization/cond/Merge_quantized', 'Relu_quantized',
            'max_pooling2d/MaxPool_quantized',
            'batch_normalization_1/cond/Merge_quantized', 'Add_quantized',
            'Relu_2_quantized', 'average_pooling2d/AvgPool_quantized',
            'single_residual/Softmax_quantized', 'Relu_1_quantized'
        ]

        weight_quantizers = [
            'conv2d/Conv2D/ReadVariableOp_quantized',
            'conv2d_1/Conv2D/ReadVariableOp_quantized',
            'conv2d_2/Conv2D/ReadVariableOp_quantized',
            'conv2d_3/Conv2D/ReadVariableOp_quantized',
            'conv2d_4/Conv2D/ReadVariableOp_quantized',
            'single_residual/MatMul/ReadVariableOp_quantized',
            'conv2d/BiasAdd/ReadVariableOp_quantized',
            'conv2d_1/BiasAdd/ReadVariableOp_quantized',
            'conv2d_2/BiasAdd/ReadVariableOp_quantized',
            'conv2d_3/BiasAdd/ReadVariableOp_quantized',
            'conv2d_4/BiasAdd/ReadVariableOp_quantized',
            'single_residual/BiasAdd/ReadVariableOp_quantized'
        ]

        for activation_quantizer in activation_quantizers:
            op_mode_tensor = sim.session.graph.get_tensor_by_name(
                activation_quantizer + '_op_mode:0')
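            # With is_input_quantized set for Conv, Gemm and BatchNormalization, only the
            # quantizers feeding inputs of those op types should collect statistics; the
            # rest are expected to stay passThrough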
            if activation_quantizer in [
                    'input_1_quantized', 'conv2d/BiasAdd_quantized',
                    'max_pooling2d/MaxPool_quantized',
                    'conv2d_2/BiasAdd_quantized', 'conv2d_3/BiasAdd_quantized',
                    'Relu_2_quantized', 'average_pooling2d/AvgPool_quantized'
            ]:
                self.assertEqual(sim.session.run(op_mode_tensor),
                                 int(pymo.TensorQuantizerOpMode.updateStats))
            else:
                self.assertEqual(sim.session.run(op_mode_tensor),
                                 int(pymo.TensorQuantizerOpMode.passThrough))
        for weight_quantizer in weight_quantizers:
            is_symmetric_tensor = sim.session.graph.get_tensor_by_name(
                weight_quantizer + '_use_symmetric_encoding:0')
            op_mode_tensor = sim.session.graph.get_tensor_by_name(
                weight_quantizer + '_op_mode:0')
            if weight_quantizer in [
                    'conv2d/BiasAdd/ReadVariableOp_quantized',
                    'conv2d_1/BiasAdd/ReadVariableOp_quantized',
                    'conv2d_2/BiasAdd/ReadVariableOp_quantized',
                    'conv2d_3/BiasAdd/ReadVariableOp_quantized',
                    'conv2d_4/BiasAdd/ReadVariableOp_quantized',
                    'single_residual/BiasAdd/ReadVariableOp_quantized'
            ]:
                self.assertEqual(
                    sim.session.run(op_mode_tensor),
                    int(pymo.TensorQuantizerOpMode.oneShotQuantizeDequantize))
                self.assertEqual(sim.session.run(is_symmetric_tensor), True)
            else:
                self.assertEqual(sim.session.run(op_mode_tensor),
                                 int(pymo.TensorQuantizerOpMode.passThrough))
                self.assertEqual(sim.session.run(is_symmetric_tensor), False)

        if os.path.exists('./quantsim_config.json'):
            os.remove('./quantsim_config.json')
        sess.close()
        sim.session.close()
        tf.compat.v1.reset_default_graph()

    def test_parse_config_file_params(self):
        """ Test that param specific quantization parameters are set correctly when using json config file """
        tf.compat.v1.reset_default_graph()
        sess = tf.compat.v1.Session()
        with sess.graph.as_default():
            _ = single_residual()
            init = tf.compat.v1.global_variables_initializer()
            sess.run(init)

        quantsim_config = {
            "defaults": {
                "ops": {},
                "params": {
                    "is_quantized": "False",
                    "is_symmetric": "True"
                }
            },
            "params": {
                "weight": {
                    "is_quantized": "True",
                    "is_symmetric": "False"
                }
            },
            "op_type": {},
            "supergroups": [],
            "model_input": {},
            "model_output": {}
        }
        with open('./quantsim_config.json', 'w') as f:
            json.dump(quantsim_config, f)
        sim = QuantizationSimModel(sess, ['input_1'],
                                   ['single_residual/Softmax'],
                                   config_file='./quantsim_config.json')

        weight_quantizers = [
            'conv2d/Conv2D/ReadVariableOp_quantized',
            'conv2d_1/Conv2D/ReadVariableOp_quantized',
            'conv2d_2/Conv2D/ReadVariableOp_quantized',
            'conv2d_3/Conv2D/ReadVariableOp_quantized',
            'conv2d_4/Conv2D/ReadVariableOp_quantized',
            'single_residual/MatMul/ReadVariableOp_quantized'
        ]

        bias_quantizers = [
            'conv2d/BiasAdd/ReadVariableOp_quantized',
            'conv2d_1/BiasAdd/ReadVariableOp_quantized',
            'conv2d_2/BiasAdd/ReadVariableOp_quantized',
            'conv2d_3/BiasAdd/ReadVariableOp_quantized',
            'conv2d_4/BiasAdd/ReadVariableOp_quantized',
            'single_residual/BiasAdd/ReadVariableOp_quantized'
        ]

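        # The "weight" entry overrides the defaults, so weight quantizers should be enabled
        # and asymmetric; bias quantizers fall back to the defaults (not quantized, with the
        # symmetric flag still set)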
        for param_quantizer in weight_quantizers:
            is_symmetric_tensor = sim.session.graph.get_tensor_by_name(
                param_quantizer + '_use_symmetric_encoding:0')
            op_mode_tensor = sim.session.graph.get_tensor_by_name(
                param_quantizer + '_op_mode:0')
            self.assertEqual(
                sim.session.run(op_mode_tensor),
                int(pymo.TensorQuantizerOpMode.oneShotQuantizeDequantize))
            self.assertEqual(sim.session.run(is_symmetric_tensor), False)
        for param_quantizer in bias_quantizers:
            is_symmetric_tensor = sim.session.graph.get_tensor_by_name(
                param_quantizer + '_use_symmetric_encoding:0')
            op_mode_tensor = sim.session.graph.get_tensor_by_name(
                param_quantizer + '_op_mode:0')
            self.assertEqual(sim.session.run(op_mode_tensor),
                             int(pymo.TensorQuantizerOpMode.passThrough))
            self.assertEqual(sim.session.run(is_symmetric_tensor), True)

        if os.path.exists('./quantsim_config.json'):
            os.remove('./quantsim_config.json')
        sess.close()
        sim.session.close()
        tf.compat.v1.reset_default_graph()
Example no. 10
    def test_calculate_channel_pruning_cost_two_layers(self):
        """
        test compressed model cost using two layers
        :return:
        """
        config = tf.compat.v1.ConfigProto()
        config.gpu_options.allow_growth = True

        # create session with graph
        sess = tf.compat.v1.Session(graph=tf.Graph(), config=config)

        with sess.graph.as_default():
            # model will be constructed in default graph
            test_models.single_residual()
            init = tf.compat.v1.global_variables_initializer()

        # initialize the weights and biases with appropriate initializer
        sess.run(init)

        meta_path = './temp_working_dir/'

        if not os.path.exists(meta_path):
            os.mkdir(meta_path)

        layer_db = LayerDatabase(model=sess,
                                 input_shape=None,
                                 working_dir=meta_path)

        # Create a list of tuples of (layer, comp_ratio)
        layer_ratio_list = []

        layer_names = ['conv2d_2/Conv2D', 'conv2d_3/Conv2D']
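        # Prune conv2d_2 and conv2d_3 with a compression ratio of 0.5; every other layer
        # gets a comp ratio of None and is left uncompressed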
        for layer in layer_db:
            if layer.module.name in layer_names:
                layer_ratio_list.append(LayerCompRatioPair(layer, 0.5))
            else:
                layer_ratio_list.append(LayerCompRatioPair(layer, None))

        input_op_names = ['input_1']
        output_op_names = ['single_residual/Softmax']
        data_set = unittest.mock.MagicMock()
        batch_size = unittest.mock.MagicMock()
        num_reconstruction_samples = unittest.mock.MagicMock()

        pruner = InputChannelPruner(
            input_op_names=input_op_names,
            output_op_names=output_op_names,
            data_set=data_set,
            batch_size=batch_size,
            num_reconstruction_samples=num_reconstruction_samples,
            allow_custom_downsample_ops=True)

        cost_calculator = ChannelPruningCostCalculator(pruner)

        compressed_cost = cost_calculator.calculate_compressed_cost(
            layer_db, layer_ratio_list, CostMetric.mac)

        self.assertEqual(108544, compressed_cost.mac)
        self.assertEqual(1264, compressed_cost.memory)

        # delete the meta and the checkpoint files
        shutil.rmtree(meta_path)

        layer_db.model.close()
Example no. 11
    def test_create_input_feed_dict(self):
        """
        test create_input_feed_dict
        """

        # 1) input_batch_data numpy array
        g = tf.Graph()
        with g.as_default():
            _ = single_residual()

        input_data = np.random.rand(1, 16, 16, 3)
        feed_dict = create_input_feed_dict(graph=g, input_op_names_list=['input_1'], input_data=input_data)
        self.assertEqual(feed_dict[g.get_tensor_by_name('input_1:0')].shape, input_data.shape)

        tf.compat.v1.reset_default_graph()

        # 2) input_batch_data list of numpy arrays
        g = tf.Graph()
        with g.as_default():
            multiple_input_model()

        input_data = list()
        input_data.append(np.random.rand(10, 10, 3))
        input_data.append(np.random.rand(12, 12, 3))
        feed_dict = create_input_feed_dict(graph=g, input_op_names_list=['input1', 'input2'],
                                           input_data=input_data)

        self.assertEqual(feed_dict[g.get_tensor_by_name('input1:0')].shape, input_data[0].shape)
        self.assertEqual(feed_dict[g.get_tensor_by_name('input2:0')].shape, input_data[1].shape)

        tf.compat.v1.reset_default_graph()

        # 3) input_batch_data tuple of numpy arrays
        g = tf.Graph()
        with g.as_default():
            multiple_input_model()

        input_data = (np.random.rand(10, 10, 3), np.random.rand(12, 12, 3))

        feed_dict = create_input_feed_dict(graph=g, input_op_names_list=['input1', 'input2'],
                                           input_data=input_data)

        self.assertEqual(feed_dict[g.get_tensor_by_name('input1:0')].shape, input_data[0].shape)
        self.assertEqual(feed_dict[g.get_tensor_by_name('input2:0')].shape, input_data[1].shape)
        tf.compat.v1.reset_default_graph()

        # 4) input_batch_data and input_op_names mismatch
        g = tf.Graph()
        with g.as_default():
            multiple_input_model()

        input_data = (np.random.rand(10, 10, 3))

        self.assertRaises(ValueError, lambda: create_input_feed_dict(graph=g,
                                                                     input_op_names_list=['input1', 'input2'],
                                                                     input_data=input_data))
        tf.compat.v1.reset_default_graph()

        g = tf.Graph()
        with g.as_default():
            model_with_multiple_training_tensors()
        input_data = (np.random.rand(32, 32, 3))
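        # With training=True, every training-phase placeholder in the graph should be fed
        # True along with the regular input data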
        feed_dict = create_input_feed_dict(graph=g, input_op_names_list=['input_1'],
                                           input_data=input_data, training=True)
        keras_learning_phase_tensor = g.get_tensor_by_name('keras_learning_phase:0')
        is_training_tensor = g.get_tensor_by_name('is_training:0')
        is_training_2_tensor = g.get_tensor_by_name('is_training_2:0')
        self.assertEqual(feed_dict[keras_learning_phase_tensor], True)
        self.assertEqual(feed_dict[is_training_tensor], True)
        self.assertEqual(feed_dict[is_training_2_tensor], True)
        tf.compat.v1.reset_default_graph()
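
    def _run_feed_dict_through_session(self, g):
        """
        Hypothetical helper, included only as a hedged usage sketch and not part of the
        original tests: a feed dict built by create_input_feed_dict can be passed directly
        to Session.run. Tensor names assume the single_residual model ('input_1' input,
        'single_residual/Softmax' output) used elsewhere in these tests.
        """
        with tf.compat.v1.Session(graph=g) as sess:
            sess.run(tf.compat.v1.global_variables_initializer())
            feed_dict = create_input_feed_dict(graph=g, input_op_names_list=['input_1'],
                                               input_data=np.random.rand(1, 16, 16, 3))
            return sess.run(g.get_tensor_by_name('single_residual/Softmax:0'), feed_dict=feed_dict)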