    def test_set_and_freeze_op_mode(self):
        """ Create QuantSim for a CPU model, test set and freeze op mode """
        tf.compat.v1.reset_default_graph()
        with tf.device('/cpu:0'):
            _ = keras_model()
            init = tf.compat.v1.global_variables_initializer()

        session = tf.compat.v1.Session()
        session.run(init)

        sim = QuantizationSimModel(session, ['conv2d_input'],
                                   ['keras_model/Softmax'],
                                   use_cuda=False)
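        # Grab the quantizer info for the quantized conv2d weight op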
        quantizer = sim.quantizer_config(
            'conv2d/Conv2D/ReadVariableOp_quantized')

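        # Set the op mode, then freeze the encoding so later updates are ignored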
        op_mode = int(libpymo.TensorQuantizerOpMode.oneShotQuantizeDequantize)
        quantizer.set_op_mode(op_mode)
        quantizer.freeze_encoding()
        self.assertEqual(op_mode, quantizer.get_op_mode())

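        # Attempting to switch the op mode after freezing should have no effect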
        new_op_mode = int(libpymo.TensorQuantizerOpMode.passThrough)
        quantizer.set_op_mode(new_op_mode)
        self.assertNotEqual(new_op_mode, quantizer.get_op_mode())
        self.assertEqual(op_mode, quantizer.get_op_mode())

        session.close()

    def test_set_and_freeze_param_encodings(self):
        """ Test set and freeze parameter encodings functionality """
        tf.compat.v1.reset_default_graph()
        with tf.device('/cpu:0'):
            _ = keras_model()
            init = tf.compat.v1.global_variables_initializer()

        session = tf.compat.v1.Session()
        session.run(init)

        sim = QuantizationSimModel(session, ['conv2d_input'],
                                   ['keras_model/Softmax'],
                                   use_cuda=False)
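        # Hand-crafted 4-bit encoding for the conv2d weight parameter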
        param_encodings = {
            'conv2d/Conv2D/ReadVariableOp:0': [{
                'bitwidth': 4,
                'is_symmetric': False,
                'max': 0.14584073424339294,
                'min': -0.12761062383651733,
                'offset': -7.0,
                'scale': 0.01823008991777897
            }]
        }
        # export encodings to JSON file
        encoding_file_path = os.path.join('./', 'dummy.encodings')
        with open(encoding_file_path, 'w') as encoding_fp:
            json.dump(param_encodings, encoding_fp, sort_keys=True, indent=4)

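        # Import the saved encodings and freeze them on the matching parameter quantizers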
        sim.set_and_freeze_param_encodings(encoding_path='./dummy.encodings')

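        # The quantizer should now report the imported min/max, a quantizeDequantize op mode and a valid encoding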
        quantizer = sim.quantizer_config(
            'conv2d/Conv2D/ReadVariableOp_quantized')
        encoding = param_encodings['conv2d/Conv2D/ReadVariableOp:0'][0]

        encoding_max = quantizer.get_variable_from_op(
            QuantizeOpIndices.encoding_max)
        encoding_min = quantizer.get_variable_from_op(
            QuantizeOpIndices.encoding_min)

        self.assertEqual(encoding_min, encoding.get('min'))
        self.assertEqual(encoding_max, encoding.get('max'))
        self.assertEqual(int(libpymo.TensorQuantizerOpMode.quantizeDequantize),
                         quantizer.get_op_mode())
        self.assertEqual(quantizer.is_encoding_valid(), True)

        session.close()

        # Delete encodings JSON file
        if os.path.exists("./dummy.encodings"):
            os.remove("./dummy.encodings")

    def test_keras_model_get_op_product_graph(self):
        """ Test connected graph construction on keras model """
        tf.compat.v1.reset_default_graph()

        _ = keras_model()
        conn_graph = ConnectedGraph(tf.compat.v1.get_default_graph(), ['conv2d_input'], ['keras_model/Softmax'])
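        # Validate the constructed graph and check the expected op and product counts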
        self.assertTrue(validate_branch_ops(conn_graph))
        self.assertTrue(validate_product_tensor_lists(conn_graph))
        self.assertEqual(0, conn_graph.branch_count)
        self.assertEqual(11, len(conn_graph.get_all_ops()))

        # 10 products from inter module connections
        # 14 products from parameters
        self.assertEqual(24, len(conn_graph.get_all_products()))

    def test_get_op_info(self):
        """ Test get_op_info() in StructureModuleIdentifier """
        my_op_type_set = set()
        current_module_set = set()

        tf.compat.v1.reset_default_graph()
        _ = keras_model()

        module_identifier = StructureModuleIdentifier(
            tf.compat.v1.get_default_graph(), ["conv2d_input"],
            set(tf.compat.v1.get_default_graph().get_operations()))
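        # Collect the op types and module names reported by the module identifier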
        for op_info in module_identifier.op_to_module_dict.values():
            my_op_type_set.add(op_info.op_type)
            current_module_set.add(op_info.module_name)

        # Only identifies 2 conv2d, 2 fusedbatchnorm, flatten, and dense
        self.assertEqual(6, len(current_module_set))
        self.assertEqual(4, len(my_op_type_set))

    def test_set_and_freeze_encoding(self):
        """ Create QuantSim for a CPU model, test set and freeze encoding """
        tf.compat.v1.reset_default_graph()
        with tf.device('/cpu:0'):
            _ = keras_model()
            init = tf.compat.v1.global_variables_initializer()

        session = tf.compat.v1.Session()
        session.run(init)

        sim = QuantizationSimModel(session, ['conv2d_input'],
                                   ['keras_model/Softmax'],
                                   use_cuda=False)
        quantizer = sim.quantizer_config(
            'conv2d/Conv2D/ReadVariableOp_quantized')

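        # Compute a fresh 8-bit (non-symmetric) encoding for this quantizer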
        encoding = quantizer.compute_encoding(8, False)
        print(encoding.max, encoding.min)
        # Set and freeze encoding
        quantizer.set_encoding(encoding)
        quantizer.freeze_encoding()

        old_encoding_min = quantizer.get_variable_from_op(
            QuantizeOpIndices.encoding_min)
        old_encoding_max = quantizer.get_variable_from_op(
            QuantizeOpIndices.encoding_max)

        self.assertEqual(encoding.min, old_encoding_min)
        self.assertEqual(encoding.max, old_encoding_max)
        self.assertEqual(quantizer.is_encoding_valid(), True)

        # Try updating encoding min and max with new values; the frozen encoding must not change
        encoding.min = -0.4
        encoding.max = 0.6
        quantizer.set_encoding(encoding)

        self.assertEqual(
            old_encoding_min,
            quantizer.get_variable_from_op(QuantizeOpIndices.encoding_min))
        self.assertEqual(
            old_encoding_max,
            quantizer.get_variable_from_op(QuantizeOpIndices.encoding_max))

        session.close()

    def test_get_encoding(self):
        """ Create QuantSim for a CPU model, test get encoding """
        tf.compat.v1.reset_default_graph()
        with tf.device('/cpu:0'):
            _ = keras_model()
            init = tf.compat.v1.global_variables_initializer()

        session = tf.compat.v1.Session()
        session.run(init)

        sim = QuantizationSimModel(session, ['conv2d_input'],
                                   ['keras_model/Softmax'],
                                   use_cuda=False)
        quantizer = sim.quantizer_config(
            'conv2d/Conv2D/ReadVariableOp_quantized')

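        # No encoding has been computed or set, so get_encoding() should raise an AssertionError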
        self.assertRaises(AssertionError, lambda: quantizer.get_encoding())

        session.close()