Code Example #1
    def __init__(self, list_of_modules_to_winnow, reshape, in_place, verbose):

        self._list_of_modules_to_winnow = list_of_modules_to_winnow
        self._reshape = reshape
        self._in_place = in_place

        if verbose:
            AimetLogger.set_area_logger_level(AimetLogger.LogAreas.Winnow, logging.INFO)
        else:
            AimetLogger.set_area_logger_level(AimetLogger.LogAreas.Winnow, logging.WARNING)
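
For reference, the same verbose handling can be written as a small standalone helper. This is a minimal sketch using only the AimetLogger calls shown above; the helper name is illustrative and not part of AIMET:

import logging

from aimet_common.utils import AimetLogger


def configure_winnow_logging(verbose):
    # Verbose runs log the Winnow area at INFO; otherwise only WARNING
    # and above are emitted, mirroring the constructor above.
    level = logging.INFO if verbose else logging.WARNING
    AimetLogger.set_area_logger_level(AimetLogger.LogAreas.Winnow, level)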
Code Example #2
File: test_logger.py  Project: Rohan-Chaudhury/aimet
    def test_setting_log_level(self):

        logger.info("*** Testing test_setting_log_level() *** \n")
        svd_logger = AimetLogger.get_area_logger(AimetLogger.LogAreas.Svd)

        # The default logging level for SVD defined in default_logging_config.json is used.
        logger.info(
            "Log at the default log level for SVD defined in default_logging_config.json"
        )
        svd_logger.debug("Testing Debug")
        svd_logger.info("Testing Info")
        svd_logger.warning("Testing Warning")
        svd_logger.error("Testing Error")
        svd_logger.critical("Testing Critical")
        svd_logger.critical("****************************************\n")

        # Change the default log level for SVD.
        # Only CRITICAL level logs will be logged.
        logger.info("Change SVD area's logging level to Critical")
        AimetLogger.set_area_logger_level(AimetLogger.LogAreas.Svd,
                                          logging.CRITICAL)
        svd_logger.debug("Testing Debug")
        svd_logger.info("Testing Info")
        svd_logger.warning("Testing Warning")
        svd_logger.error("Testing Error")
        svd_logger.critical("Testing Critical")
        svd_logger.critical("****************************************\n")

        # Change the default log level for SVD.
        # All logs will be logged.
        logger.info("Change SVD area's logging level to Critical")
        AimetLogger.set_area_logger_level(AimetLogger.LogAreas.Svd,
                                          logging.DEBUG)
        svd_logger.debug("Testing Debug")
        svd_logger.info("Testing Info")
        svd_logger.warning("Testing Warning")
        svd_logger.error("Testing Error")
        svd_logger.critical("Testing Critical")
        svd_logger.critical("****************************************\n")
Code Example #3
    def test_sort_on_occurrence(self):
        """
        Test sorting of ops based on occurrence
        """
        AimetLogger.set_area_logger_level(AimetLogger.LogAreas.Winnow,
                                          logging.INFO)
        tf.compat.v1.reset_default_graph()

        orig_g = tf.Graph()
        with orig_g.as_default():
            _ = VGG16(weights=None,
                      input_shape=(224, 224, 3),
                      include_top=False)
            orig_init = tf.compat.v1.global_variables_initializer()

        # create sess with graph
        orig_sess = tf.compat.v1.Session(graph=orig_g)
        orig_sess.run(orig_init)

        # create layer database
        layer_db = LayerDatabase(model=orig_sess,
                                 input_shape=(1, 224, 224, 3),
                                 working_dir=None)

        block1_conv2 = layer_db.model.graph.get_operation_by_name(
            'block1_conv2/Conv2D')
        block2_conv1 = layer_db.model.graph.get_operation_by_name(
            'block2_conv1/Conv2D')
        block2_conv2 = layer_db.model.graph.get_operation_by_name(
            'block2_conv2/Conv2D')
        block5_conv3 = layer_db.model.graph.get_operation_by_name(
            'block5_conv3/Conv2D')

        # output shapes of the Conv2D ops (NHWC, TensorFlow's default data format)
        block1_conv2_output_shape = block1_conv2.outputs[0].shape
        block2_conv1_output_shape = block2_conv1.outputs[0].shape
        block2_conv2_output_shape = block2_conv2.outputs[0].shape
        block5_conv3_output_shape = block5_conv3.outputs[0].shape

        # keeping compression ratio = None for all layers
        layer_comp_ratio_list = [
            LayerCompRatioPair(
                Layer(model=layer_db.model,
                      op=block5_conv3,
                      output_shape=block5_conv3_output_shape), None),
            LayerCompRatioPair(
                Layer(model=layer_db.model,
                      op=block2_conv2,
                      output_shape=block2_conv2_output_shape), None),
            LayerCompRatioPair(
                Layer(model=layer_db.model,
                      op=block1_conv2,
                      output_shape=block1_conv2_output_shape), None),
            LayerCompRatioPair(
                Layer(model=layer_db.model,
                      op=block2_conv1,
                      output_shape=block2_conv1_output_shape), None)
        ]

        input_op_names = ['input_1']
        output_op_names = ['block5_pool/MaxPool']
        dataset = unittest.mock.MagicMock()
        batch_size = unittest.mock.MagicMock()
        num_reconstruction_samples = unittest.mock.MagicMock()

        cp = InputChannelPruner(
            input_op_names=input_op_names,
            output_op_names=output_op_names,
            data_set=dataset,
            batch_size=batch_size,
            num_reconstruction_samples=num_reconstruction_samples,
            allow_custom_downsample_ops=True)

        sorted_layer_comp_ratio_list = cp._sort_on_occurrence(
            layer_db.model, layer_comp_ratio_list)

        self.assertEqual(sorted_layer_comp_ratio_list[0].layer.module,
                         block1_conv2)
        self.assertEqual(sorted_layer_comp_ratio_list[1].layer.module,
                         block2_conv1)
        self.assertEqual(sorted_layer_comp_ratio_list[2].layer.module,
                         block2_conv2)
        self.assertEqual(sorted_layer_comp_ratio_list[3].layer.module,
                         block5_conv3)

        self.assertEqual(len(sorted_layer_comp_ratio_list), 4)
        layer_db.model.close()
        # delete temp directory
        shutil.rmtree('./temp_meta/')
Code Example #4
    def test_prune_model(self):
        """
        Test end-to-end prune_model with VGG16-imagenet
        """
        AimetLogger.set_area_logger_level(AimetLogger.LogAreas.Winnow,
                                          logging.INFO)
        tf.compat.v1.reset_default_graph()

        batch_size = 1
        input_data = np.random.rand(100, 224, 224, 3)
        dataset = tf.data.Dataset.from_tensor_slices(input_data)
        dataset = dataset.batch(batch_size=batch_size)

        orig_g = tf.Graph()

        with orig_g.as_default():
            _ = VGG16(weights=None,
                      input_shape=(224, 224, 3),
                      include_top=False)
            orig_init = tf.compat.v1.global_variables_initializer()

        input_op_names = ['input_1']
        output_op_names = ['block5_pool/MaxPool']
        # create sess with graph
        orig_sess = tf.compat.v1.Session(graph=orig_g)
        # initialize all the variables in VGG16
        orig_sess.run(orig_init)

        # create layer database
        layer_db = LayerDatabase(model=orig_sess,
                                 input_shape=(1, 224, 224, 3),
                                 working_dir=None)

        block1_conv2 = layer_db.model.graph.get_operation_by_name(
            'block1_conv2/Conv2D')
        block2_conv1 = layer_db.model.graph.get_operation_by_name(
            'block2_conv1/Conv2D')
        block2_conv2 = layer_db.model.graph.get_operation_by_name(
            'block2_conv2/Conv2D')

        # output shapes of the Conv2D ops (NHWC, TensorFlow's default data format)
        block1_conv2_output_shape = block1_conv2.outputs[0].shape
        block2_conv1_output_shape = block2_conv1.outputs[0].shape
        block2_conv2_output_shape = block2_conv2.outputs[0].shape

        # keeping compression ratio = 0.5 for all layers
        layer_comp_ratio_list = [
            LayerCompRatioPair(
                Layer(model=layer_db.model,
                      op=block1_conv2,
                      output_shape=block1_conv2_output_shape), 0.5),
            LayerCompRatioPair(
                Layer(model=layer_db.model,
                      op=block2_conv1,
                      output_shape=block2_conv1_output_shape), 0.5),
            LayerCompRatioPair(
                Layer(model=layer_db.model,
                      op=block2_conv2,
                      output_shape=block2_conv2_output_shape), 0.5)
        ]

        cp = InputChannelPruner(input_op_names=input_op_names,
                                output_op_names=output_op_names,
                                data_set=dataset,
                                batch_size=batch_size,
                                num_reconstruction_samples=20,
                                allow_custom_downsample_ops=True)

        comp_layer_db = cp.prune_model(
            layer_db=layer_db,
            layer_comp_ratio_list=layer_comp_ratio_list,
            cost_metric=CostMetric.mac,
            trainer=None)

        pruned_block1_conv2 = comp_layer_db.find_layer_by_name(
            'reduced_reduced_block1_conv2/Conv2D')
        pruned_block2_conv1 = comp_layer_db.find_layer_by_name(
            'reduced_reduced_block2_conv1/Conv2D')
        pruned_block2_conv2 = comp_layer_db.find_layer_by_name(
            'reduced_block2_conv2/Conv2D')

        # input channels = 64 * 0.5 = 32
        # output channels = 64 * 0.5 = 32
        self.assertEqual(pruned_block1_conv2.weight_shape[1], 32)
        self.assertEqual(pruned_block1_conv2.weight_shape[0], 32)

        # input channels = 64 * 0.5 = 32
        # output channels = 128 * 0.5 = 64
        self.assertEqual(pruned_block2_conv1.weight_shape[1], 32)
        self.assertEqual(pruned_block2_conv1.weight_shape[0], 64)

        # input channels = 128 * 0.5 = 64
        # output channels = 128
        self.assertEqual(pruned_block2_conv2.weight_shape[1], 64)
        self.assertEqual(pruned_block2_conv2.weight_shape[0], 128)

        layer_db.model.close()
        comp_layer_db.model.close()
        # delete temp directory
        shutil.rmtree('./temp_meta/')
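
The asserted shapes follow from the 0.5 compression ratio and from the way pruning a layer's input channels also winnows the preceding Conv2D's output channels (hence the 'reduced_reduced_' names). A quick sketch of that arithmetic, under that reading of the test:

comp_ratio = 0.5

# block1_conv2: its own ratio halves the inputs (64 -> 32), and pruning
# block2_conv1's inputs winnows its outputs (64 -> 32).
block1_conv2_in, block1_conv2_out = int(64 * comp_ratio), int(64 * comp_ratio)

# block2_conv1: inputs halved (64 -> 32); block2_conv2's pruning winnows
# its outputs (128 -> 64).
block2_conv1_in, block2_conv1_out = int(64 * comp_ratio), int(128 * comp_ratio)

# block2_conv2: inputs halved (128 -> 64); outputs stay at 128 because no
# later layer in the list prunes them.
block2_conv2_in, block2_conv2_out = int(128 * comp_ratio), 128

assert (block1_conv2_in, block1_conv2_out) == (32, 32)
assert (block2_conv1_in, block2_conv1_out) == (32, 64)
assert (block2_conv2_in, block2_conv2_out) == (64, 128)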
Code Example #5
#  SPDX-License-Identifier: BSD-3-Clause
#
#  @@-COPYRIGHT-END-@@
# =============================================================================
""" This file contains unit tests for testing ModuleIdentifier modules. """

import unittest
import logging
import tensorflow as tf

from aimet_common.utils import AimetLogger
from aimet_tensorflow.common.module_identifier import StructureModuleIdentifier
from aimet_tensorflow.examples.test_models import keras_model, keras_model_functional, tf_slim_basic_model

logger = AimetLogger.get_area_logger(AimetLogger.LogAreas.Test)
AimetLogger.set_area_logger_level(AimetLogger.LogAreas.Test, logging.DEBUG)
AimetLogger.set_area_logger_level(AimetLogger.LogAreas.ConnectedGraph,
                                  logging.DEBUG)


class TestStructureModuleIdentifier(unittest.TestCase):
    """ Test StructureModuleIdentifier module """
    def test_get_op_info(self):
        """ Test get_op_info() in StructureModuleIdentifier """
        my_op_type_set = set()
        current_module_set = set()

        tf.compat.v1.reset_default_graph()
        _ = keras_model()

        module_identifier = StructureModuleIdentifier(