Example 1
    def test_log_areas(self):

        logger.info("            ")
        logger.info("Testing test_log_areas()")

        # Test the UTILS logger
        utils_logger = AimetLogger.get_area_logger(AimetLogger.LogAreas.Utils)
        utils_logger.debug("Testing Debug")
        utils_logger.info("Testing Info")
        utils_logger.warning("Testing Warning")
        utils_logger.error("Testing Error")
        utils_logger.critical("Testing Critical")
        utils_logger.critical("**************************************** \n")

        # Test the QUANT logger
        quant_logger = AimetLogger.get_area_logger(AimetLogger.LogAreas.Quant)
        quant_logger.debug("Testing Debug")
        quant_logger.info("Testing Info")
        quant_logger.warning("Testing Warning")
        quant_logger.error("Testing Error")
        quant_logger.critical("Testing Critical")
        quant_logger.critical("**************************************** \n")

        # Test the SVD logger
        svd_logger = AimetLogger.get_area_logger(AimetLogger.LogAreas.Svd)
        svd_logger.debug("Testing Debug")
        svd_logger.info("Testing Info")
        svd_logger.warning("Testing Warning")
        svd_logger.error("Testing Error")
        svd_logger.critical("Testing Critical")
        svd_logger.critical("**************************************** \n")
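
The area loggers exercised above behave like standard Python logging.Logger objects. A minimal sketch, assuming only that get_area_logger returns a stdlib logger, of attaching an extra handler:

import logging
from aimet_common.utils import AimetLogger

utils_logger = AimetLogger.get_area_logger(AimetLogger.LogAreas.Utils)
handler = logging.StreamHandler()  # duplicate area logs to stderr
handler.setFormatter(logging.Formatter('%(name)s - %(levelname)s - %(message)s'))
utils_logger.addHandler(handler)
utils_logger.info("Testing Info")  # now also emitted through the added handler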
Example 2
    def __init__(self, graph, op_map=None, ops_to_ignore=None, strict=True):
        """
        Constructor
        :param graph: The graph to search
        :param op_map: The map of operations used to identify op sequences as "one op".
        The default op_map is defined in op_deps.py. Please follow that format
        when passing a custom op_map.
        :param ops_to_ignore: List of ops to ignore
        :param strict: If strict mode is set to True, queries will only return the last ops
        at the end of well-known "op layers" as defined by the op_map. When False,
        queries will return ops at the end of well-known layers and, in addition,
        all ops which are not "known".

        E.g., if a graph contains the ops Conv2D, BiasAdd, WeirdOp, strict mode
        will return ["BiasAdd"], since it knows that Conv2D+BiasAdd form one
        logical "layer". With strict mode disabled, it will return ["BiasAdd", "WeirdOp"].
        """

        self._log = AimetLogger.get_area_logger(AimetLogger.LogAreas.Utils)
        self._graph = graph
        self._strict = strict

        if op_map:
            self._op_map = op_map
        else:
            self._op_map = _OP_MAP

        if ops_to_ignore:
            self._ops_to_ignore = ops_to_ignore
        else:
            self._ops_to_ignore = []

        self._trained_vars = graph.get_collection(
            tf.compat.v1.GraphKeys.TRAINABLE_VARIABLES)
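
The "if op_map:" fallback above is a common defaulting pattern; note that truth-testing also rejects an explicitly passed empty dict. A self-contained sketch of the distinction (the table contents below are illustrative only; the real _OP_MAP lives in op_deps.py per the docstring):

_DEFAULT_OP_MAP = {('Conv2D', 'BiasAdd'): 'Conv2D'}  # illustrative contents only

def resolve_op_map(op_map=None):
    # Comparing against None (unlike truth-testing) preserves a caller's
    # explicit empty mapping.
    return op_map if op_map is not None else _DEFAULT_OP_MAP

print(resolve_op_map())    # falls back to the default table
print(resolve_op_map({}))  # {} is kept as the caller's explicit choice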
Example 3
    def test_setting_log_level_for_all_areas(self):

        logger.info("*** test_setting_log_level_for_all_areas() ***\n")

        svd_logger = AimetLogger.get_area_logger(AimetLogger.LogAreas.Svd)
        quant_logger = AimetLogger.get_area_logger(AimetLogger.LogAreas.Quant)
        util_logger = AimetLogger.get_area_logger(AimetLogger.LogAreas.Utils)
        test_logger = AimetLogger.get_area_logger(AimetLogger.LogAreas.Test)

        # The default logging level for all Log Areas defined in default_logging_config.json is used.
        logger.info(
            "Log at the default log level for all Log Areas defined in default_logging_config.json"
        )
        svd_logger.debug("Testing Debug")
        svd_logger.info("Testing Info")

        quant_logger.warning("Testing Warning")
        quant_logger.error("Testing Error")

        util_logger.critical("Testing Critical")
        util_logger.info("Testing Info")

        test_logger.critical("Testing Critical")
        test_logger.critical("****************************************\n")

        # Change the default log level for all areas.
        # Only WARNING and higher-severity logs will be logged.
        logger.info("Change the logging level for all Log Areas to WARNING")
        AimetLogger.set_level_for_all_areas(logging.WARNING)

        svd_logger.debug("Testing Debug")
        svd_logger.info("Testing Info")

        quant_logger.warning("Testing Warning")
        quant_logger.error("Testing Error")

        util_logger.critical("Testing Critical")
        util_logger.info("Testing Info")

        test_logger.critical("Testing Critical")
        test_logger.critical("****************************************\n")
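
The suppression seen after set_level_for_all_areas(logging.WARNING) follows directly from the numeric ordering of the stdlib levels; a quick self-contained check:

import logging

# A logger set to WARNING (30) emits only records at or above that value,
# which is why the debug/info calls above go quiet.
for level in (logging.DEBUG, logging.INFO, logging.WARNING,
              logging.ERROR, logging.CRITICAL):
    print(logging.getLevelName(level), level)  # DEBUG 10 ... CRITICAL 50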
Example 4
    def __init__(self,
                 model=None,
                 quant_mode='tf_enhanced',
                 round_mode='nearest',
                 use_cuda=True):
        """
        :param model: The input model to add quantization ops to
        :param quant_mode: Indicates which quantization algorithm should be used, either
                'tf' or 'tf_enhanced'. Defaults to 'tf_enhanced'.
        :param round_mode: The rounding scheme to use. One of: 'nearest' or 'stochastic'. Default
                is 'nearest'.
        :param use_cuda: Indicates whether the quantization algorithm should run on GPU (True)
                or CPU (False). Defaults to True (GPU).
        :raises: ValueError: An error occurred processing one of the input parameters.
        """

        if quant_mode not in ('tf_enhanced', 'tf'):
            raise ValueError(
                'Parameter quantization mode is not a valid selection. Valid selections are tf, '
                'tf_enhanced')

        if round_mode not in ('nearest', 'stochastic'):
            raise ValueError(
                'Parameter round mode is not a valid selection. Valid selections are nearest or '
                'stochastic')

        self._model = model
        self._use_cuda = use_cuda

        if isinstance(quant_mode, str):
            if quant_mode == 'tf':
                quant_mode = QuantScheme.post_training_tf
            elif quant_mode == 'tf_enhanced':
                quant_mode = QuantScheme.post_training_tf_enhanced

        self._quant_mode = quant_mode

        self._round_mode = round_mode

        self._logger = AimetLogger.get_area_logger(AimetLogger.LogAreas.Quant)
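
A stripped-down, self-contained restatement of the validate-then-normalize step above. The real QuantScheme enum is presumably imported from aimet_common.defs; the member values below are invented for illustration:

from enum import Enum

class QuantScheme(Enum):  # stand-in for the real QuantScheme enum
    post_training_tf = 1
    post_training_tf_enhanced = 2

def normalize_quant_mode(quant_mode):
    if quant_mode not in ('tf_enhanced', 'tf'):
        raise ValueError('Parameter quantization mode is not a valid selection. '
                         'Valid selections are tf, tf_enhanced')
    if quant_mode == 'tf':
        return QuantScheme.post_training_tf
    return QuantScheme.post_training_tf_enhanced

print(normalize_quant_mode('tf'))  # QuantScheme.post_training_tf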
Example 5
    def test_setting_log_level(self):

        logger.info("*** Testing test_setting_log_level() *** \n")
        svd_logger = AimetLogger.get_area_logger(AimetLogger.LogAreas.Svd)

        # The default logging level for SVD defined in default_logging_config.json is used.
        logger.info(
            "Log at the default log level for SVD defined in default_logging_config.json"
        )
        svd_logger.debug("Testing Debug")
        svd_logger.info("Testing Info")
        svd_logger.warning("Testing Warning")
        svd_logger.error("Testing Error")
        svd_logger.critical("Testing Critical")
        svd_logger.critical("****************************************\n")

        # Change the default log level for SVD.
        # Only CRITICAL level logs will be logged.
        logger.info("Change SVD area's logging level to Critical")
        AimetLogger.set_area_logger_level(AimetLogger.LogAreas.Svd,
                                          logging.CRITICAL)
        svd_logger.debug("Testing Debug")
        svd_logger.info("Testing Info")
        svd_logger.warning("Testing Warning")
        svd_logger.error("Testing Error")
        svd_logger.critical("Testing Critical")
        svd_logger.critical("****************************************\n")

        # Change the log level for SVD back to DEBUG.
        # All logs will be logged.
        logger.info("Change SVD area's logging level to Debug")
        AimetLogger.set_area_logger_level(AimetLogger.LogAreas.Svd,
                                          logging.DEBUG)
        svd_logger.debug("Testing Debug")
        svd_logger.info("Testing Info")
        svd_logger.warning("Testing Warning")
        svd_logger.error("Testing Error")
        svd_logger.critical("Testing Critical")
        svd_logger.critical("****************************************\n")
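
set_area_logger_level presumably reduces to Logger.setLevel on the area's underlying logger; the equivalent stdlib round-trip, as a self-contained sketch:

import logging

demo = logging.getLogger('demo.svd')  # stand-in for the SVD area logger
demo.setLevel(logging.CRITICAL)
print(demo.isEnabledFor(logging.ERROR))  # False: ERROR is below CRITICAL
demo.setLevel(logging.DEBUG)
print(demo.isEnabledFor(logging.ERROR))  # True: everything is logged again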
Example 6
#  CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
#  ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
#  POSSIBILITY OF SUCH DAMAGE.
#
#  SPDX-License-Identifier: BSD-3-Clause
#
#  @@-COPYRIGHT-END-@@
# =============================================================================

"""Utility functions for test/train data cache implementation"""

import os
import shutil
from aimet_common.utils import AimetLogger

logger = AimetLogger.get_area_logger(AimetLogger.LogAreas.Utils)


def is_cache_env_set():
    """
    Check whether the cache environment variable is set.
    :return: True if the DEPENDENCY_DATA_PATH environment variable is set, False otherwise
    """
    return 'DEPENDENCY_DATA_PATH' in os.environ
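
# Usage sketch (illustrative only; the DEPENDENCY_DATA_PATH variable name comes
# from is_cache_env_set above, the helper name and log messages are invented):
def _log_cache_status():
    if is_cache_env_set():
        logger.info('Using cached dependency data from %s',
                    os.environ['DEPENDENCY_DATA_PATH'])
    else:
        logger.info('DEPENDENCY_DATA_PATH not set; dependency data will be fetched')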

def is_mnist_cache_present():
    """
    Check whether MNIST data is present in the cache
Example 7
import torch

import libpymo

from aimet_common.graph_pattern_matcher import PatternType
from aimet_common.graph_searcher import GraphSearcher

from aimet_torch import utils
from aimet_torch import quantsim as qsim
from aimet_torch.meta.connectedgraph import ConnectedGraph
from aimet_torch.quantsim import QcQuantizeWrapper
from aimet_torch.save_utils import SaveUtils
from aimet_common.utils import AimetLogger
from aimet_common.bias_correction import ConvBnInfoType, ConvBnPatternHandler
from aimet_common.defs import ActivationType
from aimet_torch.utils import get_ordered_lists_of_conv_fc

logger = AimetLogger.get_area_logger(AimetLogger.LogAreas.Quant)


class StopForwardException(Exception):
    """ Dummy exception to early-terminate forward-pass """


def forward_pass(model: torch.nn.Module, batch: torch.Tensor):
    """
    forward pass depending model allocation on CPU / GPU till StopForwardException
    :param model: model
    :param batch: batch
    :return: Nothing
    """
    model.eval()
    # first check if the model is on GPU or not
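
The StopForwardException defined above is typically raised from a forward hook so that layers past the point of interest never execute. A minimal self-contained sketch of that pattern (the hook placement is illustrative, not this module's actual usage):

import torch

class StopForward(Exception):
    """Raised inside a hook to cut the forward pass short."""

def _stop_hook(module, inputs, outputs):
    raise StopForward

model = torch.nn.Sequential(torch.nn.Linear(4, 4), torch.nn.ReLU())
handle = model[0].register_forward_hook(_stop_hook)
try:
    model(torch.randn(1, 4))
except StopForward:
    pass  # the ReLU after the hooked Linear never runs
finally:
    handle.remove()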
Example 8
#
#  SPDX-License-Identifier: BSD-3-Clause
#
#  @@-COPYRIGHT-END-@@
# =============================================================================

# TODO Need to exclude this file for PyLint checking. We get the following error that needs to be investigated:
# RecursionError: maximum recursion depth exceeded while calling a Python object
# pylint: skip-file
""" Sub-sample data for weight reconstruction for channel pruning feature """

import numpy as np

from aimet_common.utils import AimetLogger

logger = AimetLogger.get_area_logger(AimetLogger.LogAreas.Test)


class InputMatchSearch:
    """ Utilities to find a set of input pixels corresponding to an output pixel for weight reconstruction """
    @staticmethod
    def _check_and_update_pixel_sampled_from_output_data(
            input_data_shape: tuple, layer_attributes: tuple, pixel: tuple):
        """
        Function takes the input data shape, layer attributes and pixel indices (height, width),
        first checks whether the given height and width satisfy the convolution output-size
        formula O = (I - F + 2P) / S + 1 (e.g. I=32, F=3, P=1, S=1 gives O=32), and then
        updates them using the stride and padding values of the given layer.

        :param input_data_shape: input data shape (Cin, Hin, Win)
        :param layer_attributes: (kernel_size, stride, padding)
        :param pixel: (height, width)
Example 9
from typing import Tuple, List, Union, Dict
import numpy as np
import tensorflow as tf
import libpymo

from aimet_tensorflow.common.connectedgraph import ConnectedGraph
from aimet_tensorflow.common.operation import Op
from aimet_tensorflow.batch_norm_fold import fold_all_batch_norms
from aimet_tensorflow.utils.graph_saver import save_and_load_graph
from aimet_tensorflow.utils.op.conv import WeightTensorUtils, BiasUtils
import aimet_tensorflow.utils.op.relu as ReluUtils
from aimet_tensorflow.utils.op.fusedbatchnorm import BNUtils
from aimet_common.utils import AimetLogger

logger = AimetLogger.get_area_logger(AimetLogger.LogAreas.CrosslayerEqualization)

ScaleFactor = Union[np.ndarray, Tuple[np.ndarray]]

ClsSet = Union[Tuple[tf.Operation, tf.Operation],
               Tuple[tf.Operation, tf.Operation, tf.Operation]]


class GraphSearchUtils:
    """ Implements graph search utils required by CLE feature """

    def __init__(self, model: tf.Graph, start_op_names: Union[str, List[str]], output_op_names: Union[str, List[str]]):
        if isinstance(start_op_names, str):
            start_op_names = [start_op_names]
Example 10
#  @@-COPYRIGHT-END-@@
#
#  =============================================================================
""" Winnow the API provided input channels from the modules in a model. """

import copy
from typing import List, Tuple, Dict
import torch
from aimet_common.utils import AimetLogger, ModelApi
from aimet_common.winnow.mask_propagation_winnower import MaskPropagationWinnower as AimetCommonMaskPropagationWinnower
from aimet_common.winnow.mask_propagator import MaskPropagator
from aimet_torch.meta.connectedgraph import ConnectedGraph
from aimet_torch.utils import get_layer_name, has_hooks
from aimet_torch.winnow.module_reducer import ModuleReducer

logger = AimetLogger.get_area_logger(AimetLogger.LogAreas.Winnow)


class MaskPropagationWinnower(AimetCommonMaskPropagationWinnower):
    """ The MaskPropagationWinnower class implements winnowing based on propagating masks corresponding to each
    module's input channels identified to be winnowed.  """
    def __init__(self,
                 model: torch.nn.Module,
                 input_shape: Tuple,
                 list_of_modules_to_winnow: List[Tuple[torch.nn.Module,
                                                       List]] = None,
                 reshape=True,
                 in_place=False,
                 verbose=False):
        """
        MaskPropagationWinnower object initialization.
Example 11
#  POSSIBILITY OF SUCH DAMAGE.
#
#  SPDX-License-Identifier: BSD-3-Clause
#
#  @@-COPYRIGHT-END-@@
# =============================================================================
""" Utilities for working with ModelOptimization C++ library """
from typing import List
import torch

import libpymo as pymo
from aimet_torch.layer_database import Layer
from aimet_common.defs import CostMetric
from aimet_common.utils import AimetLogger

logger = AimetLogger.get_area_logger(AimetLogger.LogAreas.Svd)


class PymoSvdUtils:
    """ Utilities for working with SVD ModelOptimization C++ library """
    @classmethod
    def configure_layers_in_pymo_svd(cls, layers: List[Layer],
                                     cost_metric: CostMetric, svd_lib_ref):
        """
        Configure layers with the pymo svd library
        :param layers: List of layers to configure
        :param cost_metric: Cost metric to use
        :param svd_lib_ref: Reference to pymo instance
        :return:
        """
Example 12
import abc
import os
from typing import Optional
import libpymo as pymo

from aimet_common.bokeh_plots import DataTable
from aimet_common.bokeh_plots import LinePlot
from aimet_common.bokeh_plots import ProgressBar
from aimet_common.utils import AimetLogger
from aimet_common.curve_fit import MonotonicIncreasingCurveFit
from aimet_common.defs import CostMetric, LayerCompRatioPair, GreedyCompressionRatioSelectionStats, \
    TarCompressionRatioSelectionStats, LayerCompRatioEvalScore, EvalFunction
from aimet_common.pruner import Pruner
from aimet_common import cost_calculator as cc
from aimet_common.layer_database import Layer, LayerDatabase
from aimet_common.comp_ratio_rounder import CompRatioRounder

logger = AimetLogger.get_area_logger(AimetLogger.LogAreas.CompRatioSelect)


class CompRatioSelectAlgo(metaclass=abc.ABCMeta):
    """
    Abstract interface for all compression-ratio selection algorithms
    """
    def __init__(self, layer_db: LayerDatabase,
                 cost_calculator: cc.CostCalculator, cost_metric: CostMetric,
                 comp_ratio_rounding_algo: Optional[CompRatioRounder]):
        """
        Constructor
        :param layer_db: Database of compressible layers
        """
        # pylint: disable=too-many-arguments
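
A self-contained sketch of the abc.ABCMeta mechanics relied on above: subclasses of the abstract algorithm cannot be instantiated until they implement the abstract interface (the method name below is hypothetical; the real abstract methods are not shown in this snippet):

import abc

class SelectAlgoSketch(metaclass=abc.ABCMeta):
    @abc.abstractmethod
    def select_per_layer_comp_ratios(self):
        """Return per-layer compression-ratio pairs (hypothetical method)."""

class GreedySketch(SelectAlgoSketch):
    def select_per_layer_comp_ratios(self):
        return []

print(GreedySketch().select_per_layer_comp_ratios())  # [] -- instantiable
# SelectAlgoSketch() would raise TypeError: abstract method not implemented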
Example 13
from typing import List

import tensorflow as tf

from aimet_common.utils import AimetLogger
from aimet_common.pruner import Pruner
from aimet_common.channel_pruner import select_channels_to_prune
from aimet_common.cost_calculator import CostCalculator, Cost
from aimet_common.winnow.winnow_utils import update_winnowed_channels

from aimet_tensorflow.utils.graph_saver import save_and_load_graph
from aimet_tensorflow.utils.common import is_op_compressible, get_ordered_ops
from aimet_tensorflow.layer_database import Layer, LayerDatabase
from aimet_tensorflow.utils.op.conv import WeightTensorUtils
from aimet_tensorflow.winnow import winnow
from aimet_tensorflow.channel_pruning.data_subsampler import DataSubSampler
from aimet_tensorflow.channel_pruning.weight_reconstruction import WeightReconstructor
from aimet_tensorflow.common.graph_eval import initialize_uninitialized_vars

logger = AimetLogger.get_area_logger(AimetLogger.LogAreas.ChannelPruning)


class InputChannelPruner(Pruner):
    """
    Pruner for Channel Pruning method
    """
    def __init__(self, input_op_names: List[str], output_op_names: List[str],
                 data_set: tf.data.Dataset, batch_size: int,
                 num_reconstruction_samples: int,
                 allow_custom_downsample_ops: bool):
        """
        Input Channel Pruner with given dataset, input shape, number of batches and samples per image.

        :param input_op_names: list of input op names
        :param output_op_names: List of output op names of the model, used to help ConnectedGraph determine valid ops
Example 14
#  POSSIBILITY OF SUCH DAMAGE.
#
#  SPDX-License-Identifier: BSD-3-Clause
#
#  @@-COPYRIGHT-END-@@
# =============================================================================
""" Module identifier class """

from abc import ABC, abstractmethod
from typing import List, Set
import tensorflow as tf
from aimet_common.utils import AimetLogger
from aimet_tensorflow.common import sub_graph_matcher
from aimet_tensorflow.common.sub_graph_matcher import ModuleIdentifierOpInfo

logger = AimetLogger.get_area_logger(AimetLogger.LogAreas.ConnectedGraph)


class ModuleIdentifier(ABC):
    """ Module identifier class for identifying submodules from groups of tf Operations """
    def __init__(self, graph: tf.Graph):
        """ Initializer for ModuleIdentifier """
        self._graph = graph

    @abstractmethod
    def get_op_info(self, op: tf.Operation) -> ModuleIdentifierOpInfo:
        """
        Given a tf op in the graph, return OpInfo class containing:
        - opname: Name that op/product graph should use to represent whatever module
        this tf op belongs to
        - type: Module type that should be stored in the op/product graph for this module
Example 15
    def __init__(self,
                 graph,
                 checkpoint,
                 output_file='./quantize/q_graph',
                 quant_mode='tf_enhanced',
                 round_mode='nearest',
                 op_map=None,
                 ops_to_ignore=None,
                 gpu=True,
                 debug=False,
                 skip_output=False,
                 skip_bias=True):
        """
        :param graph: The input meta graph to add quantization ops to
        :param checkpoint: The checkpoint file for the given graph
        :param output_file: The file path for saving the compressed tensorflow graph
        :param quant_mode: Indicates which quantization algorithm should be used, either
                'tf' or 'tf_enhanced'. Defaults to 'tf_enhanced'.
        :param round_mode: The rounding scheme to use. One of: 'nearest' or 'stochastic'. Default
                is 'nearest'.
        :param op_map: A map representing the op sequences to identify and quantize. See op_defs.py
                for an example of the formatting required.
        :param ops_to_ignore: A list of op names to ignore when selecting quantization ops
        :param gpu: Indicates whether the quantization algorithm should run on GPU (True) or
                CPU (False). Defaults to True, though GPU execution is noted as currently broken.
        :param debug: Indicates whether debug information should be printed or not. Defaults to False.
        :param skip_output: Whether to skip output quantization. Defaults to False.
        :param skip_bias: Whether to skip bias quantization. Defaults to True.
        :returns: An object which can be used to perform quantization on a tensorflow graph
        :raises: ValueError: An error occurred processing one of the input parameters.
        """

        # pylint: disable=too-many-arguments
        self._log = AimetLogger.get_area_logger(AimetLogger.LogAreas.Quant)
        self._debug = debug
        self._default_meta_graph = graph
        self._default_checkpoint = checkpoint
        self._output_file = output_file
        self._output_dir = os.path.dirname(output_file)
        self._skip_output = skip_output
        self._skip_bias = skip_bias
        if not os.path.exists(self._output_dir):
            os.makedirs(self._output_dir)
        self._log.info('Saving quantized model as: %s', output_file)

        if op_map:
            self._op_map = op_map
        else:
            self._op_map = op_defs.default_op_map

        if not ops_to_ignore:
            ops_to_ignore = []
        self._ops_to_ignore = ops_to_ignore

        if quant_mode not in _QUANT_MODES:
            raise ValueError('Invalid quantization mode: ' + quant_mode)
        self._quant_mode = _QUANT_MODES[quant_mode]
        self._quant_mode_str = quant_mode.upper()

        if round_mode not in _ROUNDING_MODES:
            raise ValueError('Invalid rounding mode: ' + round_mode)
        self._round_mode_str = round_mode.upper()

        self._comp_mode = libpymo.ComputationMode.COMP_MODE_GPU if gpu else libpymo.ComputationMode.COMP_MODE_CPU
        self._gpu = gpu
        self._quant_act_ops = []
        self._activation_encodings = {
            'quant_mode': quant_mode,
            'encodings': {},
            'activation_bitwidth': 8
        }
        self._is_train_variable = None
        self._input_tensor_names = None

        # Todo: Need to look at these attributes and see how to handle them better
        # Very likely these attributes don't need to be object attributes
        self._saver = None
        self._bw_acts = None
        self._bw_params = None
        self._forward_callback = None
        self._sess = None
        self._iterations = None
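
The _activation_encodings container initialized above is plain JSON-serializable data; a sketch of dumping it for inspection (the surrounding export API is not shown in this snippet):

import json

activation_encodings = {
    'quant_mode': 'tf_enhanced',
    'encodings': {},
    'activation_bitwidth': 8,
}
print(json.dumps(activation_encodings, indent=2))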