def test_groups(test_input_info_struct_: GroupPruningModulesTestStruct):
    model = test_input_info_struct_.model
    not_pruned_modules = test_input_info_struct_.not_pruned_modules
    pruned_groups = test_input_info_struct_.pruned_groups
    prune_first, prune_last, prune_downsample = test_input_info_struct_.prune_params

    model = model()
    nncf_config = get_basic_pruning_config(input_sample_size=[1, 1, 8, 8])
    nncf_config['compression']['algorithm'] = 'filter_pruning'
    nncf_config['compression']['params']['prune_first_conv'] = prune_first
    nncf_config['compression']['params']['prune_last_conv'] = prune_last
    nncf_config['compression']['params']['prune_downsample_convs'] = prune_downsample

    compressed_model, compression_ctrl = create_compressed_model_and_algo_for_test(model, nncf_config)

    # 1. Check all not pruned modules
    clusters = compression_ctrl.pruned_module_groups_info
    all_pruned_modules_info = clusters.get_all_nodes()
    all_pruned_modules = [info.module for info in all_pruned_modules_info]
    print([minfo.module_scope for minfo in all_pruned_modules_info])
    for module_name in not_pruned_modules:
        module = compressed_model.get_module_by_scope(Scope.from_str(module_name))
        assert module not in all_pruned_modules

    # 2. Check that all pruned groups are valid
    for group in pruned_groups:
        first_node_scope = Scope.from_str(group[0])
        cluster = clusters.get_cluster_by_node_id(first_node_scope)
        cluster_modules = [n.module for n in cluster.nodes]
        group_modules = [compressed_model.get_module_by_scope(Scope.from_str(module_scope)) for module_scope in group]

        assert cluster_modules == group_modules
Example #2
def get_all_modules_by_type(model,
                            module_types,
                            current_scope=None,
                            ignored_scopes=None,
                            target_scopes=None) -> Dict['Scope', Module]:
    if isinstance(module_types, str):
        module_types = [module_types]
    found = OrderedDict()
    from nncf.dynamic_graph.context import Scope
    from nncf.dynamic_graph.context import ScopeElement
    if current_scope is None:
        current_scope = Scope()
        current_scope.push(ScopeElement(model.__class__.__name__))
    for name, module in model.named_children():
        child_scope_element = ScopeElement(module.__class__.__name__, name)
        child_scope = current_scope.copy()
        child_scope.push(child_scope_element)

        if in_scope_list(str(child_scope), ignored_scopes):
            continue

        if target_scopes is None or in_scope_list(str(child_scope),
                                                  target_scopes):
            if type(module).__name__ in module_types:
                found[child_scope] = module
            sub_found = get_all_modules_by_type(module,
                                                module_types,
                                                current_scope=child_scope,
                                                ignored_scopes=ignored_scopes,
                                                target_scopes=target_scopes)
            if sub_found:
                found.update(sub_found)
    return found
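A minimal usage sketch (the toy model below is illustrative, not from the source): the function recurses over named_children() and returns an OrderedDict mapping each matching child's Scope to the module itself.

import torch.nn as nn

model = nn.Sequential(nn.Conv2d(1, 3, 2), nn.ReLU(), nn.Conv2d(3, 1, 2))
found = get_all_modules_by_type(model, 'Conv2d')
for scope, module in found.items():
    print(scope, module)  # scope stringifies to something like 'Sequential/Conv2d[0]'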
Example #3
class PruningTestModel(nn.Module):
    CONV_SCOPE_1 = Scope.from_str("PruningTestModel/NNCFConv2d[conv1]")
    CONV_SCOPE_2 = Scope.from_str("PruningTestModel/NNCFConv2d[conv2]")
    def __init__(self):
        super().__init__()
        self.conv1 = create_conv(1, 3, 2, 9, -2)
        self.relu = nn.ReLU()
        self.conv2 = create_conv(3, 1, 3, -10, 0)

    def forward(self, x):
        x = self.conv1(x)
        x = self.relu(x)
        x = self.conv2(x)
        return x
Example #4
def get_mock_nncf_node_attrs():
    return {
        NNCFGraph.OP_EXEC_CONTEXT_NODE_ATTR: OperationExecutionContext(MOCK_OPERATOR_NAME,
                                                                       Scope(),
                                                                       0,
                                                                       [None])
    }
Example #5
def get_scopes_of_skipped_weight_quantizers():
    scopes_list = [
        'MobileNetV2/Sequential[features]/ConvBNReLU[18]/NNCFConv2d[0]',
        'MobileNetV2/Sequential[features]/InvertedResidual[17]/Sequential[conv]/NNCFConv2d[2]',
        'MobileNetV2/Sequential[features]/InvertedResidual[16]/Sequential[conv]/NNCFConv2d[2]'
    ]
    return [Scope.from_str(s) for s in scopes_list]
Example #6
    @staticmethod
    def from_str(s: str):
        scope_and_op, _, call_order_str = s.rpartition('_')
        scope_str, _, op_name = scope_and_op.rpartition('/')

        from nncf.dynamic_graph.context import Scope
        return InputAgnosticOperationExecutionContext(
            op_name, Scope.from_str(scope_str), int(call_order_str))
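The parser works from the right: rpartition('_') splits off the call order, then rpartition('/') separates the operator name from its owning scope. A hypothetical round-trip (the scope string is illustrative; the attribute names follow the constructor arguments and the scope_in_model usage in Example #19):

ctx = InputAgnosticOperationExecutionContext.from_str('MyModel/NNCFConv2d[conv1]/conv2d_0')
# ctx.operator_name == 'conv2d'
# ctx.scope_in_model == Scope.from_str('MyModel/NNCFConv2d[conv1]')
# ctx.call_order == 0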
Example #7
def test_get_bn_for_module_scope():
    config = get_basic_pruning_config(input_sample_size=(1, 1, 8, 8))
    config['compression']['algorithm'] = 'filter_pruning'
    pruned_model, _ = create_compressed_model_and_algo_for_test(BigPruningTestModel(), config)

    conv1_scope = Scope.from_str('BigPruningTestModel/NNCFConv2d[conv1]')
    bn = get_bn_for_module_scope(pruned_model, conv1_scope)
    assert bn is None

    conv2_scope = Scope.from_str('BigPruningTestModel/NNCFConv2d[conv2]')
    bn = get_bn_for_module_scope(pruned_model, conv2_scope)
    assert bn == pruned_model.bn

    conv3_scope = Scope.from_str('BigPruningTestModel/NNCFConv2d[conv3]')
    bn = get_bn_for_module_scope(pruned_model, conv3_scope)
    assert bn is None
Example #8
    def test_insertion_point_data_in_ip_nodes(self):
        # TODO: extend for modules
        mock_graph = nx.DiGraph()
        ref_op_exec_context = OperationExecutionContext(
            "baz", Scope.from_str("Test/Scope[foo]/bar"), 0, [None])
        node_attrs = {NNCFGraph.OP_EXEC_CONTEXT_NODE_ATTR: ref_op_exec_context}

        node_key = 0
        mock_graph.add_node(node_key, **node_attrs)

        ip_graph = InsertionPointGraph(mock_graph)

        for node_key in mock_graph.nodes.keys():
            preds = list(ip_graph.predecessors(node_key))
            succs = list(ip_graph.successors(node_key))
            pre_hook_ip_node = ip_graph.nodes[preds[0]]
            post_hook_ip_node = ip_graph.nodes[succs[0]]

            pre_hook_ip = pre_hook_ip_node[
                InsertionPointGraph.INSERTION_POINT_DATA_NODE_ATTR]
            post_hook_ip = post_hook_ip_node[
                InsertionPointGraph.INSERTION_POINT_DATA_NODE_ATTR]
            assert pre_hook_ip.insertion_type == InsertionType.OPERATOR_PRE_HOOK
            assert post_hook_ip.insertion_type == InsertionType.OPERATOR_POST_HOOK

            assert pre_hook_ip.ia_op_exec_context == ref_op_exec_context.input_agnostic
            assert post_hook_ip.ia_op_exec_context == ref_op_exec_context.input_agnostic
Example #9
def test_is_module_prunable(test_prunable_struct: ModulePrunableTestStruct):
    model = test_prunable_struct.model()
    nncf_model, algo_builder = create_nncf_model_and_builder(model, test_prunable_struct.config_params)
    for module_scope_str in test_prunable_struct.is_module_prunable:
        scope = Scope.from_str(module_scope_str)
        module = nncf_model.get_module_by_scope(scope)
        is_prunable, _ = algo_builder._is_module_prunable(nncf_model, module, scope)
        assert is_prunable == test_prunable_struct.is_module_prunable[module_scope_str]
Example #10
    def disable_all_gradients_except_weights_of_quantized_modules(
        quantizers_switcher: QuantizersSwitcher,
        quantized_weight_modules_registry: Dict[str, torch.nn.Module],
        model: nn.Module,
        scopes_of_skipped_weight_quantizers: List['Scope'] = None
    ) -> ParamsToRestore:  # pylint: disable=undefined-variable
        """
        Disables gradients of all parameters, except for layers that have quantizers for weights, which wasn't skipped
        because of single precision constraints.
        :param quantizers_switcher: object that is responsible for enabling and disabling quantizers
        :param quantized_weight_modules_registry: modules with quantized weights per scope
        :param model: model to access all parameters
        :param scopes_of_skipped_weight_quantizers: list of string scopes of layers that have a single precision
        constraint and which weights should be skipped from bitwidth initialization
        :return: list of names of the parameters that were originally disabled
        """
        originally_disabled_gradients = []
        skipped_gradients_to_enable = []

        # Some quantizers can be disabled in a staged scenario on creation of staged scheduler
        # Need to save originally disabled quantizers for restoring their state after initialization
        quantizers_switcher.disable_quantizers()

        # remember gradients of quantized modules that were enabled
        gradients_to_enable = []
        for scope, quantized_module in quantized_weight_modules_registry.items():
            is_skipped = False
            for skipped_weight_quantizer_scope in (scopes_of_skipped_weight_quantizers or []):
                # Scope containment is prefix-based: matches when the module's registry
                # scope is a prefix of (or equal to) the skipped scope
                if skipped_weight_quantizer_scope in Scope.from_str(scope):
                    is_skipped = True
                    break
            for param_name, param in quantized_module.named_parameters():
                if param.requires_grad:
                    # disable gradients for skipped module for optimization of Hessian Trace search
                    if is_skipped:
                        skipped_gradients_to_enable.append(
                            (quantized_module, param_name))
                        param.requires_grad = False
                    else:
                        gradients_to_enable.append(
                            (quantized_module, param_name))

        # disable all gradients, except already disabled
        for param_name, param in model.named_parameters():
            if not param.requires_grad:
                originally_disabled_gradients.append(param_name)
            else:
                param.requires_grad = False

        # enable gradients of quantized modules that were disabled
        for quantized_module in quantized_weight_modules_registry.values():
            for param_name, param in quantized_module.named_parameters():
                if (quantized_module, param_name) in gradients_to_enable \
                        and 'bias' not in param_name:
                    param.requires_grad = True
        return HAWQPrecisionInitializer.ParamsToRestore(
            originally_disabled_gradients, skipped_gradients_to_enable)
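The skip check above relies on Scope's prefix-based containment, which the in-source comment in Example #18 spells out: a scope "contains" every operation or module nested at or under it. A hedged illustration, assuming those semantics:

outer = Scope.from_str('MyModel/Sequential[features]')
inner = Scope.from_str('MyModel/Sequential[features]/NNCFConv2d[0]')
assert inner in outer       # nested scope is contained in its ancestor
assert outer not in inner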
Example #11
    def test_inplace_apply_filter_binary_mask(mask, reference_weight,
                                              reference_bias):
        """
        Test that inplace_apply_filter_binary_mask changes the input weight and returns valid result.
        """
        nncf_module = NNCFConv2d(1, 2, 2)
        fill_conv_weight(nncf_module, 1)
        fill_bias(nncf_module, 1)

        result_weight = inplace_apply_filter_binary_mask(
            mask, nncf_module.weight.data, Scope())
        assert torch.allclose(result_weight, reference_weight)
        assert torch.allclose(nncf_module.weight, reference_weight)

        result_bias = inplace_apply_filter_binary_mask(mask,
                                                       nncf_module.bias.data,
                                                       Scope())
        assert torch.allclose(result_bias, reference_bias)
        assert torch.allclose(nncf_module.bias, reference_bias)
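For intuition: with NNCFConv2d(1, 2, 2) the mask has one entry per output filter, so a mask of [1, 0] zeroes the second filter's weights in place (and, in the second call, its bias entry). A hypothetical invocation:

mask = torch.tensor([1., 0.])
conv = NNCFConv2d(1, 2, 2)
inplace_apply_filter_binary_mask(mask, conv.weight.data, Scope())
assert torch.all(conv.weight[1] == 0)  # second output filter zeroed in place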
Example #12
def test_assert_broadcastable_mask_and_weight_shape():
    nncf_module = NNCFConv2d(1, 2, 2)
    fill_conv_weight(nncf_module, 1)
    fill_bias(nncf_module, 1)

    mask = torch.zeros(10)

    with pytest.raises(RuntimeError):
        inplace_apply_filter_binary_mask(mask, nncf_module.weight.data,
                                         Scope())

    with pytest.raises(RuntimeError):
        apply_filter_binary_mask(mask, nncf_module.weight.data)
Example #13
def generate_qp(scope_str: str,
                target: QuantizerGroup,
                in_port_id: int = None) -> SingleConfigQuantizationPoint:
    if target is QuantizerGroup.WEIGHTS:
        ip = InsertionPoint(InsertionType.NNCF_MODULE_PRE_OP,
                            module_scope=Scope.from_str(scope_str))
    elif target is QuantizerGroup.ACTIVATIONS:
        ip = InsertionPoint(
            InsertionType.OPERATOR_POST_HOOK
            if in_port_id is None else InsertionType.OPERATOR_PRE_HOOK,
            ia_op_exec_context=InputAgnosticOperationExecutionContext.from_str(
                scope_str),
            input_port_id=in_port_id)
    else:
        raise RuntimeError("Unsupported quantizer group: {}".format(target))
    return SingleConfigQuantizationPoint(ip, QuantizerConfig())
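Hypothetical calls (the scope strings are illustrative): a weight quantization point is addressed by module scope, an activation point by the operation's execution context, optionally pinned to a specific input port.

wq = generate_qp('MyModel/NNCFConv2d[conv1]', QuantizerGroup.WEIGHTS)
aq = generate_qp('MyModel/relu_0', QuantizerGroup.ACTIVATIONS)
aq_port = generate_qp('MyModel/cat_0', QuantizerGroup.ACTIVATIONS, in_port_id=1)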
Example #14
def replace_modules(model: nn.Module,
                    replace_fn,
                    affected_scopes,
                    ignored_scopes=None,
                    target_scopes=None,
                    memo=None,
                    current_scope=None):
    if memo is None:
        memo = set()
        current_scope = Scope()
        current_scope.push(ScopeElement(model.__class__.__name__))

    if model in memo:
        return model, affected_scopes

    memo.add(model)
    for name, module in model.named_children():
        if module is None:
            continue

        child_scope_element = ScopeElement(module.__class__.__name__, name)
        child_scope = current_scope.copy()
        child_scope.push(child_scope_element)
        replaced_module = replace_fn(module)

        if replaced_module is not None:
            replaced_scope_element = ScopeElement(
                replaced_module.__class__.__name__, name)
            replaced_scope = current_scope.copy()
            replaced_scope.push(replaced_scope_element)
            if module is not replaced_module:
                if in_scope_list(str(child_scope), ignored_scopes):
                    nncf_logger.info(
                        "Ignored wrapping modules in scope: {}".format(
                            child_scope))
                    continue

                if target_scopes is None or in_scope_list(
                        str(child_scope), target_scopes):
                    nncf_logger.info("Wrapping module {} by {}".format(
                        str(child_scope), str(replaced_scope)))
                    if isinstance(model, nn.Sequential):
                        # pylint: disable=protected-access
                        model._modules[name] = replaced_module
                    else:
                        setattr(model, name, replaced_module)
                    affected_scopes.append(replaced_scope)
            elif is_nncf_module(replaced_module):
                # Got an NNCF-wrapped module from previous compression stage, track its scope as well
                affected_scopes.append(replaced_scope)
        _, affected_scopes = replace_modules(module, replace_fn,
                                             affected_scopes, ignored_scopes,
                                             target_scopes, memo, child_scope)
    return model, affected_scopes
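The contract of replace_fn, as the branches above imply: return None to leave a module untouched, the identical object to flag an already NNCF-wrapped module, or a new module to swap in. A sketch under those assumptions (NNCFConv2d.from_module is an assumed helper, shown for illustration only):

def my_replace_fn(module: nn.Module):
    if is_nncf_module(module):
        return module                           # already wrapped: return unchanged
    if isinstance(module, nn.Conv2d):
        return NNCFConv2d.from_module(module)   # assumed helper, for illustration
    return None                                 # everything else stays as-is

model, affected = replace_modules(model, my_replace_fn, affected_scopes=[])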
Example #15
def test_quantize_inputs():
    model = QuantizeInputsTestModel()
    config = get_basic_quantization_config()
    config["input_info"] = [
        {
            "sample_size": (2, 3, 32, 32),
        },
        {
            "sample_size": (2, 3, 32, 32),
        },
        {
            "sample_size": (2, 3, 32, 32),
        },
        {
            "sample_size": (2, 3, 32, 32),
        },
        {
            "sample_size": (2, 3, 32, 32),
        }
    ]

    model, compression_ctrl = create_compressed_model_and_algo_for_test(model, config)
    REF_QUANTIZED_INPUT_MODULE_SCOPES = [
        "QuantizeInputsTestModel/NNCFConv2d[conv1]",
        "QuantizeInputsTestModel/NNCFConv2d[conv2]",
        "QuantizeInputsTestModel/NNCFConv2d[conv5]",
        "QuantizeInputsTestModel/NNCFConv2d[conv6]",
    ]
    for ref_qinput_module_scope_str in REF_QUANTIZED_INPUT_MODULE_SCOPES:
        scope = Scope.from_str(ref_qinput_module_scope_str)
        assert model.get_module_by_scope(scope) is not None
        assert ref_qinput_module_scope_str in compression_ctrl.quantized_inputs_modules_registry

    nncf_modules_dict = model.get_nncf_modules()
    for scope, nncf_module in nncf_modules_dict.items():
        scope_str = str(scope)
        update_inputs_count = sum(1 for pre_op in nncf_module.pre_ops.values() if isinstance(pre_op, UpdateInputs))
        if scope_str in REF_QUANTIZED_INPUT_MODULE_SCOPES:
            assert update_inputs_count == 1
        else:
            assert update_inputs_count == 0
Example #16
def get_mock_nncf_node_attrs(op_name=None):
    op_name_to_set = op_name if op_name is not None else MOCK_OPERATOR_NAME
    return {
        NNCFGraph.OP_EXEC_CONTEXT_NODE_ATTR:
        OperationExecutionContext(op_name_to_set, Scope(), 0, [None])
    }
Example #17
def get_mock_quantizer_id(key: str) -> WeightQuantizerId:
    scope_element = ScopeElement(key)
    scope = Scope([scope_element])
    return WeightQuantizerId(scope)
Example #18
def replace_modules(model: nn.Module,
                    replace_fn,
                    affected_scopes,
                    ignored_scopes=None,
                    target_scopes=None,
                    memo=None,
                    current_scope=None,
                    eval_ops_exec_ctx_str: List[str] = None,
                    reset: bool = False):
    if memo is None:
        memo = set()
        current_scope = Scope()
        current_scope.push(ScopeElement(model.__class__.__name__))

    if model in memo:
        return model, affected_scopes

    memo.add(model)
    for name, module in model.named_children():
        if module is None:
            continue

        child_scope_element = ScopeElement(module.__class__.__name__, name)
        child_scope = current_scope.copy()
        child_scope.push(child_scope_element)
        replaced_module = replace_fn(module)

        if replaced_module is not None:
            replaced_scope_element = ScopeElement(
                replaced_module.__class__.__name__, name)
            replaced_scope = current_scope.copy()
            replaced_scope.push(replaced_scope_element)
            if module is not replaced_module:
                if in_scope_list(str(child_scope), ignored_scopes):
                    nncf_logger.info(
                        "Ignored wrapping modules specified in scope: {}".
                        format(child_scope))
                    continue
                if eval_ops_exec_ctx_str is None:
                    eval_ops_exec_ctx_str = []
                is_ignored = True
                for op_ctx_str in eval_ops_exec_ctx_str:
                    full_op_scope = Scope.from_str(op_ctx_str)
                    # child_scope isn't ignored if at least one operation or module
                    # inside it was called in eval mode
                    if full_op_scope in child_scope:
                        is_ignored = False
                        break
                if is_ignored and eval_ops_exec_ctx_str:
                    nncf_logger.info(
                        "Ignored wrapping modules not called in eval mode in scope: {}"
                        .format(child_scope))
                    continue

                if target_scopes is None or in_scope_list(
                        str(child_scope), target_scopes):
                    nncf_logger.info("Wrapping module {} by {}".format(
                        str(child_scope), str(replaced_scope)))
                    set_replaced_module_by_name(model, name, replaced_module)
                    affected_scopes.append(replaced_scope)
            elif is_nncf_module(replaced_module):
                # Got an NNCF-wrapped module from previous compression stage, track its scope as well
                affected_scopes.append(replaced_scope)
                if reset:
                    replaced_module.reset()
        _, affected_scopes = replace_modules(module,
                                             replace_fn,
                                             affected_scopes,
                                             ignored_scopes,
                                             target_scopes,
                                             memo,
                                             child_scope,
                                             eval_ops_exec_ctx_str,
                                             reset=reset)
    return model, affected_scopes
Example #19
class TestInsertionCommands:
    @pytest.fixture()
    def setup(self):
        self.compressed_model = NNCFNetwork(
            InsertionPointTestModel(),
            [ModelInputInfo([1, 1, 10, 10])])  # type: NNCFNetwork

    conv1_module_scope = Scope.from_str(
        'InsertionPointTestModel/NNCFConv2d[conv1]')
    conv1_module_context = InputAgnosticOperationExecutionContext(
        '', conv1_module_scope, 0)
    point_for_conv1_weights = InsertionPoint(
        ia_op_exec_context=conv1_module_context,
        insertion_type=InsertionType.NNCF_MODULE_PRE_OP)
    point_for_conv1_inputs = InsertionPoint(
        ia_op_exec_context=conv1_module_context,
        insertion_type=InsertionType.NNCF_MODULE_PRE_OP)
    point_for_conv1_activations = InsertionPoint(
        ia_op_exec_context=conv1_module_context,
        insertion_type=InsertionType.NNCF_MODULE_POST_OP)

    conv2_module_scope = Scope.from_str(
        'InsertionPointTestModel/NNCFConv2d[conv2]')
    conv2_module_context = InputAgnosticOperationExecutionContext(
        '', conv2_module_scope, 0)
    point_for_conv2_weights = InsertionPoint(
        ia_op_exec_context=conv2_module_context,
        insertion_type=InsertionType.NNCF_MODULE_PRE_OP)
    point_for_conv2_inputs = InsertionPoint(
        ia_op_exec_context=conv2_module_context,
        insertion_type=InsertionType.NNCF_MODULE_PRE_OP)
    point_for_conv2_activations = InsertionPoint(
        ia_op_exec_context=conv2_module_context,
        insertion_type=InsertionType.NNCF_MODULE_POST_OP)

    linear_op_scope = Scope.from_str('InsertionPointTestModel/linear_0')
    linear_op_context = InputAgnosticOperationExecutionContext(
        'linear', linear_op_scope, 0)
    point_for_linear_weight_input = InsertionPoint(
        ia_op_exec_context=linear_op_context,
        insertion_type=InsertionType.OPERATOR_PRE_HOOK)
    point_for_linear_activation = InsertionPoint(
        ia_op_exec_context=linear_op_context,
        insertion_type=InsertionType.OPERATOR_POST_HOOK)

    relu_op_scope = Scope.from_str('InsertionPointTestModel/ReLU[relu]/relu')
    relu_op_context = InputAgnosticOperationExecutionContext(
        'relu', relu_op_scope, 0)
    point_for_relu_inputs = InsertionPoint(
        ia_op_exec_context=relu_op_context,
        insertion_type=InsertionType.OPERATOR_PRE_HOOK)
    point_for_relu_activations = InsertionPoint(
        ia_op_exec_context=relu_op_context,
        insertion_type=InsertionType.OPERATOR_POST_HOOK)

    available_points = [
        point_for_conv1_weights, point_for_conv2_weights,
        point_for_conv1_inputs, point_for_conv2_inputs,
        point_for_conv1_activations, point_for_conv2_activations,
        point_for_linear_activation, point_for_linear_weight_input,
        point_for_relu_activations, point_for_relu_inputs
    ]

    @pytest.mark.parametrize("insertion_point", available_points)
    def test_single_insertions(self, setup, insertion_point):
        if insertion_point.insertion_type in [
                InsertionType.OPERATOR_PRE_HOOK,
                InsertionType.OPERATOR_POST_HOOK
        ]:
            hook = lambda x: x
        else:
            hook = BaseOp(lambda x: x)

        command = InsertionCommand(insertion_point, hook)
        self.compressed_model.register_insertion_command(command)
        self.compressed_model.commit_compression_changes()

        #pylint:disable=protected-access
        if insertion_point.insertion_type == InsertionType.OPERATOR_PRE_HOOK:
            ctx = self.compressed_model.get_tracing_context()
            assert ctx._pre_hooks[
                command.insertion_point.ia_op_exec_context][0] is hook
        if insertion_point.insertion_type == InsertionType.OPERATOR_POST_HOOK:
            ctx = self.compressed_model.get_tracing_context()
            assert ctx._post_hooks[
                command.insertion_point.ia_op_exec_context][0] is hook
        if insertion_point.insertion_type == InsertionType.NNCF_MODULE_PRE_OP:
            module = self.compressed_model.get_module_by_scope(
                command.insertion_point.ia_op_exec_context.scope_in_model)
            assert module.pre_ops["0"] is hook

        if insertion_point.insertion_type == InsertionType.NNCF_MODULE_POST_OP:
            module = self.compressed_model.get_module_by_scope(
                command.insertion_point.ia_op_exec_context.scope_in_model)
            assert module.post_ops["0"] is hook

    priority_types = ["same", "different"]
    insertion_types = InsertionType
    priority_test_cases = list(
        itertools.product(priority_types, insertion_types))

    @staticmethod
    def check_order(iterable1: List, iterable2: List, ordering: List):
        for idx, order in enumerate(ordering):
            assert iterable1[idx] is iterable2[order]

    # pylint:disable=undefined-variable
    @pytest.mark.parametrize(
        "case",
        priority_test_cases,
        ids=[x[1].name + '-' + x[0] for x in priority_test_cases])
    def test_priority(self, case, setup):
        #pylint:disable=too-many-branches
        priority_type = case[0]
        insertion_type = case[1]
        if insertion_type in [
                InsertionType.NNCF_MODULE_PRE_OP,
                InsertionType.NNCF_MODULE_POST_OP
        ]:
            hook1 = BaseOp(lambda x: x)
            hook2 = BaseOp(lambda x: 2 * x)
            hook3 = BaseOp(lambda x: 3 * x)
        else:
            hook1 = lambda x: x
            hook2 = lambda x: 2 * x
            hook3 = lambda x: 3 * x

        if insertion_type == InsertionType.NNCF_MODULE_PRE_OP:
            point = self.point_for_conv2_weights
        elif insertion_type == InsertionType.NNCF_MODULE_POST_OP:
            point = self.point_for_conv1_activations
        elif insertion_type == InsertionType.OPERATOR_PRE_HOOK:
            point = self.point_for_linear_weight_input
        elif insertion_type == InsertionType.OPERATOR_POST_HOOK:
            point = self.point_for_relu_activations

        if priority_type == "same":
            # Same-priority commands will be executed in registration order
            command1 = InsertionCommand(point, hook1,
                                        OperationPriority.DEFAULT_PRIORITY)
            command2 = InsertionCommand(point, hook2,
                                        OperationPriority.DEFAULT_PRIORITY)
            command3 = InsertionCommand(point, hook3,
                                        OperationPriority.DEFAULT_PRIORITY)
        else:
            # Prioritized commands will be executed in ascending priority order
            command1 = InsertionCommand(
                point, hook1, OperationPriority.SPARSIFICATION_PRIORITY)
            command2 = InsertionCommand(
                point, hook2, OperationPriority.QUANTIZATION_PRIORITY)
            command3 = InsertionCommand(point, hook3,
                                        OperationPriority.DEFAULT_PRIORITY)

        self.compressed_model.register_insertion_command(command1)
        self.compressed_model.register_insertion_command(command2)
        self.compressed_model.register_insertion_command(command3)
        self.compressed_model.commit_compression_changes()

        hook_list = [hook1, hook2, hook3]

        if priority_type == "same":
            order = [0, 1, 2]
        elif priority_type == "different":
            order = [2, 0, 1]

        #pylint:disable=protected-access
        if insertion_type == InsertionType.OPERATOR_PRE_HOOK:
            ctx = self.compressed_model.get_tracing_context()
            self.check_order(ctx._pre_hooks[point.ia_op_exec_context],
                             hook_list, order)
        if insertion_type == InsertionType.OPERATOR_POST_HOOK:
            ctx = self.compressed_model.get_tracing_context()
            self.check_order(ctx._post_hooks[point.ia_op_exec_context],
                             hook_list, order)

        if insertion_type == InsertionType.NNCF_MODULE_PRE_OP:
            module = self.compressed_model.get_module_by_scope(
                point.ia_op_exec_context.scope_in_model)
            # Works because Pytorch ModuleDict is ordered
            self.check_order(list(module.pre_ops.values()), hook_list, order)

        if insertion_type == InsertionType.NNCF_MODULE_POST_OP:
            module = self.compressed_model.get_module_by_scope(
                point.ia_op_exec_context.scope_in_model)
            # Works because Pytorch ModuleDict is ordered
            self.check_order(list(module.post_ops.values()), hook_list, order)