def apply_insert_after(model):
    converter = TFModelConverterFactory.create(model)
    transformations = TFTransformationLayout()
    qconfig = QuantizerConfig(num_bits=8,
                              mode=QuantizationMode.SYMMETRIC,
                              signedness_to_force=None,
                              per_channel=False)

    functional_model = is_functional_model(model)
    for i, layer in enumerate(model.layers):
        original_node_name = layer.name

        if functional_model:
            _, layer_info = converter.get_layer_info_for_node(
                original_node_name)
            instance_idx = layer_info.instance_idx
        else:
            instance_idx = 0

        fake_quantize_name = f'FakeQuantize_{i}/{original_node_name}'
        fake_quantize_layer = FakeQuantize(
            TFQuantizerSpec.from_config(qconfig, narrow_range=False, half_range=False),
            name=fake_quantize_name)

        transformations.register(
            TFInsertionCommand(
                target_point=commands.TFAfterLayer(original_node_name,
                                                   instance_idx=instance_idx,
                                                   output_port_id=0),
                callable_object=fake_quantize_layer,
                priority=TransformationPriority.QUANTIZATION_PRIORITY))

    transformer = TFModelTransformer(model)
    transformed_model = transformer.transform(transformations)
    return transformed_model
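A minimal usage sketch for apply_insert_after, assuming the NNCF TF imports used in the function above are available; the functional toy model is purely illustrative and not taken from the original tests:

import tensorflow as tf

# Illustrative functional model (an assumption, not part of the snippets above).
inputs = tf.keras.Input(shape=(28, 28, 1))
x = tf.keras.layers.Conv2D(8, 3, activation='relu')(inputs)
x = tf.keras.layers.Flatten()(x)
outputs = tf.keras.layers.Dense(10)(x)
toy_model = tf.keras.Model(inputs, outputs)

# One FakeQuantize is registered after every layer (output port 0) and the layout is applied.
quantized_toy_model = apply_insert_after(toy_model)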
def test_multiple_insertion_command_has_same_effect_as_multiple_single_insertions():
    check_fn = lambda src, dst: dst.type == TargetType.OPERATION_WITH_WEIGHTS

    insertion_command_1 = TFInsertionCommand(
        TFLayerWeight(CUSTOM_LAYER_NAME,
                      TwoWeightCustomLayerForTest.WEIGHT_1_NAME),
        MockIdentityOp('mock_nncf_op_1'),
        TransformationPriority.PRUNING_PRIORITY)
    insertion_command_2 = TFInsertionCommand(
        TFLayerWeight(CUSTOM_LAYER_NAME,
                      TwoWeightCustomLayerForTest.WEIGHT_2_NAME),
        MockIdentityOp('mock_nncf_op_2'),
        TransformationPriority.PRUNING_PRIORITY)
    multiple_insertion_command = TFMultipleInsertionCommands(
        target_point=TFLayer(CUSTOM_LAYER_NAME),
        commands=[insertion_command_1, insertion_command_2],
        check_target_points_fn=check_fn)

    transformation_layout_multi = TFTransformationLayout()
    transformation_layout_multi.register(multiple_insertion_command)
    transformation_layout_two_single = TFTransformationLayout()
    transformation_layout_two_single.register(insertion_command_1)
    transformation_layout_two_single.register(insertion_command_2)

    model_with_multi = create_transformed_model(transformation_layout_multi)
    model_with_two_single = create_transformed_model(
        transformation_layout_two_single)

    multi_config = model_with_multi.get_config()
    two_single_config = model_with_two_single.get_config()
    assert multi_config == two_single_config
Example 3
    def get_transformation_layout(self, model: tf.keras.Model) -> TFTransformationLayout:
        converter = TFModelConverterFactory.create(model)
        nncf_graph = converter.convert()
        transformations = TFTransformationLayout()

        processed_shared_layer_names = set()  # type: Set[str]

        for node in nncf_graph.get_all_nodes():
            if node.is_shared():
                target_layer_name, _ = get_original_name_and_instance_idx(node.node_name)
                if target_layer_name in processed_shared_layer_names:
                    continue
                processed_shared_layer_names.add(target_layer_name)

            if not (node.metatype in SPARSITY_LAYER_METATYPES and
                    should_consider_scope(node.node_name, ignored_scopes=self.ignored_scopes)):
                continue

            _, layer_info = converter.get_layer_info_for_node(node.node_name)
            for weight_def in node.metatype.weight_definitions:
                op_name = self._get_rb_sparsity_operation_name(node.node_name,
                                                               weight_def.weight_attr_name)
                self._op_names.append(op_name)

                transformations.register(
                    TFInsertionCommand(
                        target_point=TFLayerWeight(layer_info.layer_name, weight_def.weight_attr_name),
                        callable_object=RBSparsifyingWeight(op_name),
                        priority=TransformationPriority.SPARSIFICATION_PRIORITY
                    ))

        return transformations
Example 4
    def get_transformation_layout(self, model: tf.keras.Model) -> TFTransformationLayout:
        converter = TFModelConverterFactory.create(model)
        nncf_graph = converter.convert()
        transformations = TFTransformationLayout()

        processed_shared_layer_names = set()  # type: Set[str]

        for node in nncf_graph.get_all_nodes():
            if node.is_shared():
                target_layer_name, _ = get_original_name_and_instance_idx(
                    node.node_name)
                if target_layer_name in processed_shared_layer_names:
                    continue
                processed_shared_layer_names.add(target_layer_name)

            if not should_consider_scope(node.node_name,
                                         ignored_scopes=self.ignored_scopes):
                continue

            if node.metatype in OUTPUT_NOOP_METATYPES:
                continue

            is_custom, layer_info = converter.get_layer_info_for_node(
                node.node_name)
            if node.metatype in SPARSITY_LAYER_METATYPES:
                # Processing a regular weighted node
                for weight_def in node.metatype.weight_definitions:
                    op_name = self._get_sparsity_operation_name(
                        node.node_name, weight_def.weight_attr_name)
                    self._op_names.append(op_name)

                    transformations.register(
                        TFInsertionCommand(
                            target_point=TFLayerWeight(layer_info.layer_name,
                                                       weight_def.weight_attr_name),
                            callable_object=BinaryMask(op_name),
                            priority=TransformationPriority.SPARSIFICATION_PRIORITY))
            elif node.metatype in WEIGHTABLE_TF_OP_METATYPES:
                assert is_custom
                # Processing a custom layer weighted node
                # Caution: here layer_name will refer to the weight itself, not to the op
                weight_attr_name = node.layer_name
                op_name = self._get_sparsity_operation_name(
                    node.node_name, weight_attr_name)
                self._op_names.append(op_name)

                transformations.register(
                    TFInsertionCommand(
                        target_point=TFLayerWeight(layer_info.layer_name,
                                                   weight_attr_name),
                        callable_object=BinaryMaskWithWeightsBackup(
                            op_name, weight_attr_name),
                        priority=TransformationPriority.SPARSIFICATION_PRIORITY
                    ))

        return transformations
Example 5
    def get_transformation_layout(self, model: tf.keras.Model) -> TFTransformationLayout:
        transformations = TFTransformationLayout()
        if self._quantizer_setup is None:
            self._quantizer_setup = self._get_quantizer_setup(model)
        insertion_commands = self._build_insertion_commands_for_quantizer_setup(
            self._quantizer_setup)
        for command in insertion_commands:
            transformations.register(command)
        return transformations
def test_transformation_layout_removal_case():
    transformation_layout = TFTransformationLayout()

    command_list = [
        commands.TFInsertionCommand(
            commands.TFLayerWeight('layer_0', 'weight_0'),
            lambda: 'sparsity_operation',
            TransformationPriority.SPARSIFICATION_PRIORITY),
        commands.TFRemovalCommand(
            commands.TFOperationWithWeights('layer_0', 'weight_0', 'sparsity_operation')),
        commands.TFInsertionCommand(commands.TFAfterLayer('layer_0'),
                                    lambda: 'layer_1'),
        commands.TFRemovalCommand(commands.TFLayer('layer_1')),
        commands.TFInsertionCommand(
            commands.TFLayerWeight('layer_0', 'weight_0'),
            lambda: 'pruning_operation',
            TransformationPriority.PRUNING_PRIORITY)
    ]

    for cmd in command_list:
        transformation_layout.register(cmd)

    res_transformations = transformation_layout.transformations
    assert len(res_transformations) == 5
    assert res_transformations[0].type == TransformationType.INSERT
    assert (res_transformations[0].target_point.type ==
            TargetType.OPERATION_WITH_WEIGHTS)
    assert res_transformations[0].target_point.layer_name == 'layer_0'
    assert res_transformations[0].target_point.weights_attr_name == 'weight_0'

    assert res_transformations[1].type == TransformationType.REMOVE
    assert (res_transformations[1].target_point.type ==
            TargetType.OPERATION_WITH_WEIGHTS)
    assert res_transformations[1].target_point.layer_name == 'layer_0'
    assert res_transformations[1].target_point.weights_attr_name == 'weight_0'
    assert (res_transformations[1].target_point.operation_name ==
            'sparsity_operation')

    assert res_transformations[2].type == TransformationType.INSERT
    assert res_transformations[2].target_point.type == TargetType.AFTER_LAYER
    assert res_transformations[2].target_point.layer_name == 'layer_0'

    assert res_transformations[3].type == TransformationType.REMOVE
    assert res_transformations[3].target_point.type == TargetType.LAYER
    assert res_transformations[3].target_point.layer_name == 'layer_1'

    assert res_transformations[4].type == TransformationType.INSERT
    assert (res_transformations[4].target_point.type ==
            TargetType.OPERATION_WITH_WEIGHTS)
    assert res_transformations[4].target_point.layer_name == 'layer_0'
    assert res_transformations[4].target_point.weights_attr_name == 'weight_0'
Example 7
def strip_model_from_masks(model: tf.keras.Model,
                           op_names: List[str]) -> tf.keras.Model:
    if not isinstance(model, tf.keras.Model):
        raise ValueError(
            f'Expected model to be a `tf.keras.Model` instance but got: {type(model)}'
        )

    transformations = TFTransformationLayout()
    for wrapped_layer, weight_attr, op in get_nncf_operations(model, op_names):
        apply_mask(wrapped_layer, weight_attr, op)
        transformations.register(
            TFRemovalCommand(target_point=TFOperationWithWeights(
                wrapped_layer.name,
                weights_attr_name=weight_attr,
                operation_name=op.name)))

    return TFModelTransformer(model).transform(transformations)
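A hedged usage sketch for strip_model_from_masks: the compressed model and the operation-name list below are assumptions (e.g. the names a sparsity or pruning builder recorded while wrapping the layers). The call applies each binary mask to its weight and removes the masking operations from the model:

# `compressed_model` and `sparsity_op_names` are hypothetical placeholders for a model
# produced by an NNCF builder and the operation names that builder recorded.
stripped_model = strip_model_from_masks(compressed_model, sparsity_op_names)
assert isinstance(stripped_model, tf.keras.Model)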
def apply_insert_before(model):
    converter = TFModelConverterFactory.create(model)

    transformations = TFTransformationLayout()
    qconfig = QuantizerConfig(num_bits=8,
                              mode=QuantizationMode.SYMMETRIC,
                              signedness_to_force=None,
                              per_channel=False)

    functional_model = is_functional_model(model)
    for i, layer in enumerate(model.layers):
        # Insertion before input layer is not supported
        if isinstance(layer, layers.InputLayer):
            continue

        original_node_name = layer.name
        if functional_model:
            _, layer_info = converter.get_layer_info_for_node(
                original_node_name)
            instance_idx = layer_info.instance_idx
        else:
            instance_idx = 0

        inputs = [layer.input] if isinstance(layer.input, tf.Tensor) else layer.input

        for port, _ in enumerate(inputs):
            fake_quantize_name = f'FakeQuantize_{i}.{port}/{original_node_name}'
            fake_quantize_layer = FakeQuantize(
                TFQuantizerSpec.from_config(qconfig, narrow_range=False, half_range=False),
                name=fake_quantize_name)

            transformations.register(
                TFInsertionCommand(
                    target_point=commands.TFBeforeLayer(
                        original_node_name,
                        instance_idx=instance_idx,
                        input_port_id=port),
                    callable_object=fake_quantize_layer,
                    priority=TransformationPriority.QUANTIZATION_PRIORITY))

    transformer = TFModelTransformer(model)
    transformed_model = transformer.transform(transformations)
    return transformed_model
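For symmetry with the apply_insert_after sketch above, a hedged sketch reusing the same illustrative toy_model; apply_insert_before skips InputLayer instances, so only the remaining layers receive FakeQuantize nodes on their input ports:

# Reuses the hypothetical `toy_model` from the apply_insert_after sketch.
# The input layer is skipped; every other layer gets one FakeQuantize per input port.
quantized_inputs_model = apply_insert_before(toy_model)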
Example 9
    def get_transformation_layout(self, model: tf.keras.Model) -> TFTransformationLayout:
        """
        Computes necessary model transformations (pruning mask insertions) to enable pruning.

        :param model: The original uncompressed model.
        :return: The instance of the `TransformationLayout` class containing
            a list of pruning mask insertions.
        """
        converter = TFModelConverterFactory.create(model)
        self._graph = converter.convert()
        groups_of_nodes_to_prune = self._pruning_node_selector.create_pruning_groups(self._graph)

        transformations = TFTransformationLayout()
        shared_layers = set()

        self._pruned_layer_groups_info = Clusterization[PrunedLayerInfo](lambda x: x.layer_name)

        for i, group in enumerate(groups_of_nodes_to_prune.get_all_clusters()):
            group_minfos = []
            for node in group.elements:
                layer_name = get_layer_identifier(node)
                layer = model.get_layer(layer_name)
                group_minfos.append(PrunedLayerInfo(node.node_name, layer_name, node.node_id,
                                                    is_prunable_depthwise_conv(node)))

                # Add output_mask to the elements so that mask propagation can run
                # and detect spec_nodes that will be pruned.
                # This should be done for every element of a shared layer.
                node.data['output_mask'] = TFNNCFTensor(tf.ones(node.layer_attributes.out_channels))
                if layer_name in shared_layers:
                    continue
                if node.is_shared():
                    shared_layers.add(layer_name)
                # Check that we need to prune weights in this op
                assert self._is_pruned_layer(layer)
                nncf_logger.info('Adding Weight Pruner in: %s', layer_name)

                _, layer_info = converter.get_layer_info_for_node(node.node_name)
                for weight_def in node.metatype.weight_definitions:
                    transformations.register(
                        self._get_insertion_command_binary_mask(
                            layer_info.layer_name, weight_def.weight_attr_name)
                    )
                if node.metatype.bias_attr_name is not None and \
                        getattr(layer, node.metatype.bias_attr_name) is not None:
                    transformations.register(
                        self._get_insertion_command_binary_mask(
                            layer_info.layer_name, node.metatype.bias_attr_name)
                    )

            cluster = Cluster[PrunedLayerInfo](i, group_minfos, [n.node_id for n in group.elements])
            self._pruned_layer_groups_info.add_cluster(cluster)

        # Propagating masks across the graph to detect spec_nodes that will be pruned
        mask_propagator = MaskPropagationAlgorithm(self._graph, TF_PRUNING_OPERATOR_METATYPES,
                                                   TFNNCFPruningTensorProcessor)
        mask_propagator.mask_propagation()

        # Add masks for all spec layers, because prunable batch norm layers can only be
        # determined at the moment of mask propagation.
        types_spec_layers = [TFBatchNormalizationLayerMetatype] \
            if self._prune_batch_norms else []

        spec_nodes = self._graph.get_nodes_by_metatypes(types_spec_layers)
        for spec_node in spec_nodes:
            layer_name = get_layer_identifier(spec_node)
            layer = model.get_layer(layer_name)
            if spec_node.data['output_mask'] is None:
                # Skip elements that will not be pruned
                continue
            if layer_name in shared_layers:
                continue
            if spec_node.is_shared():
                shared_layers.add(layer_name)
            nncf_logger.info('Adding Weight Pruner in: %s', layer_name)

            _, layer_info = converter.get_layer_info_for_node(spec_node.node_name)
            for weight_def in spec_node.metatype.weight_definitions:
                if spec_node.metatype is TFBatchNormalizationLayerMetatype \
                        and not layer.scale and weight_def.weight_attr_name == 'gamma':
                    nncf_logger.debug('Fused gamma parameter encountered in BatchNormalization layer. '
                                      'Do not add mask to it.')
                    continue

                transformations.register(
                    self._get_insertion_command_binary_mask(
                        layer_info.layer_name, weight_def.weight_attr_name)
                )
            transformations.register(
                self._get_insertion_command_binary_mask(
                    layer_info.layer_name, spec_node.metatype.bias_attr_name)
            )
        return transformations
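As the docstring above notes, the returned layout only describes the pruning mask insertions; applying them is a separate step. A minimal, hedged sketch of that step, where `pruning_builder` is a hypothetical instance of the class that owns this method:

# The transformer pattern matches the other snippets in this listing.
layout = pruning_builder.get_transformation_layout(model)
pruned_model = TFModelTransformer(model).transform(layout)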
Example 10
def test_transformation_layout_insertion_case():
    transformation_layout = TFTransformationLayout()

    check_fn = lambda src, dst: \
        dst.type == TargetType.OPERATION_WITH_WEIGHTS and \
        src.layer_name == dst.layer_name

    command_list = [
        commands.TFInsertionCommand(
            commands.TFLayerWeight('layer_0', 'weight_0'), lambda: 'cmd_0',
            TransformationPriority.SPARSIFICATION_PRIORITY),
        commands.TFInsertionCommand(
            commands.TFLayerWeight('layer_0', 'weight_1'), lambda: 'cmd_1',
            TransformationPriority.SPARSIFICATION_PRIORITY),
        commands.TFInsertionCommand(
            commands.TFLayerWeight('layer_1', 'weight_0'), lambda: 'cmd_2',
            TransformationPriority.SPARSIFICATION_PRIORITY),
        commands.TFMultipleInsertionCommands(
            target_point=commands.TFLayer('layer_0'),
            check_target_points_fn=check_fn,
            commands=[
                commands.TFInsertionCommand(
                    commands.TFLayerWeight('layer_0', 'weight_0'),
                    lambda: 'cmd_3', TransformationPriority.PRUNING_PRIORITY)
            ]),
        commands.TFMultipleInsertionCommands(
            target_point=commands.TFLayer('layer_1'),
            check_target_points_fn=check_fn,
            commands=[
                commands.TFInsertionCommand(
                    commands.TFLayerWeight('layer_1', 'weight_0'),
                    lambda: 'cmd_4', TransformationPriority.PRUNING_PRIORITY),
                commands.TFInsertionCommand(
                    commands.TFLayerWeight('layer_1', 'weight_1'),
                    lambda: 'cmd_5', TransformationPriority.PRUNING_PRIORITY)
            ]),
    ]

    for cmd in command_list:
        transformation_layout.register(cmd)

    res_transformations = transformation_layout.transformations
    assert len(res_transformations) == 2
    assert res_transformations[0].type == TransformationType.MULTI_INSERT
    assert res_transformations[0].target_point.type == TargetType.LAYER
    assert res_transformations[0].target_point.layer_name == 'layer_0'
    assert res_transformations[1].type == TransformationType.MULTI_INSERT
    assert res_transformations[1].target_point.type == TargetType.LAYER
    assert res_transformations[1].target_point.layer_name == 'layer_1'

    res_cmds = res_transformations[0].commands
    assert len(res_cmds) == 2

    res = res_cmds[0].insertion_objects
    assert len(res) == 2
    assert res[0]() == 'cmd_3' and res[1]() == 'cmd_0'

    res = res_cmds[1].insertion_objects
    assert len(res) == 1
    assert res[0]() == 'cmd_1'

    res_cmds = res_transformations[1].commands
    assert len(res_cmds) == 2

    res = res_cmds[0].insertion_objects
    assert len(res) == 2
    assert res[0]() == 'cmd_4' and res[1]() == 'cmd_2'

    res = res_cmds[1].insertion_objects
    assert len(res) == 1
    assert res[0]() == 'cmd_5'