Example 1
def get_nodes_by_type(graph: Graph,
                      types: list,
                      recursively: bool = False) -> list:
    """ Returns all nodes with type from types collection
     :param graph: NetworkX model to collect nodes
     :param types: list of required types
     :param recursively: whether return all nodes from the graph
     and each subgraph or only from the main graph
     :return list of nodes filtered by 'types' collection
      """
    def get_nodes_by_type_from_main_graph(graph, types):
        return [node for t in types for node in graph.get_op_nodes(type=t)]

    if recursively:
        partial_get_nodes_by_type = partial(get_nodes_by_type_from_main_graph,
                                            types=types)
        get_nodes_by_type_recursively = FunctionResultsAccumulator(
            partial_get_nodes_by_type)
        for_graph_and_each_sub_graph_recursively(
            graph, get_nodes_by_type_recursively)
        nodes = [
            node for node in get_nodes_by_type_recursively.results if node
        ]
    else:
        nodes = get_nodes_by_type_from_main_graph(graph, types)
    return nodes
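
The recursive branch relies on FunctionResultsAccumulator to collect what the wrapped function returns while for_graph_and_each_sub_graph_recursively walks the main graph and every subgraph. Below is a minimal sketch of that accumulator pattern, assuming the real class simply stores each call's return value; the names are illustrative, not the MO implementation.

class ResultsAccumulator:
    """Illustrative stand-in for FunctionResultsAccumulator (an assumption)."""

    def __init__(self, func):
        self.func = func
        self.results = []

    def __call__(self, graph):
        # invoked once per graph/subgraph by the recursive walker
        result = self.func(graph)
        if result:
            self.results.extend(result)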
Example 2
def get_node_by_name(graph: Graph,
                     name: str,
                     recursively: bool = False) -> Node:
    """ Returns node by name
    :param graph: NetworkX model to take node
    :param name: name of the node
    :param recursively: whether return all nodes from the graph
    and each subgraph or only from the external graph
    :return node from NetworkX model (of type Node or None if there's no such node)
    """
    if recursively:

        def get_node_by_fullname(graph: Graph, name: str) -> list:
            nodes = graph.get_nodes_with_attributes(
                **dict(kind='op', fullname=name))
            return [Node(graph, nodes[0])] if nodes else None

        partial_get_node_by_fullname = partial(get_node_by_fullname, name=name)
        get_node_by_fullname_func = FunctionResultsAccumulator(
            partial_get_node_by_fullname)
        for_graph_and_each_sub_graph_recursively(graph,
                                                 get_node_by_fullname_func)
        node = get_node_by_fullname_func.results
    else:
        node = graph.get_op_nodes(name=name)

    return node[0] if node else None
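
A hypothetical call site; the node name 'conv_1' is purely illustrative:

# 'conv_1' is an illustrative name, not taken from any real model
node = get_node_by_name(graph, 'conv_1', recursively=True)
if node is not None:
    print(node.soft_get('type'))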
Example 3
def save_restored_graph(graph: Graph, path: str, meta_data, name=None):
    """
    Applies all necessary transformations from the back stage to prepare and save the restored graph and its metadata.
    :param graph: Graph to save
    :param path: Path to saved IR
    :param meta_data: Namespace with converting parameters restored from IR
    :param name: Name for saved IR
    :return:
    """

    if name is None:
        name = graph.name

    if 'data_type' not in meta_data:
        log.debug(
            'Provided `meta_data` does not contain `data_type` parameter. Set `data_type`'
            ' parameter value to `FP32`.')
        # Set data_type to FP32. All restored constants will be saved in provided data type.
        data_type = 'FP32'

        # We need to specify this attribute to pass graph transformations. This information will not be saved into IR.
        # All constants and placeholders will be saved with the same types as restored from the IR.
        graph.graph['cmd_params'].data_type = data_type
    else:
        data_type = data_type_str_to_precision(
            graph.graph['cmd_params'].data_type)

    assert data_type in ['FP16', 'FP32'], '`data_type` value {} is not supported by MO,' \
                                          ' cannot save graph'.format(data_type)

    # List items order matters, do not change it.
    transformation_list = [
        ConvolutionWithGroupsResolver,
        StridedSliceMasksNormalizer,
        PackBinaryWeights,
        BlobNormalizer,
        ConvolutionNormalizer,
        MarkNodesWithShapeValues,
    ]

    # We need to run some specific passes from MO back stage.
    apply_replacements_list(graph, transformation_list)

    # Transformations with enabled=False should be run manually.
    for_graph_and_each_sub_graph_recursively(
        graph,
        RemoveConstOps().find_and_replace_pattern)
    for_graph_and_each_sub_graph_recursively(
        graph,
        CreateConstNodesReplacement().find_and_replace_pattern)

    prepare_emit_ir(graph,
                    data_type,
                    path,
                    name,
                    meta_info=meta_data,
                    used_by_ir_reader=True)
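
A hypothetical invocation; the target path, metadata namespace and IR name are illustrative:

# meta_data here stands for a Namespace restored from an existing IR
save_restored_graph(graph, path='/tmp/ir_out', meta_data=meta_data, name='restored_model')
# Without a 'data_type' entry in meta_data this saves an FP32 IR; otherwise the
# precision is derived from graph.graph['cmd_params'].data_type.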
Example 4
def get_all_operation_nodes(graph: Graph, recursively: bool = False):
    """ Returns sequence of all nodes in graph
    :param graph: NetworkX model to take nodes
    :param recursively: whether return all nodes from the graph
    and each subgraph or only from the external graph
    :return list of all nodes
    """
    if recursively:
        get_all_op_nodes_func = FunctionResultsAccumulator(
            lambda graph: graph.get_op_nodes())
        for_graph_and_each_sub_graph_recursively(graph, get_all_op_nodes_func)
        return get_all_op_nodes_func.results

    return graph.get_op_nodes()
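
The two branches differ only in scope, so a recursive call can never return fewer nodes than the plain one; a hypothetical comparison:

top_level = get_all_operation_nodes(graph)                     # main graph only
everything = get_all_operation_nodes(graph, recursively=True)  # plus every subgraph
assert len(everything) >= len(top_level)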
Example 5
def send_op_names_info(framework: str, graph: Graph):
    """
    This function sends information about the operations in the model.
    :param framework: framework name.
    :param graph: model graph.
    """
    op_counter = Counter()

    def gather_op_statistics(g: Graph, op_c: Counter = op_counter):
        if hasattr(g, 'op_names_statistic'):
            op_c += g.op_names_statistic

    for_graph_and_each_sub_graph_recursively(graph, gather_op_statistics)

    t = tm.Telemetry()
    for op_name in op_counter:
        t.send_event('mo', 'op_count', "{}_{}".format(framework, op_name),
                     op_counter[op_name])
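
gather_op_statistics accumulates across recursive calls through a mutable default argument: the Counter is created once, and every invocation mutates that same object in place. A minimal sketch of the same trick in isolation (names are illustrative):

from collections import Counter

total = Counter()

def gather(g, acc=total):
    # Counter.__iadd__ updates `acc` in place, so `total` sees every merge
    acc += Counter(getattr(g, 'op_names_statistic', {}))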
Example 6
def prepare_emit_ir(graph: Graph, data_type: str, output_dir: str, output_model_name: str,
                    mean_data: [list, None] = None, input_names: list = None, meta_info: dict = None,
                    use_temporary_path=False, used_by_ir_reader=False):
    if input_names is None:
        input_names = []
    if meta_info is None:
        meta_info = {}
    graph.strict_mode = False

    # temporarily disable new FP16 generation
    # if not used_by_ir_reader:
    if True:
        # convert Parameter data types
        convert_data_type.convert_parameters_data_type(graph, data_type)
        # convert blobs (usually weights and biases)
        for sub_graph in [graph] + collect_sub_graphs(graph):
            convert_data_type.convert_blobs(sub_graph, data_type)

    # restore data type for specific inputs/outputs of specific ops to the data types required by nGraph
    for_graph_and_each_sub_graph_recursively(graph, convert_inputs_of_specific_ops)

    for_graph_and_each_sub_graph_recursively(graph, OpVersioning().find_and_replace_pattern)

    # do not run the type inference in sub-graphs. It will be called automatically as part of the type inference of
    # the TensorIterator nodes
    type_infer(graph)

    for_graph_and_each_sub_graph_recursively(graph, RemoveUselessConvert().find_and_replace_pattern)

    ResultRename().find_and_replace_pattern(graph)

    for sub_graph in [graph] + collect_sub_graphs(graph):
        op_order, data_order = determined_sort(get_sorted_outputs(sub_graph))
        mapping = {v: u for u, v in enumerate(op_order)}
        mapping.update({v: u for u, v in enumerate(data_order, start=len(sub_graph))})
        relabel_nodes_inplace_safe(sub_graph, mapping)
        port_renumber(sub_graph)

    tensor_names.propagate_op_name_to_tensor(graph)

    ir_path_suffix = "_tmp" if use_temporary_path else ""

    bin_file = os.path.join(output_dir, '{}{}.bin'.format(output_model_name, ir_path_suffix))
    serialize_constants(graph, bin_file)

    mean_offset = None
    mean_size = None
    if mean_data:
        mean_offset, mean_size = serialize_mean_image(bin_file, mean_data=mean_data)

    generate_ie_ir(graph=graph,
                   file_name=os.path.join(output_dir, '{}{}.xml'.format(output_model_name, ir_path_suffix)),
                   input_names=input_names,
                   mean_offset=mean_offset,
                   mean_size=mean_size,
                   meta_info=meta_info)
    tensor_names.output_tensor_names_map(graph, os.path.join(output_dir, '{}{}.mapping'.format(output_model_name, ir_path_suffix)))
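
Tracing the serialization tail of the function for a hypothetical output_model_name of 'model':

# Illustrative paths derived from the format strings above, with
# use_temporary_path=False (use_temporary_path=True appends a '_tmp' suffix):
#   <output_dir>/model.bin      written by serialize_constants()
#   <output_dir>/model.xml      written by generate_ie_ir()
#   <output_dir>/model.mapping  written by tensor_names.output_tensor_names_map()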
Example 7
    def find_and_replace_pattern(self, graph: Graph):
        fw = graph.graph['fw']
        argv = graph.graph['cmd_params']
        layout = graph.graph['layout']

        for_graph_and_each_sub_graph_recursively(graph, fuse_pad)
        for_graph_and_each_sub_graph_recursively(graph, lambda G: G.clean_up())

        # Mark nodes with attr 'can_be_fused': False to disable fusing for specified nodes
        for_graph_and_each_sub_graph_recursively(
            graph,
            lambda graph: mark_unfused_nodes(graph, argv.finegrain_fusing))

        # Convert the FusedBatchNorm layer to a Mul->Add->Mul->Add sequence;
        # IE doesn't support batchNormInference with 4 inputs, so we have to split it into two ScaleShifts
        for_graph_and_each_sub_graph_recursively(graph, convert_batch_norm)

        if fw == 'caffe':
            # Converting ScaleShift layer to Mul->Add
            for_graph_and_each_sub_graph_recursively(
                graph, convert_scale_shift_to_mul_add)

        for_graph_and_each_sub_graph_recursively(
            graph,
            Div().find_and_replace_pattern)
        for_graph_and_each_sub_graph_recursively(
            graph,
            Sub().find_and_replace_pattern)
        for_graph_and_each_sub_graph_recursively(graph, lambda G: G.clean_up())

        if not argv.disable_fusing:
            if fw != 'caffe':
                # Converting ScaleShift layer to Mul->Add
                for_graph_and_each_sub_graph_recursively(
                    graph, convert_scale_shift_to_mul_add)
                for_graph_and_each_sub_graph_recursively(
                    graph, lambda G: G.clean_up())

            # Fusing the sequences of Mul/Add operations
            for_graph_and_each_sub_graph_recursively(graph,
                                                     fuse_mul_add_sequence)
            for_graph_and_each_sub_graph_recursively(graph,
                                                     lambda G: G.clean_up())

            normalize_eltwise_inputs(graph)
            for_graph_and_each_sub_graph_recursively(graph,
                                                     lambda G: G.clean_up())

            # Fusing linear operation to Convolution
            for_graph_and_each_sub_graph_recursively(graph, fuse_linear_ops)
            for_graph_and_each_sub_graph_recursively(graph,
                                                     lambda G: G.clean_up())

        if not argv.disable_gfusing:
            for_graph_and_each_sub_graph_recursively(
                graph, grouped_convolutions_fusing)
            for_graph_and_each_sub_graph_recursively(graph,
                                                     lambda G: G.clean_up())
            if not argv.disable_fusing:
                for_graph_and_each_sub_graph_recursively(
                    graph, fuse_linear_ops)
                for_graph_and_each_sub_graph_recursively(
                    graph, lambda G: G.clean_up())

        for_graph_and_each_sub_graph_recursively(graph,
                                                 normalize_eltwise_inputs)
        for_graph_and_each_sub_graph_recursively(graph, lambda G: G.clean_up())

        if not argv.disable_fusing:
            MarkNodesToFuseUpToFakeQuantize().find_and_replace_pattern(graph)
            FakeQuantizeFuse().find_and_replace_pattern(graph)
            AddFakeQuantizeFuse().find_and_replace_pattern(graph)
            MulFakeQuantizeFuse().find_and_replace_pattern(graph)
            for_graph_and_each_sub_graph_recursively(graph,
                                                     lambda G: G.clean_up())

        for_graph_and_each_sub_graph_recursively(graph, fuse_pad)
        for_graph_and_each_sub_graph_recursively(graph, lambda G: G.clean_up())

        if layout != 'NHWC' and not argv.disable_resnet_optimization:
            stride_optimization(graph)
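
The body repeats one idiom throughout: run a pass over the graph and all of its subgraphs, then clean up dead nodes before the next pass. A generic sketch of that idiom (the helper name is illustrative):

def run_pass_recursively(graph, transform):
    for_graph_and_each_sub_graph_recursively(graph, transform)
    for_graph_and_each_sub_graph_recursively(graph, lambda g: g.clean_up())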
Example 8
def apply_transform(graph: Graph, replacer_cls, **kwargs):
    """
    Safely executes the transform if it is enabled and validates the graph after the transform execution
    """
    replacer = replacer_cls()
    replacement_id = 'REPLACEMENT_ID'
    if hasattr(replacer, 'replacement_id'):
        replacement_id = replacer.replacement_id

    if hasattr(replacer, 'enabled') and not replacer.enabled:
        log.info("Skip replacer {} (enabled = False)".format(replacer_cls))
        return

    if hasattr(replacer, 'graph_condition') and \
            not all([condition(graph) for condition in replacer.graph_condition]):
        log.info("Skip replacer {} (graph_condition not satisfied)".format(
            replacer_cls))
        return

    log.debug("Run replacer {}".format(replacer_cls))

    try:
        if hasattr(replacer,
                   'run_not_recursively') and replacer.run_not_recursively:
            replacer.find_and_replace_pattern(graph)
        else:
            for_graph_and_each_sub_graph_recursively(
                graph, replacer.find_and_replace_pattern)

        if hasattr(replacer, 'force_clean_up') and replacer.force_clean_up:
            for_graph_and_each_sub_graph_recursively(graph,
                                                     lambda G: G.clean_up())

        if hasattr(replacer,
                   'force_shape_inference') and replacer.force_shape_inference:
            shape_inference(graph)

        if hasattr(replacer,
                   'run_not_recursively') and replacer.run_not_recursively:
            graph.check_empty_graph(replacer_cls)
            graph.check_shapes_consistency()
        else:
            for_graph_and_each_sub_graph_recursively(
                graph, lambda _: graph.check_empty_graph(replacer_cls))
            for_graph_and_each_sub_graph_recursively(
                graph, lambda _: graph.check_shapes_consistency())

    except Error as err:
        raise Error(
            'Exception occurred during running replacer "{}" ({}): {}'.format(
                replacement_id,
                replacer_cls,
                str(err).replace('[REPLACEMENT_ID]', replacement_id),
            )) from err
    except FrameworkError as err:
        raise FrameworkError('{}'.format(str(err))) from err
    except Exception as err:
        raise Exception(
            'Exception occurred during running replacer "{} ({})": {}'.format(
                replacement_id,
                replacer_cls,
                str(err).replace('[REPLACEMENT_ID]', replacement_id),
            )) from err
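
apply_transform drives replacers through an attribute protocol: enabled, graph_condition, run_not_recursively, force_clean_up and force_shape_inference are all optional and probed with hasattr. A minimal hypothetical replacer showing the shape of that contract (not an MO built-in):

class IllustrativeReplacer:
    enabled = True                 # False would skip the replacer entirely
    run_not_recursively = True     # restrict to the main graph
    force_clean_up = False
    force_shape_inference = False
    graph_condition = [lambda graph: 'cmd_params' in graph.graph]

    def find_and_replace_pattern(self, graph):
        pass  # the actual graph rewrite would go here

apply_transform(graph, IllustrativeReplacer)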
Example 9
    def load(self, graph: Graph):
        argv = graph.graph['cmd_params']
        if argv.tensorflow_custom_layer_libraries:
            libraries = argv.tensorflow_custom_layer_libraries.split(',')
            for library in libraries:
                log.info('Loading library "{}" with custom operations'.format(
                    library))
                tf_v1.load_op_library(library)

        graph_def, variables_values, framework, inputs_outputs_order = load_tf_graph_def(
            graph_file_name=argv.input_model,
            is_binary=not argv.input_model_is_text,
            checkpoint=argv.input_checkpoint,
            user_output_node_names_list=argv.output,
            model_dir=argv.saved_model_dir,
            meta_graph_file=argv.input_meta_graph,
            saved_model_tags=argv.saved_model_tags)

        if inputs_outputs_order is not None and isinstance(
                inputs_outputs_order, tuple):
            graph.inputs_order = inputs_outputs_order[0]
            graph.outputs_order = inputs_outputs_order[1]

        send_framework_info(framework)

        try:
            tf_v1.import_graph_def(graph_def, name='')
        except Exception:
            log.warning(
                "TensorFlow post-processing of loaded model was unsuccessful. "
                "This is an optional step that Model Optimizer performs for any input model but it is not usually "
                "required for all models. "
                "It likely means that the original model is ill-formed. "
                "Model Optimizer will continue converting this model.")

        log.debug("Number of nodes in graph_def: {}".format(len(
            graph_def.node)))  # pylint: disable=no-member

        if argv.tensorboard_logdir:
            tensorboard_util.dump_for_tensorboard(graph_def,
                                                  argv.tensorboard_logdir)

        update_extractors_with_extensions(tf_op_extractors)

        try:
            protobuf2nx(graph, graph_def)
        except Exception as e:
            raise Error(
                'Cannot pre-process TensorFlow graph after reading from model file "{}". ' \
                'File is corrupt or has unsupported format. Details: {}. ' +
                refer_to_faq_msg(44),
                argv.model_name,
                str(e)
            ) from e

        graph.__setattr__('name', argv.model_name)
        # 'layout' parameter change may cause an issue in EltwiseInputReshape replacer
        # and convert_nhwc_to_nchw(graph)
        graph.graph['layout'] = 'NCHW' if argv.disable_nhwc_to_nchw else 'NHWC'
        graph.graph['fw'] = 'tf'

        graph.graph['variables_values'] = variables_values
        del variables_values

        used_tensors = restore_edges(graph, get_tf_edges)

        # Tensor name information corresponding to a node is stored on its outgoing edges.
        # As output nodes do not have outgoing edges, fake outputs are required. In the code below,
        # an Identity node is added for each output, and the tensor name for the output is kept
        # on the (output, fake output) edge. After the transformation that adds Result nodes,
        # the fake outputs are deleted from the graph.
        add_outputs_identity(
            graph, graph.nodes - used_tensors,
            lambda g, output, fake_node_name: g.add_edges_from(
                [create_tf_edge(output, fake_node_name, 0)]))

        remove_control_dependency_inputs(graph)

        graph.check_empty_graph(
            'protobuf2nx. It may happen due to problems with loaded model')
        extract_node_attrs(
            graph, lambda node: tf_op_extractor(
                node, check_for_duplicates(tf_op_extractors)))

        # try to detect layout from the nodes of the graph. If there are no convolution nodes in N(D)HWC layout then we
        # consider that the graph is in NCHW layout and no layout conversion should be performed
        if not argv.disable_nhwc_to_nchw and not graph_or_sub_graph_has_nhwc_ops(
                graph):
            if not argv.silent:
                log.debug('"disable_nhwc_to_nchw" was automatically enabled.')
            for_graph_and_each_sub_graph_recursively(
                graph, update_cmd_params_and_layout)

        send_op_names_info(framework, graph)
        send_shapes_info(framework, graph)
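
Everything the loader consumes comes from graph.graph['cmd_params']; a hypothetical minimal namespace covering just the attributes referenced above:

import argparse

argv = argparse.Namespace(
    input_model='model.pb', input_model_is_text=False,
    input_checkpoint=None, output=None,
    saved_model_dir=None, input_meta_graph=None, saved_model_tags=None,
    tensorflow_custom_layer_libraries=None, tensorboard_logdir=None,
    disable_nhwc_to_nchw=False, model_name='model', silent=True)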
Example 10
def emit_ir(graph: Graph, argv: argparse.Namespace):
    NormalizeTI().find_and_replace_pattern(graph)
    for_graph_and_each_sub_graph_recursively(
        graph,
        RemoveConstOps().find_and_replace_pattern)
    for_graph_and_each_sub_graph_recursively(
        graph,
        CreateConstNodesReplacement().find_and_replace_pattern)

    if 'feManager' in argv:
        del argv.feManager

    mean_data = deepcopy(graph.graph['mf']) if 'mf' in graph.graph else None
    input_names = deepcopy(
        graph.graph['input_names']) if 'input_names' in graph.graph else []

    prepare_emit_ir(graph=graph,
                    data_type=graph.graph['cmd_params'].data_type,
                    output_dir=argv.output_dir,
                    output_model_name=argv.model_name,
                    mean_data=mean_data,
                    input_names=input_names,
                    meta_info=get_meta_info(argv),
                    use_temporary_path=True)

    # This graph cleanup is required to avoid double memory consumption
    graph.clear()

    if not (argv.framework == 'tf'
            and argv.tensorflow_custom_operations_config_update):
        output_dir = argv.output_dir if argv.output_dir != '.' else os.getcwd()
        orig_model_name = os.path.normpath(
            os.path.join(output_dir, argv.model_name))

        return_code = "not executed"
        try:
            if not argv.legacy_ir_generation:
                from openvino.tools.mo.back.offline_transformations import apply_offline_transformations
                apply_offline_transformations(orig_model_name, argv)
                if "compress_fp16" in argv and argv.compress_fp16:
                    # restore data_type cmd parameter
                    argv.data_type = 'FP16'
                return_code = 0
        except Exception as e:
            return_code = "failed"
            log.error(e)

        message = str(
            dict({
                "platform": platform.system(),
                "mo_version": get_simplified_mo_version(),
                "ie_version": get_simplified_ie_version(env=os.environ),
                "python_version": sys.version,
                "return_code": return_code
            }))
        t = tm.Telemetry()
        t.send_event('mo', 'offline_transformations_status', message)

        if return_code != 0:
            raise Error("offline transformations step has failed.")

        for suf in [".xml", ".bin", ".mapping"]:
            # remove existing files
            path_to_file = orig_model_name + "_tmp" + suf
            if os.path.exists(path_to_file):
                os.remove(path_to_file)

        # add meta information to IR
        append_ir_info(file=orig_model_name,
                       meta_info=get_meta_info(argv),
                       mean_data=mean_data,
                       input_names=input_names)

        print('[ SUCCESS ] Generated IR version {} model.'.format(
            get_ir_version(argv)))
        print('[ SUCCESS ] XML file: {}.xml'.format(orig_model_name))
        print('[ SUCCESS ] BIN file: {}.bin'.format(orig_model_name))

    return 0
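
Reconstructing the file flow from the code above (paths illustrative; step 2 is an inference from the surrounding cleanup, not confirmed by this snippet):

#   1. prepare_emit_ir(..., use_temporary_path=True) writes <model>_tmp.xml/.bin/.mapping
#   2. apply_offline_transformations() presumably consumes the temporary IR and
#      emits the final <model>.xml/.bin
#   3. the <model>_tmp.{xml,bin,mapping} files are removed
#   4. append_ir_info() attaches the meta information to the final IR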
Example 11
    def test_pad_fusing(self):
        nodes = {
            **shaped_parameter('input', shape_array([1, 3, 248, 248])),
            **valued_const_with_data('pads_begin', shape_array([0, 0, 1, 1])),
            **valued_const_with_data('pads_end', shape_array([0, 0, 1, 1])),
            **valued_const_with_data('fill_value', shape_array(0.0)),
            **valued_const_with_data('weights',
                                     shape_array(np.zeros([3, 16, 4, 4]))),
            **regular_op_with_empty_data('pad', {
                'type': 'Pad',
                'op': 'Pad',
                'infer': Pad.infer,
                'mode': 'constant'
            }),
            **regular_op_with_empty_data(
                'conv',
                {
                    'type': 'Convolution',
                    'op': 'Convolution',
                    'infer': Convolution.infer,
                    # zeros, no paddings
                    'pad': np.array([[0, 0], [0, 0], [0, 0], [0, 0]]),
                    'dilation': np.array([1, 1, 1, 1]),
                    'stride': np.array([1, 1, 1, 1]),
                    'group': 1,
                    'kernel_spatial_idx': np.array([2, 3]),
                    'output': 64,
                    'spatial_dims': np.array([2, 3]),
                    'channel_dims': np.array([1]),
                    'batch_dims': np.array([0]),
                    'input_feature_channel': 1,
                    'output_feature_channel': 0
                }),
            **result(),
        }

        graph = build_graph(nodes_attrs=nodes,
                            edges=[
                                *connect('input', '0:pad'),
                                *connect('pads_begin', '1:pad'),
                                *connect('pads_end', '2:pad'),
                                *connect('fill_value', '3:pad'),
                                *connect('pad', '0:conv'),
                                *connect('weights', '1:conv'),
                                *connect('conv', 'output'),
                            ],
                            nodes_with_edges_only=True)

        graph.graph['layout'] = 'NCHW'
        graph.stage = 'middle'

        graph = partial_infer(graph)
        mark_shape_of_sugraph_as_unfusable(graph)
        for_graph_and_each_sub_graph_recursively(graph, fuse_pad)
        graph.clean_up()

        conv_fused_with_pad = regular_op_with_empty_data(
            'conv',
            {
                'type': 'Convolution',
                'op': 'Convolution',
                # ones are taken from fused Pad
                'pad': np.array([[0, 0], [0, 0], [1, 1], [1, 1]]),
                'dilation': np.array([1, 1, 1, 1]),
                'stride': np.array([1, 1, 1, 1]),
                'group': 1,
                'kernel_spatial_idx': np.array([2, 3]),
                'output': 64,
                'spatial_dims': np.array([2, 3]),
                'channel_dims': np.array([1]),
                'batch_dims': np.array([0]),
                'input_feature_channel': 1,
                'output_feature_channel': 0,
                'infer': Convolution.infer
            })

        graph_ref = build_graph(nodes_attrs=nodes,
                                update_attributes=conv_fused_with_pad,
                                edges=[
                                    *connect('input', '0:conv'),
                                    *connect('weights', '1:conv'),
                                    *connect('conv', 'output'),
                                ],
                                nodes_with_edges_only=True)
        graph_ref.graph['layout'] = 'NCHW'
        graph_ref.stage = 'middle'

        (flag, resp) = compare_graphs(graph,
                                      graph_ref,
                                      'output',
                                      check_op_attrs=True)
        self.assertTrue(flag, resp)
Example 12
    def test_pad_fusing_shape_subgraph(self):
        nodes = {
            **shaped_parameter('input', shape_array([1, 3, 1020, 1020])),
            **regular_op_with_empty_data(
                'input_shape', {
                    'type': 'ShapeOf',
                    'op': 'ShapeOf',
                    'output_type': np.int64,
                    'infer': Shape.infer
                }),
            **regular_op_with_empty_data('gathered_shape', {
                'type': 'Gather',
                'batch_dims': 0,
                'infer': Gather.infer
            }),
            **valued_const_with_data('axis', np.array([0])),
            **valued_const_with_data('indices', np.array([2, 3])),
            **regular_op_with_empty_data(
                'div', {
                    'type': 'Div',
                    'infer': lambda node: eltwise_infer(
                        node, lambda a, b: a / b)
                }),
            **regular_op_with_empty_data(
                'sub_1', {
                    'type': 'Sub',
                    'infer': lambda node: eltwise_infer(
                        node, lambda a, b: a - b)
                }),
            **regular_op_with_empty_data(
                'sub_2', {
                    'type': 'Sub',
                    'infer': lambda node: eltwise_infer(
                        node, lambda a, b: a - b)
                }),
            **valued_const_with_data('div_const', shape_array([2])),
            **valued_const_with_data('sub_const', shape_array([512])),
            **regular_op_with_empty_data('pad', {
                'type': 'Pad',
                'op': 'Pad',
                'infer': Pad.infer,
                'mode': 'constant'
            }),
            **regular_op_with_empty_data('concat', {
                'type': 'Concat',
                'op': 'Concat',
                'axis': 0,
                'infer': concat_infer
            }),
            **valued_const_with_data('pad_end', shape_array([0, 0, 0, 0])),
            **valued_const_with_data('blank_zeros', shape_array([0, 0])),
            **regular_op_with_empty_data(
                'conv', {
                    'type': 'Convolution',
                    'op': 'Convolution',
                    'pad': np.array([[0, 0], [0, 0], [0, 0], [0, 0]]),
                    'dilation': np.array([1, 1, 1, 1]),
                    'stride': np.array([1, 1, 1, 1]),
                    'group': 1,
                    'kernel_spatial_idx': np.array([2, 3]),
                    'output': 64,
                    'spatial_dims': np.array([2, 3]),
                    'channel_dims': np.array([1]),
                    'batch_dims': np.array([0]),
                    'input_feature_channel': 1,
                    'output_feature_channel': 0,
                    'infer': Convolution.infer
                }),
            **valued_const_with_data('weights',
                                     shape_array(np.zeros([3, 16, 4, 4]))),
            **result(),
        }

        graph = build_graph(
            nodes_attrs=nodes,
            update_attributes={
                'gathered_shape_d': {
                    'kind': 'data',
                    'value': shape_array([256, 256]),
                    'shape': shape_array([2])
                }
            },
            edges=[
                *connect('input', 'input_shape', skip_data=True),
                *connect('input_shape', '0:gathered_shape'),
                *connect('indices', '1:gathered_shape'),
                *connect('axis', '2:gathered_shape'),
                *connect('gathered_shape', 'sub_1'),
                *connect('sub_const', 'sub_1'),
                *connect('sub_1', 'div'),
                *connect('div_const', 'div'),
                *connect('div', '0:sub_2'),
                *connect('sub_1', '1:sub_2'),
                *connect('input', '0:pad'),
                *connect('blank_zeros', '0:concat'),
                *connect('sub_2', '1:concat'),
                *connect('concat', '1:pad'),
                *connect('pad_end', '2:pad'),
                *connect('pad', '0:conv'),
                *connect('weights', '1:conv'),
                *connect('conv', 'output'),
            ],
            nodes_with_edges_only=True)

        graph.graph['layout'] = 'NCHW'
        graph.stage = 'middle'

        graph = partial_infer(graph)

        # graph must remain unchanged
        graph_ref = graph.copy()

        mark_shape_of_sugraph_as_unfusable(graph)
        for_graph_and_each_sub_graph_recursively(graph, fuse_pad)

        (flag, resp) = compare_graphs(graph,
                                      graph_ref,
                                      'output',
                                      check_op_attrs=True)
        self.assertTrue(flag, resp)