Example #1
import logging as log

import numpy as np

from openvino.tools.mo.front.common.partial_infer.utils import int64_array
from openvino.tools.mo.front.onnx.extractors.utils import onnx_attr, get_onnx_autopad
from openvino.tools.mo.utils.error import Error


def common_onnx_pool_extractor(node):
    kernel_shape = onnx_attr(node, 'kernel_shape', 'ints', default=None, dst_type=int64_array)
    final_kernel_shape = int64_array([1, 1, *kernel_shape]) if kernel_shape is not None else None

    pads = onnx_attr(node, 'pads', 'ints', default=None, dst_type=int64_array)

    if kernel_shape is not None and pads is not None and kernel_shape.size * 2 != pads.size:
        log.warning(
            'Node {} has pads = {} which is ill-formed: pads should contain 2 * len(kernel_shape) elements.'
            .format(node.soft_get('name', node.id), pads))

        # Try to convert slightly incorrect models with insufficient pad parameters
        assert pads.size == kernel_shape.size
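        # Illustrative assumption: the model listed only per-axis "begin" pads, so we duplicate
        # them as "end" pads, e.g. pads = [1, 1] for a 2D kernel becomes [1, 1, 1, 1].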
        pads = np.concatenate([pads, pads])
        log.warning('Extended pads to {}'.format(pads))

    final_pads = None
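    # ONNX lays pads out as [x1_begin, x2_begin, ..., x1_end, x2_end, ...]; reshaping to [2, -1]
    # and transposing yields per-axis [begin, end] pairs. For example (illustrative),
    # pads = [1, 2, 3, 4] -> [[1, 3], [2, 4]], so final_pads = [[0, 0], [0, 0], [1, 3], [2, 4]].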
    if pads is not None:
        assert len(pads) % 2 == 0
        pads = pads.reshape([2, -1])
        pads = np.transpose(pads)
        final_pads = int64_array([[0, 0], [0, 0], *pads])

    # Extract strides attribute
    # If 'strides' is not specified, it is set to the default (1) in the infer function
    strides = onnx_attr(node, 'strides', 'ints', default=None, dst_type=int64_array)
    final_strides = int64_array([1, 1, *strides]) if strides is not None else None

    dilation = onnx_attr(node, 'dilations', 'ints', default=None, dst_type=int64_array)
    final_dilation = int64_array([1, 1, *dilation]) if dilation is not None else None

    # exclude_pad = True only when count_include_pad == 0
    exclude_pad = onnx_attr(node, 'count_include_pad', 'i', default=0) == 0

    global_pooling = False
    if node.op in ['MaxPool', 'GlobalMaxPool']:
        method = 'max'
    elif node.op in ['AveragePool', 'GlobalAveragePool']:
        method = 'avg'
    else:
        raise Error('Unsupported pooling op {}', node.op)

    # TODO: check whether this is the correct choice for ONNX
    pooling_convention = 'valid'  # for the Caffe framework the rounding type should be 'ceil'
    rt = 'floor' if onnx_attr(node, 'ceil_mode', 'i', default=0) == 0 else 'ceil'

    auto_pad = onnx_attr(node,
                         'auto_pad',
                         's',
                         default=None,
                         dst_type=get_onnx_autopad)
    if auto_pad:
        rt = 'ceil'

    attrs = {
        'op': node.op,
        'auto_pad': auto_pad,
        'window': final_kernel_shape,
        'stride': final_strides,
        'pad': final_pads,
        'pad_spatial_shape': int64_array(pads) if pads is not None else None,
        'pool_method': method,
        'exclude_pad': exclude_pad,
        'global_pool': global_pooling,
        'output_spatial_shape': None,
        'rounding_type': rt,
        'dilation': final_dilation,
        'spatial_dims': None,
        'channel_dims': int64_array([1]),
        'batch_dims': int64_array([0]),
        'layout': 'NCHW',
        'pooling_convention': pooling_convention
    }
    return attrs
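
# Illustrative trace (not from the original source): for an ONNX MaxPool with
# kernel_shape = [2, 2], strides = [2, 2] and pads = [0, 0, 0, 0], the extractor above produces
# window = [1, 1, 2, 2], stride = [1, 1, 2, 2], pad = [[0, 0], [0, 0], [0, 0], [0, 0]],
# pool_method = 'max' and rounding_type = 'floor' (ceil_mode defaults to 0).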
Example #2
from openvino.tools.mo.back.add_outputs_recursive import AddOutputRecursive
from openvino.tools.mo.ops.If import If
from openvino.tools.mo.ops.loop import Loop
from openvino.tools.mo.ops.tensor_iterator import TensorIterator
from openvino.tools.mo.front.common.partial_infer.elemental import copy_shape_infer
from openvino.tools.mo.front.common.partial_infer.utils import int64_array, dynamic_dimension_value, shape_array
from openvino.tools.mo.graph.graph import Node
from unit_tests.utils.graph import build_graph, regular_op_with_empty_data, result, connect, shaped_parameter, \
    valued_const_with_data, shaped_const_with_data, regular_op_with_shaped_data

# test for Loop
main_graph_nodes = {
    **shaped_parameter("IN_1", [1, 4, 64, 54]),
    **shaped_parameter("IN_2", [1, 4, 64, 54]),
    **valued_const_with_data("M", int64_array([5])),
    **valued_const_with_data("cond", int64_array([1])),
    **regular_op_with_empty_data(
        'Loop', {
            'op': 'Loop',
            'type': 'Loop',
            'sub_graphs': ['body'],
            'body': None,
            'input_port_map': [{
                'external_port_id': 1,
                'internal_layer_id': 2,
                'axis': None
            }, {
Example #3
    def replace_sub_graph(self, graph: Graph, match: dict):
        node = match['op']
        name = node.soft_get('name', node.id)
        axis = node.axis
        input_shape_node = Shape(graph, {
            'name': name + '/ShapeOf'
        }).create_node()
        range_node = create_op_with_const_inputs(graph, Range, {
            0: mo_array(node.start),
            2: mo_array(node.step)
        }, {'name': name + '/Range'})
        node.in_port(0).get_connection().set_destination(
            input_shape_node.in_port(0))

        if axis is not None:
            '''
            Replace the arange_like op with the sub-graph:
            ShapeOf - Gather - Range
            '''
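            # Gather picks the size of dimension `axis` out of the input shape;
            # Range(start, <that size>, step) then reproduces arange_like along that axis.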
            gather_node = create_op_with_const_inputs(graph, Gather, {
                1: int64_array([axis]),
                2: int64_array(0)
            }, {'name': name + '/Gather'})
            input_shape_node.out_port(0).connect(gather_node.in_port(0))
            gather_node.out_port(0).connect(range_node.in_port(1))
            node.out_port(0).get_connection().set_source(
                range_node.out_port(0))
            rename_nodes([(node, name + '/ShouldBeDeleted'),
                          (range_node, name)])
        else:
            r'''
            Replace the arange_like op with the sub-graph:
                    |
                 ShapeOf -----------|
                    |               |
                 ReduceProd         |
                    |               |
                  Range             |
                    |               |
                 Reshape -----------|
                    |
            '''

            flattened_shape_node = create_op_with_const_inputs(
                graph, ReduceProd, {1: int64_array([0])}, {
                    'name': input_shape_node.name + '/ReduceProd',
                    'keep_dims': True
                })
            reshape_backward_node = Reshape(graph, {
                'name': name + '/Reshape_backward'
            }).create_node()

            input_shape_node.out_port(0).connect(
                flattened_shape_node.in_port(0))
            flattened_shape_node.out_port(0).connect(range_node.in_port(1))
            range_node.out_port(0).connect(reshape_backward_node.in_port(0))
            input_shape_node.out_port(0).connect(
                reshape_backward_node.in_port(1))
            node.out_port(0).get_connection().set_source(
                reshape_backward_node.out_port(0))
            rename_nodes([(node, name + '/ShouldBeDeleted'),
                          (reshape_backward_node, name)])

        if node.repeat != 1:
            r"""
            First, we generate a corrected stop value for Range: new_stop_value = stop_value // repeat + 1.
            Then we repeat each value of the interval using Tile. The resulting interval may be longer
            than required, so we trim it with Slice.

            The sub-graph after the Range node looks like:

            Range - Reshape([-1, 1]) - Tile([1, repeat]) - Reshape(-1) - Slice
            """

            if node.repeat < 1:
                raise Error(
                    "Unexpected value {} of the attribute 'repeat' for the node {}"
                    .format(node.repeat, name))
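
            # Worked example (illustrative): stop = 5, repeat = 2 gives new_stop = 5 // 2 + 1 = 3;
            # Range -> [0, 1, 2], Reshape([-1, 1]) -> [[0], [1], [2]],
            # Tile([1, 2]) -> [[0, 0], [1, 1], [2, 2]], Reshape(-1) -> [0, 0, 1, 1, 2, 2],
            # and Slice trims the result back to 5 elements: [0, 0, 1, 1, 2].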

            div_node = create_op_with_const_inputs(
                graph, Div, {1: int64_array([node.repeat])},
                {'name': name + '/Divide'})
            add_node = create_op_with_const_inputs(
                graph, Add, {1: int64_array([1])},
                {'name': div_node.name + '/Add'})
            cast_node = Cast(graph, {
                'name': name + '/ConvertToI64',
                'dst_type': np.int64
            }).create_node()

            cast_node.out_port(0).connect(div_node.in_port(0))
            div_node.out_port(0).connect(add_node.in_port(0))
            range_node.in_port(1).get_connection().set_destination(
                cast_node.in_port(0))
            add_node.out_port(0).connect(range_node.in_port(1))

            tile_forward_reshape = create_op_with_const_inputs(
                graph, Reshape, {1: int64_array([-1, 1])},
                {'name': range_node.name + '/ForwardReshape'})
            tile = create_op_with_const_inputs(
                graph, Tile, {1: int64_array([1, node.repeat])},
                {'name': tile_forward_reshape.name + '/Tile'})
            tile_backward_reshape = create_op_with_const_inputs(
                graph, Reshape, {1: int64_array([-1])},
                {'name': tile.name + '/BackwardReshape'})
            slice_node = create_op_with_const_inputs(
                graph, Slice, {
                    1: int64_array([0]),
                    3: int64_array([0]),
                    4: int64_array([1])
                }, {'name': tile_backward_reshape.name + '/Slice'})

            tile_forward_reshape.out_port(0).connect(tile.in_port(0))
            tile.out_port(0).connect(tile_backward_reshape.in_port(0))
            tile_backward_reshape.out_port(0).connect(slice_node.in_port(0))
            slice_node.in_port(2).connect(div_node.in_port(0).get_source())

            range_node.out_port(0).get_connection().set_source(
                slice_node.out_port(0))
            range_node.out_port(0).connect(tile_forward_reshape.in_port(0))

            if axis is not None:
                rename_nodes([(range_node, name + '/Range'),
                              (slice_node, name)])

        # MXNet arange_like op has no stop attribute and the result tensor always matches the input shape, so
        # we have to correct the stop value for the Range node if step != 1 or start != 0
        if node.step != 1:
            # If step attribute is not integer, we will generate an interval with a larger size and then reduce it
            # using Slice
            true_elements_count_port = range_node.in_port(1).get_source()
            mul_value = np.ceil(node.step) if node.step > 0 else np.floor(node.step)
            stop_value = create_op_with_const_inputs(
                graph,
                Mul,
                port_value_dict={1: mo_array(mul_value)},
                op_attrs={'name': range_node.name + '/Stop'})
            range_node.in_port(1).get_connection().insert_node(stop_value)

            slice_range_values = create_op_with_const_inputs(
                graph, Slice, {
                    1: int64_array([0]),
                    3: int64_array([0]),
                    4: int64_array([1])
                }, {'name': range_node.name + '/Slice'})
            slice_range_values.in_port(2).connect(true_elements_count_port)
            range_node.out_port(0).get_connection().insert_node(
                slice_range_values)

            if axis is not None and node.repeat == 1:
                rename_nodes([(range_node, name + '/Range'),
                              (slice_range_values, name)])

        if node.start != 0:
            correct_stop_value = create_op_with_const_inputs(
                graph,
                Add,
                port_value_dict={1: mo_array(node.start)},
                op_attrs={'name': range_node.name + '/Correct_Stop'})
            range_node.in_port(1).get_connection().insert_node(
                correct_stop_value)

        # Range node supports only scalar inputs
        squeeze_node = create_op_with_const_inputs(
            graph,
            Squeeze,
            port_value_dict={1: int64_array(0)},
            op_attrs={"name": range_node.name + '/Stop/Squeeze'})
        range_node.in_port(1).get_connection().insert_node(squeeze_node)
Example #4
         'top_k': 1,
         'axis': 0,
         'output_type': np.int32,
         'remove_values_output': True
     }),
 **regular_op_with_empty_data(
     'argmin', {
         'op': 'ArgMin',
         'type': None,
         'top_k': 1,
         'axis': 0,
         'output_type': np.int32,
         'remove_values_output': True
     }),
 **result('result'),
 **valued_const_with_data('axis_const', int64_array([1])),
 **regular_op(
     'topk', {
         'op': 'TopK',
         'type': 'TopK',
         'sort': 'index',
         'index_element_type': np.int32
     }),
 **empty_data('topk_out_0_data'),
 **empty_data('topk_out_1_data'),
 **regular_op_with_empty_data('topk_scalar', {
     'op': 'Const',
     'type': 'Const',
     'value': int64_array([1]),
     'shape': []
 }),
    def replace_sub_graph(self, graph: Graph, match: dict):
        identity_spw = match['identity_spw']
        gather0_1 = match['gather0_1']
        gather0_2 = match['gather0_2']
        greaterequal0 = match['greaterequal0']
        sparse_fill_empty_rows = match['sparse_fill_empty_rows']
        gather = match['gather']
        select = match['select']
        where0 = match['where0']
        sparse_segment_op = match['sparse_segment_op']
        output_node_name = select.soft_get('name', select.id)

        log.debug('Found EmbeddingSparseSegmentsSingleFeature pattern after {} with name {}'.format(
            sparse_fill_empty_rows.op,
            sparse_fill_empty_rows.name))

        split_for_indices = create_op_with_const_inputs(graph, Split, {1: int64_array(1)}, {'num_splits': 2})
        squeeze_for_indices = create_op_with_const_inputs(graph, Squeeze, {1: int64_array([1])})
        split_for_dense_shape = create_op_with_const_inputs(graph, Split, {1: int64_array(0)}, {'num_splits': 2})
        squeeze_to_scalar = create_op_with_const_inputs(graph, Squeeze, {1: int64_array([0])})
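        # The sparse indices arrive as an [N, 2] tensor: Split(axis=1) + Squeeze take column 0
        # (the row ids) as segment ids, and Split(axis=0) + Squeeze take the first element of
        # dense_shape (the number of rows) as the number of segments.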

        # TODO: remove the Cast nodes once EmbeddingSegmentSum (new version) supports segment_ids,
        #  indices, and num_segments of different integer types, because real TensorFlow models
        #  show that such combinations occur.
        cast_indices = Cast(graph, {'name': output_node_name + '/CastIndices', 'dst_type': np.int32}).create_node()
        cast_segment_ids = Cast(graph, {'name': output_node_name + '/CastSegmentIds',
                                        'dst_type': np.int32}).create_node()
        cast_default_value = Cast(graph, {'name': output_node_name + '/CastDefaultValue',
                                          'dst_type': np.int32}).create_node()
        cast_num_segments = Cast(graph, {'name': output_node_name + '/CastSegmentsNumber',
                                         'dst_type': np.int32}).create_node()
        if sparse_segment_op.op == 'SparseSegmentSum':
            embedding_segments_op = EmbeddingSegmentsSum(graph, {'name': output_node_name}).create_node()
        else:
            embedding_segments_op = EmbeddingSegmentsMean(graph, {'name': output_node_name}).create_node()
        rename_nodes([(select, output_node_name + '/AbandonedName'), (embedding_segments_op, output_node_name)])

        # connect parameters table
        gather.in_port(0).get_connection().set_destination(embedding_segments_op.in_port(0))
        # connect indices values
        greaterequal0.in_port(0).get_connection().set_destination(cast_indices.in_port(0))
        embedding_segments_op.in_port(1).connect(cast_indices.out_port(0))
        # split and connect segment ids
        gather0_1.in_port(0).get_connection().set_destination(split_for_indices.in_port(0))
        squeeze_for_indices.in_port(0).connect(split_for_indices.out_port(0))
        cast_segment_ids.in_port(0).connect(squeeze_for_indices.out_port(0))
        embedding_segments_op.in_port(2).connect(cast_segment_ids.out_port(0))
        # split and connect number of segments
        identity_spw.in_port(0).get_connection().set_destination(split_for_dense_shape.in_port(0))
        squeeze_to_scalar.in_port(0).connect(split_for_dense_shape.out_port(0))
        cast_num_segments.in_port(0).connect(squeeze_to_scalar.out_port(0))
        embedding_segments_op.in_port(3).connect(cast_num_segments.out_port(0))
        # connect default value
        sparse_fill_empty_rows.in_port(3).get_connection().set_destination(cast_default_value.in_port(0))
        embedding_segments_op.in_port(4).connect(cast_default_value.out_port(0))
        # no input port for per_sample_weight

        identity_spw.in_port(0).disconnect()
        gather0_1.in_port(0).disconnect()
        gather0_2.in_port(0).disconnect()
        greaterequal0.in_port(0).disconnect()
        sparse_fill_empty_rows.in_port(2).disconnect()
        gather.in_port(0).disconnect()

        select.out_port(0).get_connection().set_source(embedding_segments_op.out_port(0))
        graph.remove_nodes_from(
            [gather0_1.id, gather0_2.id, greaterequal0.id, sparse_fill_empty_rows.id, select.id, where0.id])
Example #6
    def find_and_replace_pattern(self, graph: Graph):
        for node in graph.get_op_nodes(op='Slice'):
            node_name = node.soft_get('name', node.id)

            input_shape = node.in_port(0).data.get_shape()
            if node.is_in_port_connected(3):
                axes = node.in_port(3).data.get_value().copy()
                assert axes is not None, 'The input with axes is not constant for node {}'.format(
                    node_name)
                for i, val in enumerate(axes):
                    axes[i] = get_canonical_axis_index(input_shape, val)
            else:
                axes = int64_array(range(len(input_shape)))

            ss_begin = create_ss_interval_border(graph,
                                                 node.in_port(1).get_source(),
                                                 input_shape, axes, node_name)
            ss_end = create_ss_interval_border(graph,
                                               node.in_port(2).get_source(),
                                               input_shape, axes, node_name)
            node.in_port(1).disconnect()
            node.in_port(2).disconnect()
            rename_nodes([(ss_begin, node_name + '/Begin'),
                          (ss_end, node_name + '/End')])

            if node.is_in_port_connected(4):
                steps = node.in_port(4).data.get_value()
                assert steps is not None, 'The input with steps is not constant for node {}'.format(
                    node_name)
            else:
                steps = np.ones([axes.size], dtype=np.int64)

            ss_begin_mask = np.zeros(len(input_shape), dtype=np.int64)
            ss_end_mask = np.zeros(len(input_shape), dtype=np.int64)
            ss_step = np.ones(len(input_shape), dtype=np.int64)

            for i, axis in enumerate(axes):
                ss_begin_mask[axis] = 1
                ss_end_mask[axis] = 1
                ss_step[axis] = steps[i]

            ss_strides = Const(
                graph, dict(name=node_name + '/Strides',
                            value=ss_step)).create_node()

            ss = StridedSlice(
                graph,
                dict(name=node_name + '/StridedSlice',
                     new_axis_mask=np.zeros(len(input_shape), dtype=np.int64),
                     shrink_axis_mask=np.zeros(len(input_shape), dtype=np.int64),
                     ellipsis_mask=np.zeros(len(input_shape), dtype=np.int64),
                     begin_mask=ss_begin_mask,
                     end_mask=ss_end_mask)).create_node()

            node.in_port(0).get_connection().set_destination(ss.in_port(0))
            ss.in_port(1).connect(ss_begin.out_port(0))
            ss.in_port(2).connect(ss_end.out_port(0))
            ss.in_port(3).connect(ss_strides.out_port(0))
            node.out_port(0).get_connection().set_source(ss.out_port(0))

            rename_nodes([(node, node_name + '/ShouldBeDeleted'),
                          (ss, node_name)])
Example #7
    def replace_pattern(self, graph: Graph, match: dict):
        y = match['maximum'].in_port(0).data.get_value()
        if y is None:
            y = match['maximum'].in_port(1).data.get_value()

        if y is None or y.shape != ():
            log.debug(
                'The value of the "maximum_y_data" is not defined or is not constant'
            )
            return

        # We need to check axes which performed reduction because IE supports only 2D, 3D, 4D inputs and
        # reduction only along spatial and channel dimensions.
        input_rank = len(match['sum'].in_port(0).data.get_shape())
        if input_rank not in [2, 3, 4]:
            log.debug(
                'IE supports L2 normalization only for 2D, 3D and 4D tensors.')
            return

        axes = match['sum'].in_port(1).data.get_value()
        axes = int64_array(axes)
        if axes.shape == ():
            axes = int64_array([axes])
        axes = int64_array(
            [axis if axis >= 0 else axis + input_rank for axis in axes])
        axes.sort()
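        # e.g. for input_rank = 4, axes = [2, -1] has been normalized to [2, 3] at this point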

        transformation_applicable = False
        # check for case C + all spatial dims. Works for 2D (NC), 3D (NCH) and 4D (NCHW and NHWC)
        if len(axes) + 1 == input_rank and np.array_equal(
                axes, int64_array(np.arange(start=1, stop=input_rank))):
            transformation_applicable = True

        # check for pure C channel normalization
        if len(axes) == 1 and ((input_rank == 4 and get_features_dim(
                graph.graph['layout'], input_rank) == axes[0]) or
                               (input_rank != 4 and axes[0] == 1)):
            transformation_applicable = True

        if not transformation_applicable:
            log.debug(
                'IE doesn\'t support l2 normalization with reduction along axes {}.'
                .format(axes))
            return

        output_name = match['l2_normalize'].soft_get('name',
                                                     match['l2_normalize'].id)
        normalize_node = create_op_node_with_second_input(
            graph, NormalizeL2Op, axes, {
                'name': output_name,
                'eps_mode': 'max',
                'eps': y
            })
        match['square'].in_port(0).get_source().connect(
            normalize_node.in_port(0))

        match['square'].in_port(0).disconnect()
        if match['l2_normalize'].in_port(
                0).get_source().node.id == match['rsqrt'].id:
            match['l2_normalize'].in_port(1).disconnect()
        else:
            match['l2_normalize'].in_port(0).disconnect()

        match['l2_normalize'].out_port(0).get_connection().set_source(
            normalize_node.out_port(0))
        rename_nodes([(match['l2_normalize'], output_name + "/TBR"),
                      (normalize_node, output_name)])
Example #8
nodes_attributes = {
    'placeholder': {
        'kind': 'op',
        'op': 'Parameter'
    },
    'placeholder_data': {
        'kind': 'data'
    },
    'tile': {
        'kind': 'op',
        'op': 'Tile'
    },
    'tile_data': {
        'kind': 'data',
        'shape': int64_array([1, 1, 1, 1])
    },
    'result': {
        'kind': 'op',
        'op': 'Result'
    },
    'unsqueeze_1': {
        'kind': 'op',
        'op': 'Unsqueeze'
    },
    'unsqueeze_1_data': {
        'kind': 'data'
    },
    'unsqueeze_1_const': {
        'kind': 'op',
        'op': 'Const'
Example #9
    def test_fake_results(self):
        then_graph_nodes = {
            **valued_const_with_data('fake_const', int64_array(0)),
            **regular_op_with_empty_data(
                'shapeof', {
                    'kind': 'op',
                    'type': 'ShapeOf',
                    'op': 'ShapeOf',
                    'infer': Shape.infer,
                    'output_type': np.int64
                }),
            **regular_op_with_empty_data(
                'res_1', {
                    'kind': 'op',
                    'type': 'Result',
                    'op': 'Result',
                    'infer': lambda x: 0,
                    'output_id': 0
                })
        }
        then_graph_edges = [
            *connect('fake_const', 'shapeof'),
            *connect('shapeof', 'res_1'),
        ]

        else_graph_nodes = {
            **regular_op_with_empty_data(
                'param_1', {
                    'type': 'Parameter',
                    'kind': 'op',
                    'input_id': 1,
                    'shape': None,
                    'infer': Parameter.infer
                }),
            **regular_op_with_empty_data(
                'res_1', {
                    'kind': 'op',
                    'type': 'Result',
                    'op': 'Result',
                    'infer': lambda x: 0,
                    'output_id': 0
                })
        }
        else_graph_edges = [*connect('param_1', 'res_1')]
        then_graph = build_graph_with_edge_attrs(then_graph_nodes,
                                                 then_graph_edges)
        else_graph = build_graph_with_edge_attrs(else_graph_nodes,
                                                 else_graph_edges)
        external_graph_nodes = {
            **valued_const_with_data('cond',
                                     shape_array([dynamic_dimension_value])),
            **valued_const_with_data(
                'input_1',
                int64_array([1, 2, 3, 3, 2, 3]).reshape((2, 3))),
            **regular_op_with_empty_data(
                'if', {
                    'kind': 'op',
                    'op': 'If',
                    'then_graph': then_graph,
                    'else_graph': else_graph,
                    'infer': If.infer
                }),
            **result('res_1')
        }
        external_graph_edges = [
            *connect('cond', '0:if'), *connect('input_1', '1:if'),
            *connect('if', 'res_1')
        ]

        graph = build_graph(external_graph_nodes, external_graph_edges)
        graph.stage = 'middle'
        partial_infer(graph)
        npt.assert_array_equal(
            Node(graph, 'if').out_port(0).data.get_shape(), int64_array([2, 3]))
Example #10
    def replace_pattern(self, graph: Graph, match: Dict[str, Node]):
        log.debug('UpsampleToResample is triggered')
        upsample = match['upsample']
        upsample_name = upsample.soft_get('name', upsample.id)
        input_shape = upsample.in_port(0).data.get_shape()
        input_shape_rank = len(input_shape)
        if input_shape_rank not in [4, 5]:
            log.warning('The input shape is not 4D or 5D for op {}'.format(upsample_name))
            return

        depth_scale = None
        layout = graph.graph['layout']

        if len(upsample.in_nodes()) == 2:
            if upsample.in_node(1).value is None:
                return
            scales = upsample.in_node(1).value
            assert len(scales) in (
                4, 5
            ), 'Supported scales rank is 4 or 5, but it is {} for node {}'.format(
                len(scales), upsample_name)
            if not (math.isclose(scales[0], 1, rel_tol=1e-5)
                    and math.isclose(scales[1], 1, rel_tol=1e-5)):
                return
            height_scale = scales[get_height_dim(layout, input_shape_rank)]
            width_scale = scales[get_width_dim(layout, input_shape_rank)]
            if len(scales) == 5:
                depth_scale = scales[get_depth_dim(layout, input_shape_rank)]
        else:
            height_scale = upsample['height_scale']
            width_scale = upsample['width_scale']

        if 1 in upsample.in_ports() and not upsample.in_port(1).disconnected():
            upsample.in_port(1).disconnect()

        shape = Shape(graph, {'name': upsample_name + '/0_port'}).create_node()

        if input_shape_rank == 4:
            begin_value = int64_array(
                [get_height_dim(layout, input_shape_rank)])
            factor_value = float32_array([height_scale, width_scale])
        else:
            begin_value = int64_array(
                [get_depth_dim(layout, input_shape_rank)])
            factor_value = float32_array(
                [depth_scale, height_scale, width_scale])

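        # The StridedSlice below cuts the spatial part out of the input shape ([H, W] for 4D,
        # [D, H, W] for 5D); multiplying it by the scale factors yields the target spatial sizes.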
        ss = create_op_with_const_inputs(
            graph, StridedSlice, {
                1: begin_value,
                2: int64_array([get_width_dim(layout, input_shape_rank) + 1]),
                3: int64_array([1])
            }, {
                'name': upsample_name + '/ss_0_port',
                'begin_mask': int64_array([1]),
                'end_mask': int64_array([1]),
                'new_axis_mask': int64_array([0]),
                'shrink_axis_mask': int64_array([0]),
                'ellipsis_mask': int64_array([0])
            })

        mul = create_op_node_with_second_input(
            graph, Mul, factor_value, {'name': upsample_name + '/factor_mul'})

        source = upsample.in_port(0).get_connection().get_source()
        source.connect(shape.in_port(0))
        shape.out_port(0).connect(ss.in_port(0))

        ss.out_port(0).connect(mul.in_port(0))

        # Create Interpolate operation
        if input_shape_rank == 4:
            axes = int64_array([
                get_height_dim(layout, input_shape_rank),
                get_width_dim(layout, input_shape_rank)
            ])
        else:
            axes = int64_array([
                get_depth_dim(layout, input_shape_rank),
                get_height_dim(layout, input_shape_rank),
                get_width_dim(layout, input_shape_rank)
            ])

        axes_node = Const(graph, {
            'name': upsample_name + '/axis',
            'value': axes
        }).create_node()

        interpolate = Interpolate(
            graph, {
                'mode': upsample.attrs()['mode'],
                'antialias': 0,
                'pads_begin': int64_array([0]),
                'pads_end': int64_array([0]),
                'coordinate_transformation_mode': 'half_pixel',
                'nearest_mode': 'round_prefer_floor',
                'cube_coeff': -0.75,
                'shape_calculation_mode': 'scales',
                'version': 'opset4',
                'in_ports_count': 4
            }).create_node()

        upsample.add_input_port(1, skip_if_exist=True)
        assert upsample.in_port(1).disconnected()
        mul.out_port(0).connect(interpolate.in_port(1))
        axes_node.out_port(0).connect(interpolate.in_port(3))

        scales_node = Const(graph, {
            'name': upsample_name + '/scales',
            'value': factor_value
        }).create_node()
        scales_node.out_port(0).connect(interpolate.in_port(2))

        upsample.in_port(0).get_connection().set_destination(
            interpolate.in_port(0))
        upsample.out_port(0).get_connection().set_source(
            interpolate.out_port(0))

        rename_nodes([(upsample, upsample_name + '/delete'),
                      (interpolate, upsample_name)])

        convert_to_float = Cast(graph, dict(dst_type=np.float32)).create_node()
        convert_to_int = Cast(graph, dict(dst_type=np.int64)).create_node()

        mul.in_port(0).get_connection().insert_node(convert_to_float)
        mul.out_port(0).get_connection().insert_node(convert_to_int)
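        # The sliced shape values are integers while the scale factors are float, so the shape is
        # cast to float for the multiplication and the product is cast back to int64 afterwards.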
import unittest

from openvino.tools.mo.front.AttributedRollToRoll import AttributedRollToRoll
from openvino.tools.mo.front.common.partial_infer.utils import int64_array
from openvino.tools.mo.graph.graph import Node
from openvino.tools.mo.utils.ir_engine.compare_graphs import compare_graphs
from unit_tests.utils.graph import build_graph, const, result, regular_op

nodes_attributes = {
    **regular_op('placeholder', {'type': 'Parameter'}),
    **regular_op(
        'attr_roll', {
            'type': 'AttributedRoll',
            'op': 'AttributedRoll',
            'axes': int64_array([-1, 2, 3]),
            'shift': int64_array([5, -2, 3])
        }),
    **result('result'),

    # new Roll node and inputs
    **regular_op('roll', {'type': 'Roll'}),
    **const('roll_axes', int64_array([-1, 2, 3])),
    **const('roll_shift', int64_array([5, -2, 3]))
}


class AttributedRollToRollTest(unittest.TestCase):
    def test_axes_shift(self):
        graph = build_graph(nodes_attributes, [('placeholder', 'attr_roll', {
            'in': 0,
Example #12
    def test_2d(self):
        graph = build_graph(
            nodes_attrs=graph_node_attrs_when_transformation_is_not_applicable,
            edges=graph_edges_when_transformation_is_not_applicable,
            update_attributes={
                'placeholder_data': {'shape': int64_array([5, 8])},
                'dim': {'value': int64_array([1])},
                'dim_data': {'value': int64_array([1])},
                'unsqueeze_data': {'shape': int64_array([5, 1, 8])},
                'multipliers': {'value': int64_array([1, 10, 1])},
                'multipliers_data': {'value': int64_array([1, 10, 1]), 'shape': int64_array([3])},
                'tile_data': {'shape': int64_array([5, 10, 8])},
                'reshape_data': {'shape': int64_array([50, 8])},
                'shape': {'value': int64_array([50, 8]), 'shape': int64_array([2])},
                'shape_data': {'value': int64_array([50, 8]), 'shape': int64_array([2])},
                'abs_data': {'shape': int64_array([50, 8])},
            })
        ref_graph = build_graph(
            nodes_attrs=graph_node_attrs_when_transformation_is_not_applicable,
            edges=graph_edges_when_transformation_is_not_applicable,
            update_attributes={
                'placeholder_data': {'shape': int64_array([5, 8])},
                'dim': {'value': int64_array([1])},
                'dim_data': {'value': int64_array([1])},
                'unsqueeze_data': {'shape': int64_array([5, 1, 8])},
                'multipliers': {'value': int64_array([1, 10, 1])},
                'multipliers_data': {'value': int64_array([1, 10, 1]), 'shape': int64_array([3])},
                'tile_data': {'shape': int64_array([5, 10, 8])},
                'reshape_data': {'shape': int64_array([50, 8])},
                'shape': {'value': int64_array([50, 8]), 'shape': int64_array([2])},
                'shape_data': {'value': int64_array([50, 8]), 'shape': int64_array([2])},
                'abs_data': {'shape': int64_array([50, 8])},
            })
        UnsqueezeTileReshapeBlockToInterpolate().find_and_replace_pattern(graph)
        (flag, resp) = compare_graphs(graph, ref_graph, 'output')
        self.assertTrue(flag, resp)
Example #13
    def test_4d(self):
        graph = build_graph(
            nodes_attrs=graph_node_attrs,
            edges=graph_edges,
            update_attributes={
                'placeholder_data': {'shape': int64_array([1, 8, 32, 32])},
                'unsqueeze_data': {'shape': int64_array([1, 8, 1, 32, 32])},
                'multipliers': {'value': int64_array([1, 1, 2, 1, 1]), 'shape': int64_array([5])},
                'multipliers_data': {'value': int64_array([1, 1, 2, 1, 1]), 'shape': int64_array([5])},
                'tile_data': {'shape': int64_array([1, 8, 2, 32, 32])},
                'reshape_data': {'shape': int64_array([1, 16, 32, 32]), 'value': None},
                'shape': {'value': int64_array([1, 16, 32, 32]), 'shape': int64_array([4])},
                'shape_data': {'value': int64_array([1, 16, 32, 32]), 'shape': int64_array([4])},
                'abs_data': {'shape': int64_array([1, 16, 32, 32])},
            })
        ref_graph = build_graph(
            nodes_attrs=ref_graph_node_attrs_with_4_inputs_interpolate,
            edges=ref_graph_edges_attrs_with_4_inputs_interpolate,
            update_attributes={
                'placeholder_data': {'shape': int64_array([1, 8, 32, 32])},
                'interpolate_data': {'shape': int64_array([1, 16, 32, 32])},
                'abs_data': {'shape': int64_array([1, 16, 32, 32])},
                'axes': {'shape': int64_array([1]), 'value': int64_array([1])},
            })
        UnsqueezeTileReshapeBlockToInterpolate().find_and_replace_pattern(graph)
        (flag, resp) = compare_graphs(graph, ref_graph, 'output')
        self.assertTrue(flag, resp)
Example #14
import numpy as np

from openvino.tools.mo.middle.UnsqueezeTileReshapeBlockToInterpolate import UnsqueezeTileReshapeBlockToInterpolate
from openvino.tools.mo.front.common.partial_infer.utils import int64_array
from openvino.tools.mo.utils.ir_engine.compare_graphs import compare_graphs
from unit_tests.utils.graph import build_graph

graph_node_attrs = {
    'placeholder': {
        'type': 'Parameter',
        'kind': 'op',
        'op': 'Parameter'
    },
    'placeholder_data': {
        'value': None,
        'shape': int64_array([1, 8, 32, 32, 64]),
        'kind': 'data',
        'data_type': None
    },
    'unsqueeze': {
        'type': 'Unsqueeze',
        'kind': 'op',
        'op': 'Unsqueeze'
    },
    'dim': {
        'kind': 'op',
        'op': 'Const',
        'type': 'Const',
        'value': int64_array([2]),
        'shape': int64_array([1]),
    },
    def test_pattern_does_not_satisfy(self, input_shape, scales):
        graph = build_graph(
            graph_node_attrs, graph_edges, {
                'placeholder_data': {
                    'shape': int64_array(input_shape)
                },
                'scales': {
                    'value': int64_array(scales),
                    'shape': int64_array(scales).shape
                },
                'scales_data': {
                    'value': int64_array(scales),
                    'shape': int64_array(scales).shape
                },
                'upsample_data': {
                    'shape': int64_array(input_shape) * int64_array(scales)
                }
            })
        graph.graph['layout'] = 'NCHW'

        ref_graph = build_graph(
            graph_node_attrs, graph_edges, {
                'placeholder_data': {
                    'shape': int64_array(input_shape)
                },
                'scales': {
                    'value': int64_array(scales),
                    'shape': int64_array(scales).shape
                },
                'scales_data': {
                    'value': int64_array(scales),
                    'shape': int64_array(scales).shape
                },
                'upsample_data': {
                    'shape': int64_array(input_shape) * int64_array(scales)
                }
            })

        UpsampleToResample().find_and_replace_pattern(graph)
        (flag, resp) = compare_graphs(graph, ref_graph, 'output')
        self.assertTrue(flag, resp)
Example #16
    def test_simple_shape_inf(self, cond, output_port_0_shape,
                              output_port_1_shape):
        then_graph_nodes = {
            **regular_op_with_empty_data(
                'param_1', {
                    'type': 'Parameter',
                    'kind': 'op',
                    'input_id': 1,
                    'shape': None,
                    'infer': Parameter.infer
                }),
            **regular_op_with_empty_data(
                'param_2', {
                    'type': 'Parameter',
                    'kind': 'op',
                    'input_id': 2,
                    'shape': None,
                    'infer': Parameter.infer
                }),
            **regular_op_with_empty_data(
                'add', {
                    'type': 'Add',
                    'kind': 'op',
                    'op': 'Add',
                    'infer': lambda node: eltwise_infer(node, Add.operation)
                }),
            **regular_op_with_empty_data(
                'mul', {
                    'type': 'Mul',
                    'kind': 'op',
                    'op': 'Mul',
                    'infer': lambda node: eltwise_infer(node, Mul.operation)
                }),
            **regular_op_with_empty_data(
                'res1', {
                    'kind': 'op',
                    'type': 'Result',
                    'op': 'Result',
                    'infer': lambda x: 0,
                    'output_id': 0
                }),
            **regular_op_with_empty_data(
                'res2', {
                    'kind': 'op',
                    'type': 'Result',
                    'op': 'Result',
                    'infer': lambda x: 0,
                    'output_id': 1
                })
        }
        then_graph_edges = [
            *connect('param_1', '0:add'),
            *connect('param_2', '1:add'),
            *connect('param_1', '1:mul'),
            *connect('param_2', '0:mul'),
            *connect('add', 'res1'),
            *connect('mul', 'res2'),
        ]

        else_graph_nodes = {
            **regular_op_with_empty_data(
                'param_1', {
                    'type': 'Parameter',
                    'kind': 'op',
                    'input_id': 1,
                    'shape': None,
                    'infer': Parameter.infer
                }),
            **regular_op_with_empty_data(
                'param_2', {
                    'type': 'Parameter',
                    'kind': 'op',
                    'input_id': 3,
                    'shape': None,
                    'infer': Parameter.infer
                }),
            **regular_op_with_empty_data('identity', {
                'kind': 'op',
                'op': 'Identity',
                'infer': Identity.infer
            }),
            **regular_op_with_empty_data('identity_1', {
                'kind': 'op',
                'op': 'Identity',
                'infer': Identity.infer
            }),
            **regular_op_with_empty_data(
                'res1', {
                    'kind': 'op',
                    'type': 'Result',
                    'op': 'Result',
                    'infer': lambda x: 0,
                    'output_id': 0
                }),
            **regular_op_with_empty_data(
                'res2', {
                    'kind': 'op',
                    'type': 'Result',
                    'op': 'Result',
                    'infer': lambda x: 0,
                    'output_id': 1
                })
        }
        else_graph_edges = [
            *connect('param_1', 'identity'),
            *connect('param_2', 'identity_1'),
            *connect('identity_1', 'res2'),
            *connect('identity', 'res1'),
        ]
        then_graph = build_graph_with_edge_attrs(then_graph_nodes,
                                                 then_graph_edges)
        else_graph = build_graph_with_edge_attrs(else_graph_nodes,
                                                 else_graph_edges)
        external_graph_nodes = {
            **valued_const_with_data('cond', cond),
            **valued_const_with_data('input_2', int64_array([3, 2, 1])),
            **valued_const_with_data('input_1', int64_array([1, 2, 3])),
            **valued_const_with_data('input_3', int64_array([8, 4])),
            **regular_op(
                'if', {
                    'kind': 'op',
                    'op': 'If',
                    'then_graph': then_graph,
                    'else_graph': else_graph,
                    'infer': If.infer
                }),
            **empty_data('if_d_1'),
            **empty_data('if_d_2'),
            **result('res_1'),
            **result('res_2')
        }
        external_graph_edges = [
            *connect('cond', '0:if'), *connect('input_1', '1:if'),
            *connect('input_2', '2:if'), *connect('input_3', '3:if'),
            ('if', 'if_d_1', {'out': 0}), ('if', 'if_d_2', {'out': 1}),
            ('if_d_1', 'res_1'), ('if_d_2', 'res_2')
        ]

        graph = build_graph(external_graph_nodes, external_graph_edges)
        graph.stage = 'middle'
        partial_infer(graph)
        if_node = Node(graph, 'if')
        self.assertTrue(
            strict_compare_tensors(
                if_node.out_port(0).data.get_shape(), output_port_0_shape))
        # shape of the "then" branch is [3] and shape of the "else" branch is [2], so the output shape is "[dynamic]"
        self.assertTrue(
            strict_compare_tensors(
                if_node.out_port(1).data.get_shape(), output_port_1_shape))
Example #17
def replace_sequence(seq: List[Node], graph: Graph):
    """
    This function replaces a sequence of consecutive Interpolate layers with one Interpolate layer,
    if modes of all nodes of a sequence are the same.
    :param seq: sequence of Interpolate layers
    :param graph: graph to which nodes of seq belong
    :return: Nothing
    """
    if len(seq) < 2:
        return

    modes = {n.mode for n in seq}
    if len(modes) != 1:
        return

    dims_and_scales_ = []
    # Each element of the list dims_and_scales_ is a pair
    #      (axis, output size for this axis) (opset1)
    # or
    #      (axis, output size for this axis, output scales for this axis) (opset4)
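    # For example (illustrative), two consecutive opset1 Interpolate nodes with axes [2] -> size 60
    # and axes [3] -> size 80 collapse into a single Interpolate with axes [2, 3] and sizes [60, 80].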
    if seq[0].get_opset() == 'opset1':
        for interp in seq:
            dims_and_scales_.extend(
                zip(Interpolate.get_axes(interp),
                    interp.in_port(1).get_connection().get_source().data.get_value()))

        axis_to_size = sorted(dict(dims_and_scales_).items(), key=lambda x: x[0])
        axes_of_node = int64_array([z[0] for z in axis_to_size])
        sizes = shape_array([z[1] for z in axis_to_size])
        scales = np.ones(len(axis_to_size), dtype=np.float32)
    else:
        for interp in seq:
            dims_and_scales_.extend(
                zip(Interpolate.get_axes(interp),
                    interp.in_port(1).get_connection().get_source().data.get_value(),
                    interp.in_port(2).get_connection().get_source().data.get_value()))

        axis_to_size = sorted(dims_and_scales_, key=lambda x: x[0])
        axes_of_node = int64_array([z[0] for z in axis_to_size])
        sizes = shape_array([z[1] for z in axis_to_size])
        scales = mo_array([z[2] for z in axis_to_size])

    fst_interp_node = seq[0]
    last_interp_node = seq[-1]
    last_interp_node_name = last_interp_node.soft_get('name',
                                                      last_interp_node.id)
    attributes = get_interpolate_attributes(fst_interp_node)

    opset = fst_interp_node.get_opset()
    if opset == 'opset1':
        attributes['axes'] = axes_of_node
        interp_node = create_op_with_const_inputs(graph, Interpolate, {1: sizes}, attributes)
    else:
        attributes['in_ports_count'] = 4
        interp_node = create_op_with_const_inputs(graph, Interpolate,
                                                  {1: sizes, 2: scales, 3: axes_of_node}, attributes)

    fst_interp_node.in_port(0).get_connection().set_destination(interp_node.in_port(0))
    last_interp_node.out_port(0).get_connection().set_source(interp_node.out_port(0))

    rename_nodes([(last_interp_node, last_interp_node_name + '/delete'),
                  (interp_node, last_interp_node_name)])
Example #18
    def replace_pattern(self, graph: Graph, match: Dict[str, Node]):
        group_norm_node = match['op']
        group_norm_num_input_dims = len(group_norm_node.in_port(0).data.get_shape())

        # node computing initial GroupNorm input shape
        initial_shape_op_node = Shape(graph, {'name': group_norm_node.name + '/Shape'}).create_node()
        initial_shape_op_node.in_port(0).connect(group_norm_node.in_port(0).get_source())

        initial_shape_op_node_float = Cast(
            graph, {'name': initial_shape_op_node.name + '/to_float',
                    'dst_type': data_type_str_to_np(graph.graph['cmd_params'].data_type)}).create_node()
        initial_shape_op_node.out_port(0).connect(initial_shape_op_node_float.in_port(0))

        initial_batch_dim_node = node_to_get_batch_value(initial_shape_op_node_float)
        initial_features_dim_node = node_to_get_features_dimension_value(initial_shape_op_node_float)
        initial_spatial_dims_node_int = node_to_get_spatial_dimensions_value(initial_shape_op_node)
        initial_spatial_dims_node = Cast(
            graph, {'name': initial_spatial_dims_node_int.name + '/to_float',
                    'dst_type': data_type_str_to_np(graph.graph['cmd_params'].data_type)}).create_node()
        initial_spatial_dims_node_int.out_port(0).connect(initial_spatial_dims_node.in_port(0))

        group_size_node = Const(graph, {'value': int64_array([group_norm_node.num_groups]),
                                        'name': group_norm_node.name + '/GroupSize'}).create_node()

        # calculate "features // group_size" value
        reciprocal_group_size_node = Const(graph, {'value': np.array([1.0 / group_norm_node.num_groups]),
                                                   'name': group_norm_node.name + '/ReciprocalGroupSize'}).create_node()
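        # Multiplying by 1 / num_groups in float emulates the integer division C // num_groups,
        # since the shape values were cast to float above (assumes C is divisible by num_groups).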

        c_div_g_node = Mul(graph, {}).create_node()
        c_div_g_node.in_port(0).connect(initial_features_dim_node.out_port(0))
        c_div_g_node.in_port(1).connect(reciprocal_group_size_node.out_port(0))

        batch_mul_group_size_node = Mul(graph, {}).create_node()
        batch_mul_group_size_node.in_port(0).connect(initial_batch_dim_node.out_port(0))
        batch_mul_group_size_node.in_port(1).connect(group_size_node.out_port(0))

        # create new node which concatenates several dims to one
        new_shape_node_float = new_shape_node_from_shape_nodes([batch_mul_group_size_node, c_div_g_node,
                                                                initial_spatial_dims_node])
        new_shape_node = Cast(graph,
                              {'name': new_shape_node_float.name + '/to_int64', 'dst_type': np.int64}).create_node()
        new_shape_node_float.out_port(0).connect(new_shape_node.in_port(0))

        reshape_for_mvn_node = Reshape(graph, {}).create_node()

        group_norm_node.in_port(0).get_connection().set_destination(reshape_for_mvn_node.in_port(0))
        reshape_for_mvn_node.in_port(1).connect(new_shape_node.out_port(0))

        # Reshape the gamma and beta constants to correct layout from [C] to [1,C], [1,C,1], [1,C,1,1] etc
        gamma_beta_shape = np.ones([group_norm_num_input_dims], dtype=np.int64)
        gamma_beta_shape[1] = -1
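        # e.g. for a 4D input gamma_beta_shape becomes [1, -1, 1, 1], so gamma/beta of shape [C]
        # are reshaped to [1, C, 1, 1] and broadcast over N, H, W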

        gamma_value = group_norm_node.in_port(1).get_source().data.get_value()
        beta_value = group_norm_node.in_port(2).get_source().data.get_value()
        assert gamma_value is not None, 'The gamma should be constant'
        assert beta_value is not None, 'The beta should be constant'
        gamma_value = np.reshape(gamma_value, gamma_beta_shape)
        group_norm_node.in_port(1).get_source().data.set_value(gamma_value)
        beta_value = np.reshape(beta_value, gamma_beta_shape)
        group_norm_node.in_port(2).get_source().data.set_value(beta_value)

        # MVN
        mvn_node = MVN(graph, {'name': group_norm_node.name + '/MVN',
                               'normalize_variance': 1,
                               'eps': group_norm_node.eps,
                               'eps_mode': 'inside_sqrt'}).create_node()
        mvn_node.in_port(0).connect(reshape_for_mvn_node.out_port(0))

        # MVN axes
        _, rank = get_shape_and_rank_nodes_by_port(mvn_node.in_port(0).get_connection().get_source(),
                                                   return_as_a_scalar=True)
        rng = create_op_with_const_inputs(graph, Range, {0: int64_array(1), 2: int64_array(1)},
                                          {'name': group_norm_node.name + '/Range', 'output_type': np.int64})
        mvn_node.in_port(1).connect(rng.out_port(0))
        rng.in_port(1).connect(rank.out_port(0))

        # reshape to the initial shape before multiplying with gamma and adding beta
        reshape_to_initial_shape_node = Reshape(graph, {}).create_node()
        reshape_to_initial_shape_node.in_port(0).connect(mvn_node.out_port(0))
        reshape_to_initial_shape_node.in_port(1).connect(initial_shape_op_node.out_port(0))

        mul_node = Mul(graph, {'name': mvn_node.name + '/Mul'}).create_node()
        mul_node.in_port(0).connect(reshape_to_initial_shape_node.out_port(0))
        group_norm_node.in_port(1).get_connection().set_destination(mul_node.in_port(1))

        add_node = Add(graph, {'name': mul_node.name + '/Add'}).create_node()
        add_node.in_port(0).connect(mul_node.out_port(0))
        group_norm_node.in_port(2).get_connection().set_destination(add_node.in_port(1))

        group_norm_node.out_port(0).get_connection().set_source(add_node.out_port(0))
    **result(),

    # Transpose layers
    **regular_op_with_shaped_data('transpose_1', None, {
        'type': 'Transpose',
        'op': 'Transpose',
        'need_shape_inference': True
    }),
    **regular_op_with_shaped_data('transpose_3', None, {
        'type': 'Transpose',
        'op': 'Transpose',
        'need_shape_inference': True
    }),

    # Const layers
    **valued_const_with_data('axis_1_const', int64_array([0, 2, 3, 1])),
    **valued_const_with_data('axis_3_const', int64_array([0, 4, 1, 2, 3])),
}


class LayoutChangeForEinsumTests(unittest.TestCase):
    def test_layout_change_einsum(self):
        graph = build_graph(nodes_attributes,
                            [*connect('placeholder_1', '0:einsum'),
                             *connect('placeholder_2', '1:einsum'),
                             *connect('placeholder_3', '2:einsum'),
                             *connect('einsum', 'output')],
                            {  # this input stays as is since it is of a rank equal to 3
                                'placeholder_1_d': {'shape': np.array([2, 3, 5])},
                                # [3, 5, 7, 8] - NHWC, [3, 8, 5, 7] - NCHW
                                # this input does not require additional transpose
    def replace_pattern(self, graph: Graph, match: dict):
        assert match['operator'].has('multiplication_transparent_ports')

        port = match['operator'].input_ports_with(match['quantized'])
        assert len(port) >= 1
        if len(port) > 1:
            log.debug(
                'BinarizeWeightsM1P1 cannot apply transformation for data {} because it is consumed more'
                ' than once'.format(match['quantized'].name))
            return

        assert len(port) == 1
        port = port[0]
        applicable = [
            pair for pair in match['operator'].multiplication_transparent_ports
            if pair[0] == port
        ]
        if len(applicable) == 0:
            return

        # Look at the 3rd and 4th inputs of FakeQuantize -- they hold constants that should be passed through.
        # Assume that the constant to be passed through is a scalar.
        quantize = match['quantize']
        output_low = quantize.in_node(3)
        output_high = quantize.in_node(4)

        quantize_name = quantize.soft_get('name', quantize.id)

        # Both output_low and output_high values are dereferenced below, so bail out if either is missing
        if not output_low.has_valid('value') or not output_high.has_valid('value'):
            return

        output_low = output_low.value
        output_high = output_high.value

        # This pass is applicable for binarization only. Other intX variants are not relevant.
        if quantize.levels != 2:
            return

        # Recognize two cases: 0/+1 and -1/+1.
        zp1 = np.all(output_low == 0) or np.all(output_high == 0)
        m1p1 = np.all(-output_low == output_high)
        if (not zp1 and not m1p1) or (zp1 and m1p1):
            log.debug(
                'BinarizeWeightsM1P1 cannot apply transformation for data {} because it doesn\'t have one of'
                ' the 0/+1 or -1/+1 forms.'.format(match['quantized'].name))
            return

        # TODO: Extract a real scalar from the 3rd and 4th inputs; reusing the original tensors is dangerous
        #       because they may have incompatible shapes.

        mult_term = quantize.in_node(3) if np.all(
            output_high == 0) else quantize.in_node(4)

        new_shape = Const(
            graph, {
                'name': quantize_name + '/Reshape/Shape',
                'value': int64_array([-1, 1, 1])
            }).create_node_with_data()
        reshape = Reshape(graph, {
            'name': quantize_name + '/Reshape'
        }).create_node_with_data([mult_term, new_shape])

        # Patch the inflow path (by dividing by mult_term)
        # Put a new Pow/Mul combination here:
        #       ---->---- (here)---> data ---> [3rd/4th ports]quantize ---> quantized ---> operator

        if len(match['quantized'].out_nodes()) > 1:
            log.debug(
                'BinarizeWeightsM1P1: len(match[\'quantized\'].out_nodes()) > 1'
            )
            return
        power_of_exponent = Const(graph, {
            'name': quantize_name + '/DivNormalize/Power',
            'value': mo_array(-1.0)
        }).create_node_with_data()
        div_op = Pow(graph, {'name': quantize_name + '/DivNormalize'})
        div_output = div_op.create_node_with_data(
            [mult_term, power_of_exponent])

        for i in [3, 4]:
            match['quantize'].insert_node_with_data_before(
                match['quantize'].in_node(i),
                Mul,
                dict(name=quantize_name + '/MulNormalize'),
                additional_inputs=[div_output],
            )

        match['quantized'].value = None  # reset value because it will be recomputed
        match['quantize'].infer(match['quantize'])

        # Put a complementary new Mul node here:   operator -->---(here)-----> operator.out_node()

        match['operator'].insert_node_with_data_after(
            match['operator'].out_node(),
            Mul,
            dict(name=match['operator'].name + '/MulNormalize'),
            [reshape],
        )

        # Disable 'operator' fusion with linear ops; otherwise it would annihilate the changes we just made
        match['operator']['can_be_fused'] = False
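
The zp1/m1p1 recognition above reduces to two NumPy checks; a standalone sketch of just that logic (the function name binarization_form is made up for illustration):

import numpy as np

def binarization_form(output_low, output_high):
    zp1 = np.all(output_low == 0) or np.all(output_high == 0)   # 0/+1 form
    m1p1 = np.all(-output_low == output_high)                   # -1/+1 form
    if zp1 == m1p1:   # neither or both forms hold -> the transformation is skipped
        return None
    return '0/+1' if zp1 else '-1/+1'

assert binarization_form(np.array(0.0), np.array(0.37)) == '0/+1'
assert binarization_form(np.array(-0.5), np.array(0.5)) == '-1/+1'
assert binarization_form(np.array(-0.2), np.array(0.7)) is None
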
Example #21
0
# Copyright (C) 2018-2021 Intel Corporation
# SPDX-License-Identifier: Apache-2.0


import unittest

from openvino.tools.mo.front.tf.SSliceComplex import SSliceComplex
from openvino.tools.mo.front.common.partial_infer.utils import int64_array
from openvino.tools.mo.utils.ir_engine.compare_graphs import compare_graphs
from unit_tests.utils.graph import build_graph


graph_node_attrs = {
    'placeholder': {'shape': int64_array([3, 100, 100, 2]), 'type': 'Parameter', 'kind': 'op', 'op': 'Parameter'},
    'strided_slice_real': {
        'type': 'StridedSlice', 'kind': 'op', 'op': 'StridedSlice', 'begin_mask': int64_array([1]),
        'end_mask': int64_array([1]), 'ellipsis_mask': int64_array([1]), 'new_axis_mask': int64_array([0]),
        'shrink_axis_mask': int64_array([0, 1]),
    },
    'strided_slice_imag': {
        'type': 'StridedSlice', 'kind': 'op', 'op': 'StridedSlice', 'begin_mask': int64_array([1]),
        'end_mask': int64_array([1]), 'ellipsis_mask': int64_array([1]), 'new_axis_mask': int64_array([0]),
        'shrink_axis_mask': int64_array([0, 1]),
    },
    'complex': {'kind': 'op', 'op': 'Complex'},
    'real_begin': {
        'type': 'Const', 'kind': 'op', 'op': 'Const', 'shape': int64_array([2]), 'value': int64_array([0, 0])
    },
    'imag_begin': {
        'type': 'Const', 'kind': 'op', 'op': 'Const', 'shape': int64_array([2]), 'value': int64_array([0, 1])
    },
    def test_dequantize(self):
        graph = build_graph(nodes1_attributes, [
            ('input', 'input_data'),
            ('input_data', 'dequantize'),
            ('dequantize', 'dequantize_data'),
            ('scale_param_dq', 'scale_param_dq_data'),
            ('zerop_param_dq', 'zerop_param_dq_data'),
            ('scale_param_dq_data', 'dequantize'),
            ('zerop_param_dq_data', 'dequantize'),
            ('dequantize_data', 'out'),
            ('out', 'out_data'),
            ('out_data', 'result'),
        ], {
            'input_data': {
                'shape': int64_array([1, 3, 224, 224])
            },
            'scale_param_dq': {
                'shape': np.array([]),
                'value': np.float32(1.0 / 255)
            },
            'scale_param_dq_data': {
                'shape': np.array([]),
                'value': np.float32(1.0 / 255)
            },
            'zerop_param_dq': {
                'shape': np.array([]),
                'value': np.uint8(0)
            },
            'zerop_param_dq_data': {
                'shape': np.array([]),
                'value': np.uint8(0)
            },
        },
                            nodes_with_edges_only=True)

        graph_ref = build_graph(nodes_ref_attributes, [
            ('input', 'input_data'),
            ('input_data', 'cast'),
            ('cast', 'cast_data'),
            ('cast_data', 'sub'),
            ('zerop_param_dq', 'zerop_param_dq_data'),
            ('zerop_param_dq_data', 'sub'),
            ('sub', 'sub_data'),
            ('sub_data', 'mul'),
            ('scale_param_dq', 'scale_param_dq_data'),
            ('scale_param_dq_data', 'mul'),
            ('mul', 'mul_data'),
            ('mul_data', 'out'),
            ('out', 'out_data'),
            ('out_data', 'result'),
        ], {
            'input_data': {
                'shape': int64_array([1, 3, 224, 224])
            },
            'scale_param_dq': {
                'shape': np.array([]),
                'value': np.float32(1.0 / 255)
            },
            'scale_param_dq_data': {
                'shape': np.array([]),
                'value': np.float32(1.0 / 255)
            },
            'zerop_param_dq': {
                'shape': np.array([]),
                'value': np.uint8(0)
            },
            'zerop_param_dq_data': {
                'shape': np.array([]),
                'value': np.uint8(0)
            },
        },
                                nodes_with_edges_only=True)

        graph.stage = 'middle'
        DequantizeLinearResolver().find_and_replace_pattern(graph)

        (flag, resp) = compare_graphs(graph,
                                      graph_ref,
                                      'out',
                                      check_op_attrs=True)
        self.assertTrue(flag, resp)
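
For reference, the arithmetic that the Cast -> Sub -> Mul graph above encodes is plain dequantization; a minimal NumPy sketch of the scalar-parameter case (the helper name is made up):

import numpy as np

def dequantize_linear_ref(x, scale, zero_point):
    return (x.astype(np.float32) - np.float32(zero_point)) * np.float32(scale)

x = np.array([0, 128, 255], dtype=np.uint8)
print(dequantize_linear_ref(x, scale=1.0 / 255, zero_point=0))   # ~[0.0, 0.502, 1.0]
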
Example #23
0
    def create_fake_quantize_net(self, il, ih, num_bits, narrow_range,
                                 nudged_il, nudged_ih, expected_step,
                                 ir_version, use_new_frontend):
        # original tf model
        import tensorflow as tf
        tf.compat.v1.reset_default_graph()
        with tf.compat.v1.Session() as sess:
            data = tf.compat.v1.placeholder(tf.float32, [11], 'parameter')
            input_min = tf.constant(il, name='input_min')
            input_max = tf.constant(ih, name='input_max')
            tf.quantization.fake_quant_with_min_max_vars(
                data, input_min, input_max, num_bits, narrow_range, 'fq')

            tf.compat.v1.global_variables_initializer()
            tf_net = sess.graph_def

        # reference graph to compare with IR
        ref_net = None
        if check_ir_version(10, None, ir_version) and not use_new_frontend:
            levels = 2**num_bits - int(narrow_range)

            # data (shape, value) -> const (shape, value) -> data (shape, no value)
            const_for_layer_tests = lambda name, value: {
                **{
                    name + '_dd': {
                        'kind': 'data',
                        'value': value,
                        'shape': value.shape
                    }
                },
                **{
                    name: {
                        'kind': 'op',
                        'type': 'Const'
                    }
                },
                **shaped_data(name + '_d', int64_array(value.shape))
            }

            connect_const_for_layer_tests = lambda first_tensor_name, second_tensor_name: [
                *connect_front(first_tensor_name + '_dd', first_tensor_name),
                *connect(first_tensor_name, second_tensor_name)
            ]

            nodes = {
                **regular_op_with_shaped_data('parameter', [11], {
                                                  'type': 'Parameter'
                                              }),
                **const_for_layer_tests(
                    'il', np.array([nudged_il], dtype=np.float32)),
                **const_for_layer_tests(
                    'ih', np.array([nudged_ih], dtype=np.float32)),
                **const_for_layer_tests(
                    'ol', np.array([nudged_il], dtype=np.float32)),
                **const_for_layer_tests(
                    'oh', np.array([nudged_ih], dtype=np.float32)),
                **regular_op_with_shaped_data('fq', [11], {
                                                  'type': 'FakeQuantize',
                                                  'levels': levels
                                              }),
                **regular_op('result', {'type': 'Result'}),
            }
            edges = [
                *connect('parameter', '0:fq'),
                *connect_const_for_layer_tests('il', '1:fq'),
                *connect_const_for_layer_tests('ih', '2:fq'),
                *connect_const_for_layer_tests('ol', '3:fq'),
                *connect_const_for_layer_tests('oh', '4:fq'),
                *connect('fq', 'result'),
            ]
            ref_net = build_graph(nodes, edges)

        return tf_net, ref_net
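
The 'levels' attribute checked in the reference graph above follows directly from the TF parameters; a one-liner to illustrate (narrow_range drops one quantization bucket, e.g. int8 maps to [-127, 127] instead of [-128, 127]):

def fq_levels(num_bits, narrow_range):
    return 2 ** num_bits - int(narrow_range)

assert fq_levels(8, False) == 256
assert fq_levels(8, True) == 255
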
class TestDequantizeWithAxis(unittest.TestCase):
    @generate(*[
        (int64_array([1, 3, 4, 4]), np.array([2, 3, 4, 5], dtype=np.float32),
         np.array([2, 3, 4, 5], dtype=np.uint8), int64_array([1, 1, 4, 1]), 2),
        (int64_array([1, 3, 4, 4]), int64_array([2, 3, 4, 5]),
         np.array([2, 3, 4, 5], dtype=np.uint8), int64_array([1, 3, 1, 1]), 1),
        (int64_array([2, 3, 4, 4]), int64_array([2, 3, 4, 5]),
         np.array([2, 3, 4, 5], dtype=np.uint8), int64_array([2, 1, 1, 1]), 0),
        (int64_array([1, 3, 4, 4]), int64_array([2, 3, 4, 5]),
         np.array([2, 3, 4, 5], dtype=np.uint8), int64_array([1, 1, 4, 1]), -2),
        (int64_array([1, 3, 4, 4]), int64_array([2, 3, 4, 5]),
         np.array([2, 3, 4, 5], dtype=np.uint8), int64_array([1, 1, 1, 4]), -1),
        (int64_array([1, 3, 4, 4]), int64_array([2, 3, 4, 5]),
         np.array([2, 3, 4, 5], dtype=np.int32), int64_array([1, 1, 4, 1]), 2),
        (int64_array([1, 3, 4, 4]), int64_array([2, 3, 4, 5]),
         np.array([2, 3, 4, 5], dtype=np.int32), int64_array([1, 3, 1, 1]), 1),
        (int64_array([2, 3, 4, 4]), int64_array([2, 3, 4, 5]),
         np.array([2, 3, 4, 5], dtype=np.int32), int64_array([2, 1, 1, 1]), 0),
    ])
    def test_dequantize_with_axis(self, input_shape, scale_param_value,
                                  zero_param_value, target_shape, axis):
        graph = build_graph(nodes1_attributes, [
            ('input', 'input_data'),
            ('input_data', 'dequantize'),
            ('dequantize', 'dequantize_data'),
            ('scale_param_dq', 'scale_param_dq_data'),
            ('zerop_param_dq', 'zerop_param_dq_data'),
            ('scale_param_dq_data', 'dequantize'),
            ('zerop_param_dq_data', 'dequantize'),
            ('dequantize_data', 'out'),
            ('out', 'out_data'),
            ('out_data', 'result'),
        ], {
            'input_data': {
                'shape': input_shape
            },
            'dequantize': {
                'axis': axis
            },
            'scale_param_dq': {
                'shape': scale_param_value.shape,
                'value': scale_param_value
            },
            'scale_param_dq_data': {
                'shape': scale_param_value.shape,
                'value': scale_param_value
            },
            'zerop_param_dq': {
                'shape': zero_param_value.shape,
                'value': zero_param_value
            },
            'zerop_param_dq_data': {
                'shape': zero_param_value.shape,
                'value': zero_param_value
            },
        },
                            nodes_with_edges_only=True)

        graph_ref = build_graph(nodes_ref_attributes, [
            ('input', 'input_data'),
            ('input_data', 'cast'),
            ('cast', 'cast_data'),
            ('cast_data', 'sub'),
            ('zerop_param_dq', 'zerop_param_dq_data'),
            ('zerop_param_dq_data', 'sub_reshape'),
            ('sub_reshape_const', 'sub_reshape_const_data'),
            ('sub_reshape_const_data', 'sub_reshape'),
            ('sub_reshape', 'sub_reshape_data'),
            ('sub_reshape_data', 'sub'),
            ('sub', 'sub_data'),
            ('sub_data', 'mul'),
            ('scale_param_dq', 'scale_param_dq_data'),
            ('scale_param_dq_data', 'mul_reshape'),
            ('mul_reshape_const', 'mul_reshape_const_data'),
            ('mul_reshape_const_data', 'mul_reshape'),
            ('mul_reshape', 'mul_reshape_data'),
            ('mul_reshape_data', 'mul'),
            ('mul', 'mul_data'),
            ('mul_data', 'out'),
            ('out', 'out_data'),
            ('out_data', 'result'),
        ], {
            'input_data': {
                'shape': input_shape
            },
            'scale_param_dq': {
                'shape': scale_param_value.shape,
                'value': scale_param_value
            },
            'scale_param_dq_data': {
                'shape': scale_param_value.shape,
                'value': scale_param_value
            },
            'zerop_param_dq': {
                'shape': zero_param_value.shape,
                'value': zero_param_value
            },
            'zerop_param_dq_data': {
                'shape': zero_param_value.shape,
                'value': zero_param_value
            },
            'sub_reshape_const_data': {
                'shape': target_shape.shape,
                'value': target_shape
            },
            'mul_reshape_const_data': {
                'shape': target_shape.shape,
                'value': target_shape
            },
        },
                                nodes_with_edges_only=True)

        graph.stage = 'middle'
        DequantizeLinearResolver().find_and_replace_pattern(graph)

        (flag, resp) = compare_graphs(graph,
                                      graph_ref,
                                      'out',
                                      check_op_attrs=True)
        self.assertTrue(flag, resp)
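
The per-axis case differs from the scalar one only in that the scale and zero point are reshaped to the target_shape so they broadcast along 'axis'; a hedged NumPy sketch of what the reference graph computes (the helper name is made up):

import numpy as np

def dequantize_with_axis_ref(x, scale, zero_point, axis):
    target_shape = np.ones(x.ndim, dtype=np.int64)
    target_shape[axis] = -1                 # e.g. [1, 3, 1, 1] for axis=1 on a 4-D input
    zp = zero_point.astype(np.float32).reshape(target_shape)
    s = scale.astype(np.float32).reshape(target_shape)
    return (x.astype(np.float32) - zp) * s
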
Example #25
0
    def create_net(self, shape, softmax_axis, ir_version):
        """
            ONNX net                       IR net

            Input->Softmax->Output   =>    Input->Reshape->SoftMax->Reshape

        """

        #
        #   Create ONNX model
        #

        import onnx
        from onnx import helper
        from onnx import TensorProto

        input = helper.make_tensor_value_info('input', TensorProto.FLOAT, shape)
        output = helper.make_tensor_value_info('output', TensorProto.FLOAT, shape)

        node_def = onnx.helper.make_node(
            'Softmax',
            inputs=['input'],
            outputs=['output'],
            axis=softmax_axis
        )

        # Create the graph (GraphProto)
        graph_def = helper.make_graph(
            [node_def],
            'test_model',
            [input],
            [output],
        )

        # Create the model (ModelProto)
        onnx_net = helper.make_model(graph_def, producer_name='test_model')

        #
        #   Create reference IR net
        #

        ref_net = None

        converted_shape = shape if len(shape) != 1 else shape[0]
        flatten_shape = get_flatten_shape(shape, softmax_axis)
        reshape_data_val = second_input_data_of_reshape(shape, softmax_axis)

        if check_ir_version(10, None, ir_version):
            if len(shape) == 2 and shape == flatten_shape:
                ref_nodes_attributes = {
                    'input': {'kind': 'op', 'type': 'Parameter', 'shape': converted_shape},
                    'input_data': {'shape': shape, 'kind': 'data', 'value': None},
                    'flatten_shape_val': {'shape': int64_array(reshape_data_val).shape,
                                          'kind': 'data',
                                          'value': int64_array(reshape_data_val)},
                    'flatten_shape': {'type': 'Const', 'kind': 'op', 'shape': 2},
                    'flatten_shape_data': {'shape': int64_array([2]), 'kind': 'data', 'value': None},
                    'reshape': {'kind': 'op', 'type': 'Reshape'},
                    'reshape_data': {'kind': 'data', 'shape': flatten_shape, 'value': None},
                    'softmax': {'type': 'SoftMax', 'kind': 'op', 'axis': 1},
                    'softmax_data': {'shape': flatten_shape, 'kind': 'data', 'value': None},
                    'result': {'kind': 'op', 'type': 'Result'},
                }

                ref_edges = [
                    ('input', 'input_data'),
                    ('flatten_shape_val', 'flatten_shape'),
                    ('flatten_shape', 'flatten_shape_data'),
                    ('flatten_shape_data', 'reshape', {'in': 1}),
                    ('input_data', 'reshape', {'in': 0}),
                    ('reshape', 'reshape_data'),
                    ('reshape_data', 'softmax'),
                    ('softmax', 'softmax_data'),
                    ('softmax_data', 'result'),
                ]
            else:
                ref_nodes_attributes = {
                    'input': {'kind': 'op', 'type': 'Parameter', 'shape': converted_shape},
                    'input_data': {'shape': shape, 'kind': 'data', 'value': None},
                    'flatten_shape_val': {'shape': int64_array(reshape_data_val).shape,
                                          'kind': 'data',
                                          'value': int64_array(reshape_data_val)},
                    'flatten_shape': {'type': 'Const', 'kind': 'op', 'shape': 2},
                    'flatten_shape_data': {'shape': int64_array([2]), 'kind': 'data', 'value': None},
                    'reshape': {'kind': 'op', 'type': 'Reshape'},
                    'reshape_data': {'kind': 'data', 'shape': flatten_shape, 'value': None},
                    'softmax': {'type': 'SoftMax', 'kind': 'op', 'axis': 1},
                    'softmax_data': {'shape': flatten_shape, 'kind': 'data', 'value': None},
                    'last_shape_val': {'shape': int64_array(shape).shape, 'kind': 'data', 'value': int64_array(shape)},
                    'last_shape': {'type': 'Const', 'kind': 'op', 'shape': len(shape)},
                    'last_shape_data': {'shape': int64_array([len(shape)]), 'kind': 'data', 'value': None},
                    'last_reshape': {'kind': 'op', 'type': 'Reshape'},
                    'last_reshape_data': {'kind': 'data', 'shape': shape, 'value': None},
                    'result': {'kind': 'op', 'type': 'Result'},
                }

                ref_edges = [
                    ('input', 'input_data'),
                    ('flatten_shape_val', 'flatten_shape'),
                    ('flatten_shape', 'flatten_shape_data'),
                    ('flatten_shape_data', 'reshape', {'in': 1}),
                    ('input_data', 'reshape', {'in': 0}),
                    ('reshape', 'reshape_data'),
                    ('reshape_data', 'softmax'),
                    ('softmax', 'softmax_data'),
                    ('last_shape_val', 'last_shape'),
                    ('last_shape', 'last_shape_data'),
                    ('last_shape_data', 'last_reshape', {'in': 1}),
                    ('softmax_data', 'last_reshape', {'in': 0}),
                    ('last_reshape', 'last_reshape_data'),
                    ('last_reshape_data', 'result'),
                ]

            ref_net = build_graph(ref_nodes_attributes, ref_edges)

        return onnx_net, ref_net
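
The helpers get_flatten_shape and second_input_data_of_reshape are not shown in this example. Judging by how the reference graph uses them, and by the opset-11 ONNX Softmax semantics (the input is coerced to 2-D around the axis), a plausible implementation of the first one could look like this -- an assumption, not the actual helper:

import numpy as np

def get_flatten_shape(shape, axis):
    axis = axis % len(shape)
    before = int(np.prod(shape[:axis], dtype=np.int64))   # empty product -> 1
    after = int(np.prod(shape[axis:], dtype=np.int64))
    return [before, after]

print(get_flatten_shape([2, 3, 4, 5], 2))   # [6, 20]
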
    'placeholder': {
        'type': 'Parameter',
        'kind': 'op',
        'op': 'Parameter'
    },
    'placeholder_data': {
        'value': None,
        'shape': None,
        'kind': 'data',
        'data_type': np.float32
    },
    'ss_begin': {
        'kind': 'op',
        'op': 'Const',
        'type': 'Const',
        'value': int64_array([2]),
        'shape': int64_array([1])
    },
    'ss_begin_data': {
        'kind': 'data',
        'value': int64_array([2]),
        'shape': int64_array([1])
    },
    'ss_end': {
        'kind': 'op',
        'op': 'Const',
        'type': 'Const',
        'value': int64_array([4]),
        'shape': int64_array([1])
    },
    'ss_end_data': {
Example #27
0
    def test_add_output_1(self):
        sub_graph_2 = build_graph(nodes_attrs=sub_graph_2_nodes,
                                  edges=[
                                      *connect('cond_2_int', 'cond_2_int_out'),
                                      *connect('in_2_int', 'OUT_2'),
                                      *connect('ones', 'OUT_2'),
                                      *connect('OUT_2', 'OUT_2_out'),
                                      *connect('in_2_int', 'in_2_int_out')
                                  ],
                                  nodes_with_edges_only=True)

        sub_graph_1 = build_graph(nodes_attrs=sub_graph_1_nodes,
                                  edges=[
                                      *connect('M_2', '0:Loop_2'),
                                      *connect('cond_2', '1:Loop_2'),
                                      *connect('IN_2', '2:Loop_2'),
                                      *connect('Loop_2:0', 'Loop_2_out'),
                                      *connect('in_1_int', 'in_1_int_out'),
                                      *connect('cond_1_int', 'cond_1_int_out')
                                  ],
                                  nodes_with_edges_only=True)
        loop_node_1 = Node(sub_graph_1, 'Loop_2')
        loop_node_1.body = sub_graph_2

        main_graph = build_graph(nodes_attrs=main_graph_nodes,
                                 edges=[
                                     *connect('M', '0:Loop'),
                                     *connect('cond', '1:Loop'),
                                     *connect('IN_2', '2:Loop'),
                                     *connect('IN_1', "3:Loop"),
                                     *connect('Loop:0', 'OUT_1')
                                 ],
                                 nodes_with_edges_only=True)
        loop_node = Node(main_graph, 'Loop')
        loop_node.body = sub_graph_1
        main_graph.graph['additional_outputs'] = ['Loop', 'Loop_2']
        loop_node_output_port_map_len = len(loop_node.output_port_map)
        loop_node_out_ports_len = len(loop_node.out_ports())
        loop_2_out_ports_len = len(loop_node_1.out_ports())
        max_layer_id = 5

        AddOutputRecursive().find_and_replace_pattern(main_graph)

        loop_node = Node(main_graph, 'Loop')
        self.assertEqual(len(loop_node.output_port_map),
                         loop_node_output_port_map_len + 1)
        self.assertEqual(len(loop_node.out_ports()),
                         loop_node_out_ports_len + 1)
        self.assertEqual(
            loop_node.out_port(1).get_destination().node.op, 'Result')
        self.assertTrue(
            np.all(
                loop_node.out_port(1).data.get_shape() == int64_array(
                    [5, 10, 4, 64, 54])))
        last_node = Node(sub_graph_1, 'Loop_2')
        self.assertEqual(len(last_node.out_ports()), loop_2_out_ports_len)
        unsq_node = last_node.out_port(0).get_destinations()[1].node
        self.assertEqual(unsq_node.op, 'Unsqueeze')
        self.assertEqual(
            unsq_node.out_port(0).get_destination().node.op, 'Result')
        self.assertEqual(
            unsq_node.out_port(0).get_destination().node.internal_layer_id,
            max_layer_id + 3)
        self.assertTrue(
            np.all(
                unsq_node.out_port(0).data.get_shape() == int64_array(
                    [1, 10, 4, 64, 54])))
    def test_conversion(self, input_shape, scales, axes):
        input_shape_as_array = int64_array(input_shape)
        scales_as_array = float32_array(scales)
        graph = build_graph(
            graph_node_attrs, graph_edges, {
                'placeholder_data': {
                    'shape': input_shape_as_array
                },
                'scales': {
                    'value': scales_as_array,
                    'shape': scales_as_array.shape
                },
                'scales_data': {
                    'value': scales_as_array,
                    'shape': scales_as_array.shape
                },
                'upsample_data': {
                    'shape':
                    ((input_shape_as_array + 1.e-5) * scales_as_array).astype(
                        np.int64)
                }
            })
        graph.graph['layout'] = 'NCHW'
        ref_graph = build_graph(
            new_ref_graph_node_attr, new_ref_graph_edges, {
                'placeholder_data': {
                    'shape': int64_array(input_shape)
                },
                'ss_begin': {
                    'value': int64_array([axes[0]])
                },
                'ss_end': {
                    'value': int64_array([axes[-1] + 1])
                },
                'ss_begin_data': {
                    'value': int64_array([axes[0]])
                },
                'ss_end_data': {
                    'value': int64_array([axes[-1] + 1])
                },
                'factor': {
                    'value': scales_as_array[2:],
                    'shape': scales_as_array[2:].shape
                },
                'factor_data': {
                    'value': scales_as_array[2:],
                    'shape': scales_as_array[2:].shape
                },
                'axes_const': {
                    'value': int64_array(axes),
                    'shape': int64_array(axes).shape
                },
                'interpolate_data': {
                    'shape':
                    (input_shape_as_array * scales_as_array + 1e-5).astype(
                        np.int64)
                },
            })
        UpsampleToResample().find_and_replace_pattern(graph)
        (flag, resp) = compare_graphs(graph, ref_graph, 'output')
        self.assertTrue(flag, resp)
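
The + 1e-5 used in the expected 'upsample_data' and 'interpolate_data' shapes above guards against float truncation: a float32 scale can land just below an integer, and astype(np.int64) would then lose a pixel. A small demonstration:

import numpy as np

shape = np.array([1, 1, 10, 10], dtype=np.int64)
scales = np.array([1.0, 1.0, 6.1, 6.1], dtype=np.float32)
print((shape * scales).astype(np.int64))          # [ 1  1 60 60] -- truncated
print((shape * scales + 1e-5).astype(np.int64))   # [ 1  1 61 61] -- as intended
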
Example #29
0
    def replace_pattern(self, graph: Graph, match: dict):
        node = match['node']
        node_name = node.soft_get('name', node.id)

        if 2 in node.in_ports() and not node.in_port(2).disconnected():
            # The third input represents the output shape. Cut its value according to the scheme:
            # [N, C, spatial_dim_0, ..., spatial_dim_n] -> [spatial_dim_0, ..., spatial_dim_n]
            in_rank = node.in_port(0).data.get_shape().size

            shape_src = node.in_port(2).get_source()
            node.in_port(2).disconnect()

            ss_0 = create_op_with_const_inputs(
                graph, StridedSlice, {
                    1: mo_array([2], dtype=np.int32),
                    2: mo_array([in_rank], dtype=np.int32),
                    3: mo_array([1], dtype=np.int32)
                }, {
                    'name': node_name + '/ss_0_port',
                    'begin_mask': mo_array([1], dtype=np.int32),
                    'end_mask': mo_array([0], dtype=np.int32),
                    'new_axis_mask': mo_array([0], dtype=np.int32),
                    'shrink_axis_mask': mo_array([0], dtype=np.int32),
                    'ellipsis_mask': mo_array([0], dtype=np.int32)
                })

            shape_src.connect(ss_0.in_port(0))
            ss_0.out_port(0).connect(node.in_port(2))

            # Specification: *padding amount* is deduced from the relation of the input and output spatial shapes
            del node['pad']

        elif node.has_valid('original_output_spatial_shape'):
            # The node had a fixed output spatial shape set in the original framework, so restore it here
            const = Const(
                graph, {
                    'value': int64_array(node.original_output_spatial_shape),
                    'name': node_name + '/original_spatial_shape'
                }).create_node()
            node.add_input_port(2, skip_if_exist=True)
            const.out_port(0).connect(node.in_port(2))

            # Specification: *padding amount* is deduced from the relation of the input and output spatial shapes
            del node['pad']

        group = node.soft_get('group', 1)

        if group != 1:
            assert group > 1

            weights_shape = node.in_port(1).data.get_shape()
            assert weights_shape is not None
            I = node.in_port(0).data.get_shape()[1]
            assert I % group == 0
            assert node.output % group == 0

            new_shape = shape_array(
                [group, I // group, node.output // group, *weights_shape[2:]])

            assert not is_fully_defined(new_shape) or not is_fully_defined(weights_shape) or \
                   np.prod(weights_shape) == np.prod(new_shape), 'Initial weights shape {}, grouped weights shape {}' \
                                                                 ''.format(weights_shape, new_shape)
            reshape = create_op_node_with_second_input(
                graph, Reshape, new_shape, {'override_output_shape': True},
                node.in_port(1).get_source().node)

            node.in_port(1).get_connection().set_source(reshape.out_port(0))

            node['type'] = 'GroupConvolutionBackpropData'
        else:
            node['type'] = 'ConvolutionBackpropData'
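
A quick element-count check of the grouped-weights reshape above. The concrete shapes are illustrative; the size assert in the pass implies that the deconvolution weights are laid out as [I, O // group, kH, kW]:

import numpy as np

group, I, O, kH, kW = 2, 8, 6, 3, 3
weights = np.zeros((I, O // group, kH, kW), dtype=np.float32)
regrouped = weights.reshape(group, I // group, O // group, kH, kW)
assert weights.size == regrouped.size   # the assert in the pass checks exactly this
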
Example #30
0
def muladd_to_scaleshift_action(graph: Graph, match: dict):
    mul = match['mul']
    add = match['add']
    output = match['output']

    # The pass works correctly only when the node has a single output
    if len(mul.out_port(0).get_destinations()) > 1:
        return

    if mul.soft_get('can_be_scaleshift') is False or add.soft_get('can_be_scaleshift') is False:
        return

    mul_weights_id = get_value_id(mul)
    mul_input_id = get_tensor_id(mul)
    add_weights_id = get_value_id(add)

    if mul_weights_id is None:
        log.debug("Mul->Add to ScaleShift: Mul {} has no weights".format(mul.name))
        return
    if mul_input_id is None:
        log.debug("Mul->Add to ScaleShift: Mul {} has no input".format(mul.name))
        return
    if add_weights_id is None:
        log.debug("Mul->Add to ScaleShift: Add {} has no weights".format(add.name))
        return

    input = mul.in_node(mul_input_id)
    weights = mul.in_node(mul_weights_id)
    bias = add.in_node(add_weights_id)

    # Transform values
    weights.value = np.squeeze(weights.value)
    weights.shape = int64_array(weights.value.shape)

    bias.value = np.squeeze(bias.value)
    bias.shape = int64_array(bias.value.shape)

    # Broadcast weights if they are scalar
    if weights.value.ndim == 0 and bias.value.ndim == 1:
        weights.value = np.full(bias.shape, weights.value.item(), dtype=weights.value.dtype)
        weights.shape = int64_array(weights.value.shape)

    # The 'shape' attributes here are numpy arrays, so compare them element-wise
    if not np.array_equal(bias.shape, weights.shape):
        log.warning('Mul->Add to ScaleShift conversion stopped {} != {}'.format(weights.shape, bias.shape))
        return

    if bias.value.ndim != weights.value.ndim or bias.value.size != weights.value.size:
        log.debug("Skipping Mul->Add to ScaleShift conversion for nodes {}, {} because the weights "
                  "and biases have different ranks or sizes".format(mul.name, add.name))
        return

    if bias.value.size == 1 and weights.value.size == 1:
        log.debug("Skipping Mul->Add to ScaleShift conversion for nodes {}, {}. Will be converted to Power"
                  "".format(mul.name, add.name))
        return

    op_name = "ScaleShift"

    log.debug("Fusing Mul->Add to {}. Input nodes: {} and {}, bias.shape = {}, weights.shape = {}"
              "".format(op_name, mul.id, add.id, bias.shape, weights.shape))

    graph.remove_edge(input.node, mul.id)
    graph.remove_edge(weights.node, mul.id)
    graph.remove_edge(bias.node, add.id)
    graph.remove_edge(add.node, output.id)

    op_node = graph.unique_id(mul.name + '/Fused{}_'.format(op_name))

    graph.add_node(op_node, **add_attrs_props(dict(kind='op', type=op_name, name=op_node, op=op_name,
                                                   data_type=input.data_type)))
    scsh = Node(graph, op_node)
    scsh.add_input_port(0)
    scsh.add_input_port(1)
    scsh.add_input_port(2)
    scsh.add_output_port(0)

    update_ie_fields(graph.node[op_node])

    graph.add_edges_from([
        (input.node, op_node, {'in': 0}),
        (weights.node, op_node, {'in': 1, 'bin': 'weights'}),
        (bias.node, op_node, {'in': 2, 'bin': 'biases'}),
        (op_node, output.node, {'out': 0})
    ])

    return
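
What the fused ScaleShift computes is per-channel y = x * weights + bias on the C axis of an NCHW tensor; a minimal NumPy sketch (the helper name is made up):

import numpy as np

def scale_shift_ref(x, weights, bias):
    shape = [1, weights.shape[0]] + [1] * (x.ndim - 2)   # broadcast over N and spatial dims
    return x * weights.reshape(shape) + bias.reshape(shape)

x = np.ones((1, 3, 2, 2), dtype=np.float32)
w = np.array([2.0, 3.0, 4.0], dtype=np.float32)
b = np.array([0.1, 0.2, 0.3], dtype=np.float32)
print(scale_shift_ref(x, w, b)[0, :, 0, 0])   # [2.1 3.2 4.3]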