Example #1
    def replace_pattern(self, graph: Graph, match: dict):
        mul_node = match['mul_op']
        const_node = match['const_op']
        max_node = match['max_op']
        max_name = max_node.soft_get('name', max_node.id)

        const_value = const_node.out_port(0).data.get_value()
        if const_value is None or const_value.size != 1:
            # log const_value itself: accessing const_value.size in the message
            # would raise AttributeError when the value is None
            log.debug(
                'Mul layer "{}" cannot participate in conversion to LeakyReLU because constant "{}" '
                'is not a single-element value: {}'.format(
                    mul_node.id, const_node.id, const_value))
            return

        # Create new LeakyReLU operation
        leaky_relu_node = LeakyReLU(
            graph, dict(negative_slope=const_value.item(0))).create_node()

        data_in_port = int(
            mul_node.in_port(0).get_source().node.type == 'Const')
        mul_node.in_port(data_in_port).get_source().connect(
            leaky_relu_node.in_port(0))
        max_node.out_port(0).get_connection().set_source(
            leaky_relu_node.out_port(0))

        rename_nodes([(max_node, max_name + '/TBR'),
                      (leaky_relu_node, max_name)])

        log.debug(
            'Successfully converted Maximum "{}" and Mul "{}" to ReLU with negative slope '
            '(leaky ReLU)'.format(max_node.id, mul_node.id))
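
A minimal NumPy sketch (not part of the snippet above) of the identity this rewrite relies on: for 0 < alpha < 1, Max(x, Mul(alpha, x)) equals LeakyReLU(x) with negative_slope = alpha.

import numpy as np

x = np.linspace(-3.0, 3.0, 7)
alpha = 0.1  # stands in for the scalar taken from the matched Const node

max_form = np.maximum(x, alpha * x)     # the matched Mul + Maximum subgraph
leaky = np.where(x >= 0, x, alpha * x)  # LeakyReLU with negative_slope = alpha
assert np.allclose(max_form, leaky)
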
Example #2
    def find_and_replace_pattern(self, graph: Graph):
        for node in graph.get_op_nodes(op='MVNCaffe'):
            node_name = node.soft_get('name', node.id)

            start_axis = 2
            if node['across_channels'] == 1:
                start_axis = 1

            rank = Rank(graph, {'name': node_name + '/Rank'}).create_node()

            # create range of axes based on `start_axis` and rank of input
            rng = create_op_with_const_inputs(graph, Range, {
                0: int64_array(start_axis),
                2: int64_array(1)
            }, {
                'name': node_name + '/Range',
                'output_type': np.int64
            })
            rng.in_port(1).connect(rank.out_port(0))

            new_mvn = MVN(
                graph, {
                    'eps': node.soft_get('eps', 1e-9),
                    'eps_mode': 'inside_sqrt',
                    'normalize_variance': node.soft_get(
                        'normalize_variance', 1)
                }).create_node([node.in_port(0).get_source().node, rng])
            new_mvn.in_port(0).get_connection().add_destination(
                rank.in_port(0))
            node.out_port(0).get_connection().set_source(new_mvn.out_port(0))
            rename_nodes([(node, node_name + '/tbd'), (new_mvn, node_name)])

            graph.remove_node(node.id)
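
A short NumPy sketch (illustrative, not repository code) of what the resulting MVN computes: normalization over the axes [start_axis, rank), built the same way the Range node builds them.

import numpy as np

x = np.random.rand(2, 3, 4, 5).astype(np.float32)
start_axis = 2                           # across_channels == 0 in the Caffe attributes
axes = tuple(range(start_axis, x.ndim))  # what Range(start_axis, rank, 1) produces
mean = x.mean(axis=axes, keepdims=True)
var = x.var(axis=axes, keepdims=True)
eps = 1e-9
mvn = (x - mean) / np.sqrt(var + eps)    # normalize_variance=1, eps_mode='inside_sqrt'
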
Example #3
    def find_and_replace_pattern(self, graph: Graph):
        for roll_node in graph.get_op_nodes(op='Roll'):
            if not roll_node.in_port(2).disconnected():
                # this Roll already has axes; skip it and keep scanning the graph
                continue
            node_name = roll_node.soft_get('name', roll_node.id)

            # reshape to 1d tensor
            reshape_to_1d = create_op_node_with_second_input(
                graph, Reshape, int64_array([-1]),
                {'name': node_name + '/reshape'})
            roll_node.in_port(0).get_connection().insert_node(reshape_to_1d)

            # add zero const as axes input to roll
            const_zero = Const(graph, {
                'value': int64_array([0]),
                'name': node_name + '/axes'
            }).create_node()
            const_zero.out_port(0).connect(roll_node.in_port(2))

            # reshape to original shape
            shape_of = Shape(graph, {
                'name': node_name + '/shape_of'
            }).create_node()
            reshape_to_1d.in_port(0).get_connection().add_destination(
                shape_of.in_port(0))
            reshape_to_orig_shape = Reshape(graph, {}).create_node()
            rename_nodes([(roll_node, node_name + '/roll'),
                          (reshape_to_orig_shape, node_name)])
            shape_of.out_port(0).connect(reshape_to_orig_shape.in_port(1))
            roll_node.out_port(0).get_connection().insert_node(
                reshape_to_orig_shape)
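
An illustrative NumPy sketch (not repository code) of the equivalence behind the rewrite: a Roll without axes shifts the flattened tensor, which is exactly what the inserted Reshape -> Roll(axes=[0]) -> Reshape chain computes.

import numpy as np

x = np.arange(12).reshape(3, 4)
shift = 5

rolled = np.roll(x.reshape(-1), shift).reshape(x.shape)  # the inserted subgraph
assert np.array_equal(rolled, np.roll(x, shift))         # np.roll with axis=None also flattens
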
Example #4
    def replace_pattern(graph: Graph, match: dict):
        node = match['op']
        if node.has_port('in', 2) and not node.in_port(
                2).disconnected() and not node.has_and_set('shape_input'):
            bias_name = node.name
            new_node_name = node.name + '/WithoutBiases'
            add = Add(graph, dict(name=bias_name)).create_node()
            rename_nodes([(node, new_node_name), (add, bias_name)])
            node.out_port(0).get_connection().set_source(add.out_port(0))
            node.out_port(0).connect(add.in_port(0))
            node.in_port(2).get_connection().set_destination(add.in_port(1))

            bias = add.in_port(1).get_source().node
            if bias.has_valid("type") and bias.type == "Const":
                input_shape = add.in_port(0).data.get_shape()
                if len(input_shape) > 2:
                    dims_to_add = len(input_shape) - 2 if graph.graph[
                        'layout'] == 'NCHW' else 0
                    if dims_to_add > 0:
                        reshape = create_op_node_with_second_input(
                            graph, Reshape,
                            int64_array([input_shape[1]] + [1] * dims_to_add),
                            {'name': node.id + '/Dims'})
                        add.in_port(1).get_connection().set_destination(
                            reshape.in_port(0))
                        reshape.out_port(0).connect(add.in_port(1))
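
A hedged NumPy sketch of the bias reshape above: for a rank-4 NCHW tensor, a 1D bias of size C must become shape [C, 1, 1] so the Add broadcasts along H and W.

import numpy as np

data = np.zeros((2, 3, 5, 5), np.float32)  # N, C, H, W
bias = np.array([1.0, 2.0, 3.0], np.float32)

dims_to_add = data.ndim - 2                # 2 for a 4D NCHW input
reshaped = bias.reshape([bias.size] + [1] * dims_to_add)  # shape (3, 1, 1)
out = data + reshaped                      # broadcasts to (2, 3, 5, 5)
assert out.shape == data.shape
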
Example #5
    def replace_layer_norm(self, graph: Graph, match: dict):
        inp = match['pool0']
        node_before = inp.in_port(0).get_source().node
        node_before_name = node_before.soft_get('name', node_before.id)

        # take/check the values of the add, pow and axes for ReduceMean
        pow_param = match['pow_param']
        add_param = match['add_param']
        if add_param.value.size == 1 and pow_param.value.size == 1 and add_param.value.item() <= 1e-05 \
                and pow_param.value.item() == 0.5 and match['pool0_param'].value == match['pool1_param'].value:
            log.debug('Found LayerNorm pattern after {} with name {}'.format(
                node_before.op, node_before_name))
            mvn = create_op_with_const_inputs(
                graph, MVN, {1: match['pool1_param'].value}, {
                    'eps': add_param.value.item(),
                    'normalize_variance': 1,
                    'eps_mode': 'inside_sqrt'
                })
            div_name = match['div'].soft_get('name', match['div'].id)
            rename_nodes([(match['div'], div_name + '/to_be_removed'),
                          (mvn, div_name)])

            inp.in_port(0).get_connection().set_destination(mvn.in_port(0))
            match['div'].out_port(0).get_connection().set_source(
                mvn.out_port(0))
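
An illustrative NumPy check of the match condition (assuming the matched subgraph computes (x - mean) / ((var + eps) ** 0.5) over the ReduceMean axes): this is exactly MVN with normalize_variance=1 and eps_mode='inside_sqrt'.

import numpy as np

x = np.random.rand(2, 5, 8).astype(np.float32)
axes, eps = (-1,), 1e-5

mean = x.mean(axis=axes, keepdims=True)
var = ((x - mean) ** 2).mean(axis=axes, keepdims=True)
pattern_out = (x - mean) / ((var + eps) ** 0.5)  # the matched pow/add/div chain
mvn_out = (x - mean) / np.sqrt(var + eps)        # MVN, eps_mode='inside_sqrt'
assert np.allclose(pattern_out, mvn_out)
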
Example #6
    def replace_sub_graph(self, graph: Graph, match: dict):
        div_sqrt = match['op']
        div_sqrt_name = div_sqrt.soft_get('name', div_sqrt.id)
        shape_node = Shape(graph,
                           dict(name=div_sqrt_name + '/Shape')).create_node()
        data_out_port = div_sqrt.in_port(0).get_source()
        shape_node.in_port(0).connect(data_out_port)

        shape_values_node = node_to_get_shape_value_of_indices(
            shape_node=shape_node, indices=[-1])

        pow_node = AttributedPower(
            graph, dict(name=div_sqrt_name + '/Sqrt',
                        power=mo_array(0.5))).create_node()

        # Due to specification, Power must have inputs with the same data type.
        convert_pow_input = Cast(
            graph,
            dict(dst_type=np.float32,
                 name=shape_values_node.name +
                 '/ConvertToFP32')).create_node()
        div_node = Div(graph, dict(name="Div")).create_node()

        shape_values_node.out_port(0).connect(convert_pow_input.in_port(0))
        convert_pow_input.out_port(0).connect(pow_node.in_port(0))
        div_sqrt.in_port(0).get_connection().set_destination(
            div_node.in_port(0))
        div_node.in_port(1).connect(pow_node.out_port(0))
        div_sqrt.out_port(0).get_connection().set_source(div_node.out_port(0))

        rename_nodes([(div_sqrt, div_sqrt_name + '/ShouldBeDeleted'),
                      (div_node, div_sqrt_name)])
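
A NumPy sketch (illustrative) of the subgraph built above: divide the input by the square root of its last dimension, with that dimension taken from ShapeOf at runtime and cast to FP32 before Power.

import numpy as np

x = np.random.rand(2, 4, 64).astype(np.float32)
last_dim = np.float32(x.shape[-1])  # ShapeOf -> last index -> Cast to FP32
out = x / last_dim ** 0.5           # AttributedPower(0.5) feeding Div
assert np.allclose(out, x / np.sqrt(np.float32(64.0)))
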
Example #7
    def replace_gelu(self, graph: Graph, match: dict):
        # Gaussian Error Linear Unit
        # f(x) = 0.5 * x * (1 + erf(x / sqrt(2)))
        out_node = match['mul0']
        node_name = out_node.soft_get('name', out_node.id)
        div = match['div']
        inp_node = div.in_port(0).get_source().node
        inp_name = inp_node.soft_get('name', inp_node.id)
        log.debug('Found potential Erf-based GeLU pattern after {} with name {}'.format(inp_node.op, inp_name))

        # take the values of the mul, add and div
        div_param = match['div_param']
        add_param = match['add_param']
        mul_param = match['mul_param']

        if add_param.value.size == 1 and mul_param.value.size == 1 and div_param.value.size == 1:
            mul_param = match['mul_param'].value.item()
            add_param = match['add_param'].value.item()
            div_param = match['div_param'].value.item()

            sqrt2 = sqrt(2.0)
            # check that the values match the approximation
            if fabs(div_param - sqrt2) < 1e-06 and mul_param == 0.5 and add_param == 1.0:
                log.debug('Confirmed Erf-based GELU pattern after {} with name {}'.format(inp_node.op, inp_name))
                gelu = GeLUOP(graph, dict(name=inp_name + '/GELU_', approximation_mode='erf')).create_node()
                div.in_port(0).get_connection().set_destination(gelu.in_port(0))
                out_node.out_port(0).get_connection().set_source(gelu.out_port(0))
                rename_nodes([(out_node, node_name + '/TBD'), (gelu, node_name)])
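
A quick numeric check (illustrative) that the matched constants reproduce erf-based GELU, f(x) = 0.5 * x * (1 + erf(x / sqrt(2))):

from math import erf, sqrt

def gelu_erf(x):
    return 0.5 * x * (1.0 + erf(x / sqrt(2.0)))

def matched_pattern(x, mul_param=0.5, add_param=1.0, div_param=sqrt(2.0)):
    # the Div -> Erf -> Add -> Mul -> Mul chain from the pattern, flattened
    return mul_param * x * (add_param + erf(x / div_param))

for v in (-2.0, -0.5, 0.0, 0.5, 2.0):
    assert abs(gelu_erf(v) - matched_pattern(v)) < 1e-12
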
Example #8
    def replace_op(self, graph: Graph, node: Node):
        node_name = node.soft_get('name', node.id)

        # broadcast default value to required shape
        broadcast_node = Broadcast(graph, {
            'name': node_name + '/Broadcast_'
        }).create_node()
        node.in_port(1).get_connection().set_destination(
            broadcast_node.in_port(1))
        if not node.in_port(3).disconnected():
            node.in_port(3).get_connection().set_destination(
                broadcast_node.in_port(0))
        else:
            broadcast_node.in_port(0).connect(
                Const(
                    graph, {
                        'name': broadcast_node.name + '/FillValue_',
                        'value': np.float32(0)
                    }).create_node().out_port(0))

        # update broadcasted tensor with required values at required locations
        scatternd_node = ScatterNDUpdate(
            graph, {
                'name': node_name + '/ScatterNDUpdate_'
            }).create_node()
        scatternd_node.in_port(0).connect(broadcast_node.out_port(0))
        node.in_port(0).get_connection().set_destination(
            scatternd_node.in_port(1))
        node.in_port(2).get_connection().set_destination(
            scatternd_node.in_port(2))

        rename_nodes([(node, node_name + "/AbandonedName"),
                      (scatternd_node, node_name)])

        return [scatternd_node.id]
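
A NumPy sketch (illustrative; the port semantics are read off the code above: 0 = indices, 1 = output shape, 2 = update values, 3 = optional default value) of the Broadcast + ScatterNDUpdate decomposition:

import numpy as np

shape = np.array([4, 5])
indices = np.array([[0, 1], [2, 3]])
updates = np.array([10.0, 20.0], np.float32)
default = np.float32(0)

dense = np.broadcast_to(default, shape).copy()  # the Broadcast node
for idx, upd in zip(indices, updates):          # the ScatterNDUpdate node
    dense[tuple(idx)] = upd
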
Example #9
    def replace_sub_graph(self, graph: Graph, match: [dict, SubgraphMatch]):
        cmp = match['complex']
        complex_abs = match['abs']
        complex_abs_name = complex_abs.soft_get('name', complex_abs.id)

        power_type = data_type_str_to_np(graph.graph['cmd_params'].data_type)

        pow0 = create_op_with_const_inputs(
            graph, Pow, {1: power_type(2.0)},
            {'name': complex_abs_name + '/real_part_squared'})
        pow1 = create_op_with_const_inputs(
            graph, Pow, {1: power_type(2.0)},
            {'name': complex_abs_name + '/imag_part_squared'})

        cmp.in_port(0).get_connection().set_destination(pow0.in_port(0))
        cmp.in_port(1).get_connection().set_destination(pow1.in_port(0))

        add = Add(graph, {
            'name': complex_abs_name + '/squared_abs'
        }).create_node([pow0, pow1])
        sqrt = create_op_with_const_inputs(graph, Pow, {1: power_type(0.5)},
                                           {})
        add.out_port(0).connect(sqrt.in_port(0))

        complex_abs.out_port(0).get_connection().set_source(sqrt.out_port(0))

        rename_nodes([(complex_abs, complex_abs_name + '/to_be_removed'),
                      (sqrt, complex_abs_name)])
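
An illustrative NumPy check of the decomposition above: |a + bi| = sqrt(a^2 + b^2), built as Pow(2) on each part, Add, then Pow(0.5).

import numpy as np

real = np.array([3.0, 0.0, -1.0], np.float32)
imag = np.array([4.0, 2.0, 1.0], np.float32)

rebuilt = (real ** 2.0 + imag ** 2.0) ** 0.5
assert np.allclose(rebuilt, np.abs(real + 1j * imag))
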
Example #10
    def replace_op(self, graph: Graph, node: Node):
        node_name = node.soft_get('name', node.id)
        assert node.has_valid(
            'axis'
        ), 'The node "{}" does not have mandatory attribute "axis"'.format(
            node_name)

        flatten_node = FlattenONNX(graph, {
            'name': node_name + '/FlattenONNX_',
            'axis': node.axis
        }).create_node()
        shape_node = Shape(graph, {
            'name': node_name + '/ShapeOf_'
        }).create_node()
        logsoftmax_node = LogSoftmax(graph, {
            'name': node_name + '/LogSoftmax_',
            'axis': 1
        }).create_node()
        reshape_node = Reshape(graph, {}).create_node()

        rename_nodes([(node, node_name + '/delete'),
                      (reshape_node, node_name)])

        shape_node.out_port(0).connect(reshape_node.in_port(1))
        logsoftmax_node.out_port(0).connect(reshape_node.in_port(0))
        flatten_node.out_port(0).connect(logsoftmax_node.in_port(0))

        source = node.in_port(0).get_source()

        flatten_node.in_port(0).connect(source)
        shape_node.in_port(0).connect(source)

        return [reshape_node.id]
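
A NumPy sketch (illustrative) of the decomposition: flatten to 2D at `axis`, take log-softmax over axis 1, then restore the original shape via Reshape(ShapeOf(input)).

import numpy as np

def log_softmax_2d(x):
    shifted = x - x.max(axis=1, keepdims=True)  # numerically stable form
    return shifted - np.log(np.exp(shifted).sum(axis=1, keepdims=True))

x = np.random.rand(2, 3, 4).astype(np.float32)
axis = 1
flat = x.reshape(int(np.prod(x.shape[:axis])), -1)  # FlattenONNX with the node's axis
out = log_softmax_2d(flat).reshape(x.shape)         # LogSoftmax(axis=1) + Reshape
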
Example #11
    def find_and_replace_pattern(self, graph: Graph):
        for fake_output in graph.get_op_nodes(op='FakeOutput'):
            name = fake_output.soft_get('name', fake_output.id)

            producer = fake_output.in_port(0).get_source().node
            producer_outputs = 0
            for port in producer.out_ports().values():
                if not port.disconnected():
                    producer_outputs += 1
            if producer_outputs != 1:
                # At this stage we don't know the output type, so we rely on the MO transformation that updates
                # the Const type for elementwise operations in case of input data type mismatch
                add = create_op_with_const_inputs(graph, Add, {1: int64_array(0)}, {'can_be_fused': False})
                rename_nodes([(fake_output, name + '/TBD'), (add, name)])

                prev_op_in_port = fake_output.in_port(0).get_connection().get_source()
                # Get tensor names incoming to FakeOutput
                tensor_names = prev_op_in_port.get_tensor_names()

                # Remove tensor info from data node
                prev_op_in_port.remove_tensor_names()

                fake_output.in_port(0).get_connection().set_destination(add.in_port(0))
                fake_output.out_port(0).get_connection().set_source(add.out_port(0))

                # Move tensor names to Add op, which replaces FakeOutput
                if len(tensor_names) > 0:
                    add.out_port(0).add_tensor_names(tensor_names)

            else:
                result_in_port = fake_output.out_port(0).get_destination()
                result_in_port.disconnect()
                fake_output.in_port(0).get_connection().set_destination(result_in_port)
                rename_nodes([(fake_output, name + '/TBD'), (producer, name)])
Example #12
    def transform_map_fn_input_slicing(external_match: dict, internal_match: dict):
        """
        Transforms TensorFlow 2 input slicing into use of axis attribute for input port of Loop node
        :param external_match: a match used for handling a part of the main graph responsible for input slicing
        :param internal_match: a match used for handling a part of the body graph responsible for input slicing
        """
        loop_node = external_match['while']
        unstack_node = external_match['unstack']
        body_graph = loop_node['body']

        tensor_list_get_item_node = internal_match['slicing']
        unstack_placeholder = internal_match['tensor_list']
        tensor_list_get_item_node_name = tensor_list_get_item_node.soft_get('name', tensor_list_get_item_node.id)

        # 1. process the body graph to avoid unsupported operations: TensorListGetItem and TensorListSetItem
        # replace TensorListGetItem with Squeeze node and iterate through slices using axis for input port
        squeeze_list_element = create_op_with_const_inputs(body_graph, Squeeze, {1: int64_array(0)},
                                                           {'name': 'TensorListGetItemSqueeze'})
        tensor_list_get_item_node.in_port(0).get_connection().set_destination(squeeze_list_element.in_port(0))
        tensor_list_get_item_node.out_port(0).get_connection().set_source(squeeze_list_element.out_port(0))
        rename_nodes([(tensor_list_get_item_node, tensor_list_get_item_node_name + '/AbandonedName'),
                      (squeeze_list_element, tensor_list_get_item_node_name)])
        unstack_placeholder_layer_id = unstack_placeholder.internal_layer_id
        Loop.update_port_map_value_ext(loop_node.input_port_map, 'internal_layer_id', unstack_placeholder_layer_id,
                                       'axis', 0)

        # 2. process locality of Loop node in the main graph to avoid unsupported operations:
        # TensorListFromTensor, TensorListReserve, and TensorListStack
        # remove TensorListFromTensor and pass a tensor to Loop as is
        unstack_node.out_port(0).get_connection().set_source(unstack_node.in_port(0).get_connection().get_source())
Example #13
    def find_and_replace_pattern(self, graph: Graph):
        for attr_pad in graph.get_op_nodes(op='AttributedPad'):
            # save the original node name to use it in the new Pad op instance
            original_name = attr_pad.soft_get('name', attr_pad.id)

            new_pad = Pad(graph, {
                'mode': attr_pad.soft_get('mode', None),
            }).create_node()
            rename_nodes([(attr_pad, original_name + '/to_be_removed'),
                          (new_pad, original_name)])

            attr_pad.in_port(0).get_connection().set_destination(
                new_pad.in_port(0))
            new_pad.in_port(1).connect(
                Const(graph, {
                    'value': attr_pad.pads[:, 0]
                }).create_node().out_port(0))
            new_pad.in_port(2).connect(
                Const(graph, {
                    'value': attr_pad.pads[:, 1]
                }).create_node().out_port(0))
            if attr_pad.soft_get('mode') == 'constant':
                # create Constant node of proper data type (equal to the data type of the Pad first input)
                convert_pad_value = create_op_with_const_inputs(
                    graph, ConvertLike, {0: attr_pad.fill_value},
                    {'name': original_name + '/pad_value_convert'})
                convert_pad_value.in_port(1).connect(
                    new_pad.in_port(0).get_source())
                new_pad.in_port(3).connect(convert_pad_value.out_port(0))

            attr_pad.out_port(0).get_connection().set_source(
                new_pad.out_port(0))
            graph.remove_node(attr_pad.id)
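
A NumPy sketch (illustrative): the attribute form stores pads as one (begin, end) row per axis, so pads[:, 0] and pads[:, 1] are the two Const inputs created above.

import numpy as np

x = np.ones((2, 3), np.float32)
pads = np.array([[1, 0], [0, 2]])  # attr_pad.pads: one (begin, end) row per axis

padded = np.pad(x, [(b, e) for b, e in pads], mode='constant', constant_values=0.0)
assert padded.shape == (3, 5)
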
Example #14
    def find_and_replace_pattern(self, graph: Graph):
        for node in graph.get_op_nodes(op='Interpolate', version='opset1'):
            transformation_mode = 'align_corners' if int(
                node.soft_get('align_corners', 0)) else 'half_pixel'
            interpolate1_name = node.soft_get('name', node.id)
            interpolate4 = create_op_with_const_inputs(
                graph, Interpolate, {
                    2: mo_array([1.0, 1.0]),
                    3: int64_array(node.axes)
                }, {
                    'mode': node.mode,
                    'antialias': node.antialias,
                    'coordinate_transformation_mode': transformation_mode,
                    'pads_begin': correct_pad(node.soft_get('pads_begin', 0)),
                    'pads_end': correct_pad(node.soft_get('pads_end', 0)),
                    'nearest_mode': 'round_prefer_floor',
                    'cube_coeff': -0.75,
                    'shape_calculation_mode': 'sizes',
                    'version': 'opset4',
                    'in_ports_count': 4,
                })

            interpolate1_input_connection = node.in_port(0).get_connection()
            interpolate1_input_connection.set_destination(
                interpolate4.in_port(0))

            sizes_connection = node.in_port(1).get_connection()
            sizes_connection.set_destination(interpolate4.in_port(1))

            node.out_port(0).get_connection().set_source(
                interpolate4.out_port(0))
            rename_nodes([(node, interpolate1_name + '/delete'),
                          (interpolate4, interpolate1_name)])
Example #15
    def find_and_replace_pattern(self, graph: Graph):
        for dequantize_node in graph.get_op_nodes(op='DequantizeLinear'):
            node_name = dequantize_node.soft_get('name', dequantize_node.id)
            axis = dequantize_node.soft_get('axis', None)
            scale_y_shape = dequantize_node.in_port(1).data.get_shape()
            model_data_type = data_type_str_to_np(
                graph.graph['cmd_params'].data_type)
            cast = Cast(graph, {
                'dst_type': model_data_type,
                'name': node_name + '/Cast'
            }).create_node()
            dequantize_node.in_port(0).get_connection().set_destination(
                cast.in_port(0))
            mul = Mul(graph, {'can_be_fused': False}).create_node()

            is_second_port_connected = dequantize_node.is_in_port_connected(2)
            if is_second_port_connected:
                # it is necessary to keep the Sub node so that the pattern in offline transformations can match it
                # (see the ConvertQuantizeDequantize transformation in nGraph)
                sub = Sub(graph, {
                    'name': node_name + '/Sub',
                    'zero_point_sub': True
                }).create_node()
                cast.out_port(0).connect(sub.in_port(0))
                dequantize_node.in_port(2).get_connection().set_destination(
                    sub.in_port(1))
                sub.out_port(0).connect(mul.in_port(0))
            else:
                cast.out_port(0).connect(mul.in_port(0))

            dequantize_node.in_port(1).get_connection().set_destination(
                mul.in_port(1))
            dequantize_node.out_port(0).get_connection().set_source(
                mul.out_port(0))
            rename_nodes([(dequantize_node, node_name + '/TBD'),
                          (mul, node_name)])

            assert scale_y_shape is not None
            if axis is not None and len(
                    scale_y_shape) > 0 and scale_y_shape[0] > 1:
                input_shape = cast.in_port(0).data.get_shape()
                target_shape = np.ones(len(input_shape), np.int64)
                target_shape[axis] = input_shape[axis]

                mul_reshape = create_op_with_const_inputs(
                    graph, Reshape, {1: int64_array(target_shape)},
                    {'name': node_name + '/Reshape/Mul'})
                mul.in_port(1).get_connection().set_destination(
                    mul_reshape.in_port(0))
                mul_reshape.out_port(0).connect(mul.in_port(1))

                if is_second_port_connected:
                    sub_reshape = create_op_with_const_inputs(
                        graph, Reshape, {1: int64_array(target_shape)},
                        {'name': node_name + '/Reshape/Sub'})
                    sub.in_port(1).get_connection().set_destination(
                        sub_reshape.in_port(0))
                    sub_reshape.out_port(0).connect(sub.in_port(1))
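
A NumPy sketch (illustrative) of the decomposition, including the per-axis branch where scale and zero point are reshaped so they broadcast along `axis` only:

import numpy as np

x = np.array([[0, 128, 255], [64, 64, 64]], np.uint8)
scale = np.array([0.5, 0.25], np.float32)   # per-axis scale, axis = 0
zero_point = np.array([128, 64], np.uint8)
axis = 0

target_shape = np.ones(x.ndim, np.int64)
target_shape[axis] = x.shape[axis]          # [2, 1], as built for the Reshape nodes
dequantized = (x.astype(np.float32) - zero_point.reshape(target_shape)) \
    * scale.reshape(target_shape)           # Cast -> Sub -> Mul
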
Example #16
    def convert_fft_to_dft(self, graph: Graph, mx_fft: Node):
        mx_fft_name = mx_fft.soft_get('name', mx_fft.id)
        unsqueeze_node = create_op_with_const_inputs(
            graph, Unsqueeze, {1: int64_array([-1])},
            {'name': mx_fft_name + '/Unsqueeze'})
        rank_node = Rank(graph, {'name': mx_fft_name + '/Rank'}).create_node()

        mx_fft_connection = mx_fft.in_port(0).get_connection()
        mx_fft_connection.set_destination(unsqueeze_node.in_port(0))
        mx_fft_connection.get_source().connect(rank_node.in_port(0))

        add_node = create_op_with_const_inputs(graph, Add, {1: int64_array(1)},
                                               {'name': mx_fft_name + '/Add'},
                                               rank_node)
        broadcast_node1 = create_op_with_const_inputs(
            graph, Broadcast, {0: int64_array(0)},
            {'name': mx_fft_name + '/Pad_broadcast'})
        add_node.out_port(0).connect(broadcast_node1.in_port(1))

        scatter_node = create_op_with_const_inputs(
            graph, ScatterUpdate, {
                2: int64_array(1),
                3: int64_array(0)
            }, {'name': mx_fft_name + '/ScatterUpdate'})
        broadcast_node1.out_port(0).connect(scatter_node.in_port(0))
        rank_node.out_port(0).connect(scatter_node.in_port(1))

        pad_node = Pad(graph, {
            'name': mx_fft_name + '/Pad',
            'mode': 'constant'
        }).create_node([unsqueeze_node, broadcast_node1, scatter_node])

        dft_node = create_op_with_const_inputs(
            graph, DFT, {1: int64_array([-1])}, {
                'name': mx_fft_name + '/DFT',
                'in_ports_count': 2
            }, pad_node)

        sub_node = create_op_with_const_inputs(graph, Sub, {1: int64_array(1)},
                                               {'name': mx_fft_name + '/Sub'})
        rank_node.out_port(0).connect(sub_node.in_port(0))
        broadcast_node2 = create_op_with_const_inputs(
            graph, Broadcast, {0: int64_array(0)},
            {'name': mx_fft_name + '/Reshape_broadcast'})
        sub_node.out_port(0).connect(broadcast_node2.in_port(1))
        concat_node = create_op_with_const_inputs(
            graph, Concat, {1: int64_array([-1, 2])}, {
                'name': mx_fft_name + '/New_shape',
                'in_ports_count': 2,
                'axis': 0
            }, broadcast_node2)

        reshape_node = Reshape(graph, {}).create_node([dft_node, concat_node])

        mx_fft.out_port(0).get_connection().set_source(
            reshape_node.out_port(0))
        rename_nodes([(mx_fft, mx_fft_name + '/to_be_removed'),
                      (reshape_node, mx_fft_name)])
Example #17
    def transform_map_fn_output_concatenation(external_match: dict,
                                              internal_match: dict):
        """
        Transforms TensorFlow 2 output concatenation into use of axis attribute for output port of Loop node
        :param external_match: a match used for handling a part of the main graph responsible for output concatenation
        :param internal_match: a match used for handling a part of the body graph responsible for output concatenation
        """
        loop_node = external_match['while']
        stack_node = external_match['stack']
        list_reserve_node = external_match['reserve']
        body_graph = loop_node['body']

        tensor_list_set_item_node = internal_match['concatenation']
        tensor_list_set_item_node_name = tensor_list_set_item_node.soft_get(
            'name', tensor_list_set_item_node.id)
        list_result_node = internal_match['concatenation_result']

        # replace TensorListSetItem with Unsqueeze and use axis attribute for corresponding Result node
        # to concatenate results from different iterations
        unsqueeze_list_element = create_op_with_const_inputs(
            body_graph, Unsqueeze, {1: int64_array(0)},
            {'name': 'TensorListSetItemUnsqueeze'})
        tensor_list_set_item_node.in_port(2).get_connection().set_destination(
            unsqueeze_list_element.in_port(0))
        tensor_list_set_item_node.out_port(0).get_connection().set_source(
            unsqueeze_list_element.out_port(0))
        rename_nodes([(tensor_list_set_item_node,
                       tensor_list_set_item_node_name + '/AbandonedName'),
                      (unsqueeze_list_element, tensor_list_set_item_node_name)
                      ])
        list_result_node_layer_id = list_result_node.internal_layer_id
        Loop.update_port_map_value_ext(loop_node.output_port_map,
                                       'internal_layer_id',
                                       list_result_node_layer_id, 'axis', 0)

        # remove TensorListStack to by-pass the node since the result from the Loop node is already concatenated
        stack_node.out_port(0).get_connection().set_source(
            stack_node.in_port(0).get_connection().get_source())

        # disconnect ListReserve node because it is no longer needed for Loop
        list_reserve_node.out_port(0).disconnect()

        # connect the number of iterations, taken from the second input of ListReserve, as the trip count
        # create a constant with value True for execution_condition so that IE can ignore the execution condition
        # and perform trip_count iterations. Using a known trip count avoids dynamism.
        loop_node.in_port(1).disconnect()
        list_reserve_node.in_port(1).get_source().connect(loop_node.in_port(1))
        for record in loop_node.output_port_map:
            if 'purpose' in record and record[
                    'purpose'] == 'execution_condition':
                exec_cond_layer_id = record['internal_layer_id']
                exec_cond_node = Loop.get_body_node_by_internal_id(
                    loop_node, exec_cond_layer_id)
                const_true = Const(body_graph, {
                    'value': np.array(True, dtype=bool)
                }).create_node()
                exec_cond_node.in_port(0).get_connection().set_source(
                    const_true.out_port(0))
Example #18
    def replace_sub_graph(self, graph: Graph, match: dict):
        node = match['op']
        slice_name = node.soft_get('name', node.id)

        slice_node = create_op_with_const_inputs(graph, Slice, {1: node.starts, 2: node.ends, 3: node.axes})
        rename_nodes([(node, slice_name + '/to_be_removed'), (slice_node, slice_name)])

        node.in_port(0).get_connection().set_destination(slice_node.in_port(0))
        node.out_port(0).get_connection().set_source(slice_node.out_port(0))
Example #19
    def replace_sub_graph(self, graph: Graph, match: [dict, SubgraphMatch]):
        node = match['op']
        name = node.soft_get('name', node.id)

        # biases normalization
        if 2 in node.in_ports() and not node.in_port(2).disconnected():
            bias_node = Add(graph, {'name': name + '/Bias_'}).create_node()
            node_name = node.name + '/WithoutBiases'
            bias_node_name = node.name
            rename_nodes([(node, node_name), (bias_node, bias_node_name)])
            node.out_port(0).get_connection().set_source(bias_node.out_port(0))
            node.in_port(2).get_connection().set_destination(
                bias_node.in_port(1))
            node.out_port(0).connect(bias_node.in_port(0))

        # weights normalization
        assert node.has_valid('out-size')
        out_size = node['out-size']
        reshape_dim = int64_array([-1, out_size])
        if node.has_and_set('transpose_weights'):
            reshape_dim = int64_array([out_size, -1])
        node.insert_op_on_input_port(
            in_port_idx=1,
            new_op_class=Reshape,
            new_op_attrs={'name': name + '/weights_reshape'},
            value=reshape_dim)
        if node.has_and_set('transpose_weights'):
            node.insert_op_on_input_port(
                in_port_idx=1,
                new_op_class=Transpose,
                new_op_attrs={'name': name + '/weights_transpose'},
                value=int64_array([1, 0]))

        # input normalization for 4D Caffe and MXNet FullyConnected
        if graph.graph['fw'] == 'caffe':
            node.insert_op_on_input_port(in_port_idx=0,
                                         new_op_class=Reshape,
                                         new_op_attrs={
                                             'name':
                                             name + '/flatten_fc_input',
                                             'special_zero': True
                                         },
                                         value=int64_array([0, -1]))

        if graph.graph['fw'] == 'mxnet':
            if node.flatten is not False:
                node.insert_op_on_input_port(in_port_idx=0,
                                             new_op_class=Reshape,
                                             new_op_attrs={
                                                 'name':
                                                 name + '/flatten_fc_input',
                                                 'special_zero': True
                                             },
                                             value=int64_array([0, -1]))

        MatMul.update_node_stat(node, {})
Example #20
def replace_strided_slice(node: Node, mask: np.ndarray, op: callable):
    node_name = node.soft_get('name', node.id)
    axes = np.where(mask == 1)[0]
    new_node = create_op_node_with_second_input(node.graph, op,
                                                int64_array(axes))
    node.in_port(0).get_connection().set_destination(new_node.in_port(0))
    node.out_port(0).get_connection().set_source(new_node.out_port(0))

    rename_nodes([(node, node_name + '/ShouldBeDeleted'),
                  (new_node, node_name)])
    node.graph.remove_node(node.id)
Example #21
    def find_and_replace_pattern(self, graph: Graph):
        for shapeof_node in graph.get_op_nodes(op='ShapeOf'):
            in_node = shapeof_node.in_port(0).get_source().node
            if in_node.op == 'Const':
                shapeof_node.in_port(0).disconnect()
                shape_name = shapeof_node.soft_get('name', shapeof_node.id)
                shape_value = shapeof_node.out_port(0).data.get_value()
                shape_const_node = Const(graph, {'name': shape_name + '/ExecutionConstValue',
                                                 'value': shape_value}).create_node()
                shapeof_node.out_port(0).get_connection().set_source(shape_const_node.out_port(0))
                rename_nodes([(shapeof_node, shape_name + '/TBD'), (shape_const_node, shape_name)])
Example #22
File: Pack.py, Project: yury-intel/openvino
    def replace_op(self, graph: Graph, node: Node):
        out_node = Concat(graph, {'axis': node.axis, 'in_ports_count': len(node.in_ports())}).create_node()
        pack_name = node.soft_get('name', node.id)

        for ind in node.in_ports():
            unsqueeze_node = create_op_with_const_inputs(graph, Unsqueeze, {1: int64_array([node.axis])},
                                                         {'name': node.soft_get('name', node.id) + '/Unsqueeze'})
            node.in_port(ind).get_connection().set_destination(unsqueeze_node.in_port(0))
            unsqueeze_node.out_port(0).connect(out_node.in_port(ind))

        rename_nodes([(node, pack_name + '/TBR'), (out_node, pack_name)])
        return [out_node.id]
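
A NumPy sketch (illustrative): Pack/stack is concatenation of inputs that each received a new unit dimension at `axis`, which is what the Unsqueeze + Concat subgraph builds.

import numpy as np

inputs = [np.ones((2, 3)), np.zeros((2, 3))]
axis = 1

unsqueezed = [np.expand_dims(t, axis) for t in inputs]  # the per-input Unsqueeze nodes
packed = np.concatenate(unsqueezed, axis=axis)          # the Concat node
assert np.array_equal(packed, np.stack(inputs, axis=axis))
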
Example #23
    def replace_pattern(self, graph: Graph, match: dict):
        if not self.is_applicable(match):
            return

        unsqueeze_node = match['unsqueeze']
        unsqueeze_name = unsqueeze_node.soft_get('name', unsqueeze_node.id)
        second_input_of_unsqueeze = unsqueeze_node.in_port(
            1).get_connection().get_source().node
        d_idx = int(second_input_of_unsqueeze.value)
        axis = d_idx - 1

        shape_node = Shape(graph,
                           dict(name=unsqueeze_name + '/Shape')).create_node()
        axis_len_node = node_to_get_shape_value_of_indices(shape_node, [axis])

        second_input_of_tile = match['tile'].in_port(
            1).get_connection().get_source().node
        scale = int64_array([second_input_of_tile.value[d_idx]])
        float_scale = float32_array([second_input_of_tile.value[d_idx]])
        mul_node = create_op_with_const_inputs(
            graph, Mul, {1: scale}, {'name': unsqueeze_name + '/Mul'})

        axis_len_node.out_port(0).connect(mul_node.in_port(0))

        interp_node = create_op_with_const_inputs(
            graph, Interpolate, {
                2: float_scale,
                3: int64_array([axis])
            }, {
                'mode': 'nearest',
                'antialias': 0,
                'pads_begin': int64_array([0]),
                'pads_end': int64_array([0]),
                'coordinate_transformation_mode': 'half_pixel',
                'nearest_mode': 'round_prefer_floor',
                'cube_coeff': -0.75,
                'version': 'opset4',
                'shape_calculation_mode': 'scales',
                'in_ports_count': 4,
                'maybe_part_of_sequence': True
            })
        mul_node.out_port(0).connect(interp_node.in_port(1))

        reshape_node = match['reshape']
        reshape_node.out_port(0).get_connection().set_source(
            interp_node.out_port(0))
        reshape_name = reshape_node.soft_get('name', reshape_node.id)
        rename_nodes([(reshape_node, reshape_name + '/delete'),
                      (interp_node, reshape_name)])

        unsqueeze_connection = unsqueeze_node.in_port(0).get_connection()
        unsqueeze_connection.set_destination(interp_node.in_port(0))
        unsqueeze_connection.get_source().connect(shape_node.in_port(0))
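
A NumPy sketch (illustrative) of why the matched Unsqueeze/Tile/Reshape chain is an integer nearest-neighbor upsample: it repeats every element `scale` times along `axis` (with axis = d_idx - 1, as in the code above).

import numpy as np

x = np.array([[1, 2], [3, 4]])
axis, scale = 1, 3
d_idx = axis + 1

unsq = np.expand_dims(x, d_idx)    # Unsqueeze at d_idx
reps = [1] * unsq.ndim
reps[d_idx] = scale
tiled = np.tile(unsq, reps)        # Tile by `scale` at d_idx
new_shape = list(x.shape)
new_shape[axis] *= scale
merged = tiled.reshape(new_shape)  # Reshape merging the two axes
assert np.array_equal(merged, np.repeat(x, scale, axis=axis))
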
Example #24
    def find_and_replace_pattern(self, graph: Graph):
        for node in graph.get_op_nodes(op='LayerNorm'):
            node_name = node.soft_get('name', node.id)

            if node.output_mean_var is True:
                if not node.out_port(1).disconnected() or not node.out_port(2).disconnected():
                    raise Error("Node {} is supported with only one output".format(node_name))
                log.error('LayerNorm node {} with attribute "output_mean_var" = True is not supported. '
                          'But since the node has one output, the conversion will continue.'.format(node_name),
                          extra={'is_warning': True})

            input_shape = node.in_port(0).data.get_shape()
            assert node.has_valid('axis'), 'Incorrect axis value for the node {}'.format(node_name)
            axis = node.axis

            mvn = create_op_node_with_second_input(graph, MVN, int64_array([axis]),
                                                   dict(eps=node.epsilon, name=node_name + '/LayerNorm/MVN_',
                                                        across_channels=1, normalize_variance=1, eps_mode='inside_sqrt'))

            mul = Mul(graph, {'name': node_name + '/LayerNorm/mul_'}).create_node()
            add = Add(graph, {'name': mul.name + '/LayerNorm/add_'}).create_node()

            node.in_port(0).get_connection().set_destination(mvn.in_port(0))
            node.in_port(1).get_connection().set_destination(mul.in_port(1))
            node.in_port(2).get_connection().set_destination(add.in_port(1))

            mvn.out_port(0).connect(mul.in_port(0))
            mul.out_port(0).connect(add.in_port(0))
            node.out_port(0).get_connection().set_source(add.out_port(0))

            # MXNet LayerNorm gamma and beta attributes are 1D tensors with shape = [input_shape[axis]]
            # We have to unsqueeze values for Mul and Add operations to avoid shapes incompatibility problems
            # if axis != -1
            canonical_axis = get_canonical_axis_index(input_shape, axis)
            unsqueeze_value = []
            for idx, val in enumerate(input_shape):
                if idx != canonical_axis:
                    unsqueeze_value.append(idx)

            mul_const_unsqueeze = create_op_node_with_second_input(graph, Unsqueeze,
                                                                   int64_array(unsqueeze_value),
                                                                   dict(name=mul.name + '/Unsqueeze',
                                                                        override_output_shape=True))
            add_const_unsqueeze = create_op_node_with_second_input(graph, Unsqueeze,
                                                                   int64_array(unsqueeze_value),
                                                                   dict(name=add.name + '/Unsqueeze',
                                                                        override_output_shape=True))

            mul.in_port(1).get_connection().insert_node(mul_const_unsqueeze)
            add.in_port(1).get_connection().insert_node(add_const_unsqueeze)

            rename_nodes([(node, node_name + '/ShouldBeDeleted'), (add, node_name)])
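
A NumPy sketch (illustrative) of the decomposition: LayerNorm(x) = MVN(x) * gamma + beta, with gamma and beta unsqueezed over all axes except `axis` so they broadcast correctly.

import numpy as np

x = np.random.rand(2, 3, 4).astype(np.float32)
axis, eps = 1, 1e-5
gamma = np.random.rand(x.shape[axis]).astype(np.float32)
beta = np.random.rand(x.shape[axis]).astype(np.float32)

mvn = (x - x.mean(axis=axis, keepdims=True)) / np.sqrt(x.var(axis=axis, keepdims=True) + eps)
unsqueeze_axes = tuple(i for i in range(x.ndim) if i != axis)  # here: (0, 2)
out = mvn * np.expand_dims(gamma, unsqueeze_axes) + np.expand_dims(beta, unsqueeze_axes)
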
Example #25
    def replace(node: Node, const: Node):
        graph = node.graph
        shape = const.shape
        const_name = const.soft_get('name', const.id)

        non_one_dims = np.argwhere(shape != 1).flatten()
        one_dims = np.argwhere(shape == 1).flatten()

        if not (non_one_dims.size == 1 and 5 < np.prod(shape) < 500):
            # the (5, 500) range was chosen empirically to limit the number of affected models
            return

        value = const.value
        if not np.array_equal(np.arange(0, np.prod(shape), 1).reshape(shape), value):
            return

        positive_idx = non_one_dims.item(0)
        negative_idx = positive_idx - len(shape)

        node_name = node.soft_get('name', node.id)
        gather = create_op_with_const_inputs(graph, Gather, {1: int64_array(negative_idx), 2: int64_array(0)},
                                             {'name': node_name + '/BroadcastingDim'})
        gather_for_const = create_op_with_const_inputs(graph, Gather, {1: int64_array(negative_idx), 2: int64_array(0)},
                                                       {'name': const_name + '/BroadcastingDim'})
        shapeof_node = Shape(graph, {'name': const_name + '/ShapeOf'}).create_node()
        shapeof_node.out_port(0).connect(gather_for_const.in_port(0))

        equal_node = create_op_with_const_inputs(graph, Equal, {1: int64_array(1)}, {'name': node_name + '/ConstOne'})
        gather.out_port(0).connect(equal_node.in_port(0))

        select_node = Select(graph, {'name': node_name + '/Select',
                                      'auto_broadcast': 'numpy'}).create_node([equal_node, gather_for_const, gather])

        const.out_port(0).connect(shapeof_node.in_port(0))

        range_node = create_op_with_const_inputs(graph, Range,
                                                 {0: mo_array(0, dtype=value.dtype),
                                                  2: mo_array(1, dtype=value.dtype)},
                                                 {'name': const_name + '/Range', 'dtype': value.dtype})
        select_node.out_port(0).connect(range_node.in_port(1))

        node.in_port(1).get_connection().add_destination(gather.in_port(0))

        node.in_port(0).get_connection().set_source(range_node.out_port(0))

        if one_dims.size:
            unsqueeze = create_op_node_with_second_input(graph, Unsqueeze, one_dims,
                                                         {'name': const_name + '/KeepShape'})
            range_node.out_port(0).get_connection().insert_node(unsqueeze)
            rename_nodes([(const, const_name + '/ToBeDeleted'), (unsqueeze, const_name)])
        else:
            rename_nodes([(const, const_name + '/ToBeDeleted'), (range_node, const_name)])
Example #26
    def find_and_replace_pattern(self, graph: Graph):
        for attr_roll in graph.get_op_nodes(op='AttributedRoll'):
            original_name = attr_roll.soft_get('name', attr_roll.id)
            port_value_dict = {1: attr_roll.shift}
            if attr_roll.has_valid('axes'):
                port_value_dict.update({2: attr_roll.axes})

            new_roll = create_op_with_const_inputs(graph, op=Roll, port_value_dict=port_value_dict)
            rename_nodes([(attr_roll, original_name + '/to_be_removed'), (new_roll, original_name)])

            attr_roll.in_port(0).get_connection().set_destination(new_roll.in_port(0))
            attr_roll.out_port(0).get_connection().set_source(new_roll.out_port(0))
            graph.remove_node(attr_roll.id)
Example #27
    def find_and_replace_pattern(self, graph: Graph):
        for cbv in graph.get_op_nodes(op='ClipByValueTF'):
            cbv_name = cbv.soft_get('name', cbv.id)
            minimum = Minimum(graph, {'name': cbv_name + '/ClipMinimum'}).create_node()
            maximum = Maximum(graph, {'name': cbv_name + '/ClipMaximum'}).create_node()
            minimum.in_port(0).connect(cbv.in_port(0).get_source())
            minimum.in_port(1).connect(cbv.in_port(2).get_source())
            maximum.in_port(0).connect(minimum.out_port(0))
            maximum.in_port(1).connect(cbv.in_port(1).get_source())
            cbv.out_port(0).get_connection().set_source(maximum.out_port(0))

            rename_nodes([(cbv, cbv_name + '/TBR'), (maximum, cbv_name)])
            graph.remove_node(cbv.id)
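
A NumPy sketch (illustrative): clip_by_value(x, min, max) decomposes into Maximum(Minimum(x, max), min), matching the two nodes created above.

import numpy as np

x = np.array([-5.0, 0.0, 5.0])
clip_min, clip_max = -1.0, 2.0

out = np.maximum(np.minimum(x, clip_max), clip_min)
assert np.array_equal(out, np.clip(x, clip_min, clip_max))
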
Example #28
    def replace_tdnn(self, graph: Graph, tdnn_node: Node):
        tdnn_name = tdnn_node.soft_get('name', tdnn_node.id)

        concat_node = Concat(graph, {'axis': 1}).create_node()
        rename_nodes([(tdnn_node, tdnn_name + '/to_be_removed'),
                      (concat_node, tdnn_name)])

        for offset_ind, t in enumerate(tdnn_node['time_offsets']):
            concat_node.add_input_port(offset_ind)
            if t != 0:
                memory_name = tdnn_name + '/MemoryOffset/' + str(abs(t))
                memoryoffset_node = MemoryOffset(
                    graph, {
                        'name': memory_name,
                        't': t,
                        'pair_name': memory_name + '_out',
                        'has_default': False,
                        'splitted': False
                    }).create_node()

                tdnn_node.in_port(0).get_source().connect(
                    memoryoffset_node.in_port(0))
                memoryoffset_node.out_port(0).connect(
                    concat_node.in_port(offset_ind))
            else:
                # a zero time offset is not allowed in IE and is meaningless,
                # so connect the input of tdnncomponent directly to Concat without a MemoryOffset
                tdnn_node.in_port(0).get_source().connect(
                    concat_node.in_port(offset_ind))

        weights = tdnn_node['weights']
        fc_inputs = {1: weights}

        bias_term = False
        if tdnn_node.has_valid('biases'):
            assert len(tdnn_node['biases']) == weights.shape[0]
            fc_inputs.update({2: tdnn_node['biases']})
            bias_term = True

        fc_node = create_op_with_const_inputs(
            graph, FullyConnected, fc_inputs, {
                'name': tdnn_name + '/FC',
                'out-size': weights.shape[0],
                'transpose_weights': True,
                'bias_term': bias_term
            })

        concat_node.out_port(0).connect(fc_node.in_port(0))
        tdnn_node.in_port(0).disconnect()
        tdnn_node.out_port(0).get_connection().set_source(fc_node.out_port(0))
Example #29
def replace_with_hsigmoid(graph: Graph, first_node: Node, last_node: Node):
    # determine the input port of first and last nodes which gets the 'input' node output
    add_input_port_idx = int(
        first_node.in_port(0).get_connection().get_source().node.soft_get('op')
        == 'Const')
    last_node_name = last_node.soft_get('name', last_node.id)

    hsigmoid = HSigmoid(graph, {}).create_node()
    hsigmoid.in_port(0).connect(
        first_node.in_port(add_input_port_idx).get_source())
    last_node.out_port(0).get_connection().set_source(hsigmoid.out_port(0))

    rename_nodes([(last_node, last_node_name + '/TBR'),
                  (hsigmoid, last_node_name)])
Example #30
    def transform_tensor_list_output_concatenation(external_match: dict,
                                                   internal_match: dict):
        """
        Transforms TensorFlow 2 output concatenation into use of axis attribute for output port of Loop node
        :param external_match: a match used for handling a part of the main graph responsible for output concatenation
        :param internal_match: a match used for handling a part of the body graph responsible for output concatenation
        """
        loop_node = external_match['while']
        empty_tensor_list_node = external_match['reserve']
        body_graph = loop_node['body']

        tensor_list_push_back_node = internal_match['concatenation']
        tensor_list_push_back_node_name = tensor_list_push_back_node.soft_get(
            'name', tensor_list_push_back_node.id)
        list_result_node = internal_match['concatenation_result']

        # replace TensorListPushBack with Unsqueeze and use axis attribute for corresponding Result node
        # to concatenate results from different iterations
        unsqueeze_list_element = create_op_with_const_inputs(
            body_graph, Unsqueeze, {1: int64_array(0)}, {
                'name':
                tensor_list_push_back_node_name +
                '/TensorListPushBackUnsqueeze'
            })
        tensor_list_push_back_node.in_port(1).get_connection().set_destination(
            unsqueeze_list_element.in_port(0))
        tensor_list_push_back_node.out_port(0).get_connection().set_source(
            unsqueeze_list_element.out_port(0))
        rename_nodes([(tensor_list_push_back_node,
                       tensor_list_push_back_node_name + '/AbandonedName'),
                      (unsqueeze_list_element, tensor_list_push_back_node_name)
                      ])
        list_result_node_layer_id = list_result_node.internal_layer_id
        Loop.update_port_map_value_ext(loop_node.output_port_map,
                                       'internal_layer_id',
                                       list_result_node_layer_id, 'axis', 0)

        # disconnect EmptyTensorList node because it is no longer needed for Loop
        empty_tensor_list_node.out_port(0).disconnect()

        loop_node.in_port(1).disconnect()
        empty_tensor_list_node.in_port(1).get_source().connect(
            loop_node.in_port(1))

        # remove the back edge; iterate over a copy because the list is mutated inside the loop
        for record in list(loop_node.back_edges):
            if 'from_layer' in record and record[
                    'from_layer'] == list_result_node_layer_id:
                loop_node.back_edges.remove(record)